author    Myles Borins <mylesborins@google.com>  2018-04-10 21:39:51 -0400
committer Myles Borins <mylesborins@google.com>  2018-04-11 13:22:42 -0400
commit    12a1b9b8049462e47181a298120243dc83e81c55
tree      8605276308c8b4e3597516961266bae1af57557a
parent    78cd8263354705b767ef8c6a651740efe4931ba0
download  android-node-v8-12a1b9b8049462e47181a298120243dc83e81c55.tar.gz
          android-node-v8-12a1b9b8049462e47181a298120243dc83e81c55.tar.bz2
          android-node-v8-12a1b9b8049462e47181a298120243dc83e81c55.zip
deps: update V8 to 6.6.346.23
PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
-rw-r--r-- deps/v8/.gitignore | 4
-rw-r--r-- deps/v8/AUTHORS | 1
-rw-r--r-- deps/v8/BUILD.gn | 102
-rw-r--r-- deps/v8/ChangeLog | 1745
-rw-r--r-- deps/v8/DEPS | 126
-rw-r--r-- deps/v8/Makefile | 493
-rw-r--r-- deps/v8/Makefile.android | 72
-rw-r--r-- deps/v8/OWNERS | 7
-rw-r--r-- deps/v8/PRESUBMIT.py | 92
-rw-r--r-- deps/v8/gni/msvs_dependencies.isolate (renamed from deps/v8/gypfiles/win/msvs_dependencies.isolate) | 0
-rw-r--r-- deps/v8/gni/v8.gni | 10
-rw-r--r-- deps/v8/gypfiles/all.gyp | 39
-rw-r--r-- deps/v8/gypfiles/cctest.gyp | 468
-rw-r--r-- [-rwxr-xr-x] deps/v8/gypfiles/coverage_wrapper.py | 0
-rw-r--r-- deps/v8/gypfiles/d8.gyp (renamed from deps/v8/src/d8.gyp) | 68
-rw-r--r-- deps/v8/gypfiles/features.gypi | 8
-rw-r--r-- deps/v8/gypfiles/fuzzer.gyp (renamed from deps/v8/test/fuzzer/fuzzer.gyp) | 364
-rw-r--r-- deps/v8/gypfiles/gmock.gyp | 72
-rw-r--r-- deps/v8/gypfiles/gtest.gyp (renamed from deps/v8/testing/gtest.gyp) | 70
-rw-r--r-- [-rwxr-xr-x] deps/v8/gypfiles/gyp_v8 | 18
-rw-r--r-- deps/v8/gypfiles/inspector-test.gyp | 39
-rw-r--r-- deps/v8/gypfiles/inspector.gyp (renamed from deps/v8/src/inspector/inspector.gyp) | 23
-rw-r--r-- deps/v8/gypfiles/inspector.gypi | 90
-rw-r--r-- deps/v8/gypfiles/isolate.gypi | 100
-rw-r--r-- deps/v8/gypfiles/landmine_utils.py | 123
-rwxr-xr-x deps/v8/gypfiles/landmines.py | 245
-rw-r--r-- deps/v8/gypfiles/mkgrokdump.gyp | 27
-rw-r--r-- deps/v8/gypfiles/parser-shell.gyp (renamed from deps/v8/tools/parser-shell.gyp) | 12
-rw-r--r-- [-rwxr-xr-x] deps/v8/gypfiles/run-tests-legacy.py | 0
-rw-r--r-- deps/v8/gypfiles/samples.gyp | 61
-rw-r--r-- [-rwxr-xr-x] deps/v8/gypfiles/sysroot_ld_flags.sh | 0
-rw-r--r-- deps/v8/gypfiles/toolchain.gypi | 34
-rw-r--r-- deps/v8/gypfiles/unittests.gyp | 287
-rw-r--r-- deps/v8/gypfiles/v8.gyp | 2613
-rw-r--r-- deps/v8/gypfiles/v8vtune.gyp | 36
-rw-r--r-- [-rwxr-xr-x] deps/v8/gypfiles/verify_source_deps.py (renamed from deps/v8/tools/verify_source_deps.py) | 26
-rw-r--r-- deps/v8/include/PRESUBMIT.py | 2
-rw-r--r-- deps/v8/include/v8-debug.h | 255
-rw-r--r-- deps/v8/include/v8-profiler.h | 72
-rw-r--r-- deps/v8/include/v8-util.h | 10
-rw-r--r-- deps/v8/include/v8-version.h | 6
-rw-r--r-- deps/v8/include/v8.h | 257
-rw-r--r-- deps/v8/infra/config/cq.cfg | 10
-rw-r--r-- deps/v8/infra/mb/mb_config.pyl | 853
-rw-r--r-- deps/v8/samples/process.cc | 9
-rw-r--r-- deps/v8/samples/samples.gyp | 84
-rw-r--r-- deps/v8/src/PRESUBMIT.py | 2
-rw-r--r-- deps/v8/src/allocation.cc | 7
-rw-r--r-- deps/v8/src/api-arguments.cc | 7
-rw-r--r-- deps/v8/src/api-arguments.h | 9
-rw-r--r-- deps/v8/src/api-natives.cc | 15
-rw-r--r-- deps/v8/src/api-natives.h | 4
-rw-r--r-- deps/v8/src/api.cc | 550
-rw-r--r-- deps/v8/src/api.h | 4
-rw-r--r-- deps/v8/src/arm/assembler-arm-inl.h | 63
-rw-r--r-- deps/v8/src/arm/assembler-arm.cc | 62
-rw-r--r-- deps/v8/src/arm/assembler-arm.h | 98
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc | 16
-rw-r--r-- deps/v8/src/arm/codegen-arm.cc | 10
-rw-r--r-- deps/v8/src/arm/constants-arm.h | 4
-rw-r--r-- deps/v8/src/arm/deoptimizer-arm.cc | 3
-rw-r--r-- deps/v8/src/arm/disasm-arm.cc | 10
-rw-r--r-- deps/v8/src/arm/frame-constants-arm.h | 6
-rw-r--r-- deps/v8/src/arm/interface-descriptors-arm.cc | 6
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc | 165
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.h | 30
-rw-r--r-- deps/v8/src/arm/simulator-arm.cc | 23
-rw-r--r-- deps/v8/src/arm/simulator-arm.h | 4
-rw-r--r-- deps/v8/src/arm64/assembler-arm64-inl.h | 40
-rw-r--r-- deps/v8/src/arm64/assembler-arm64.cc | 20
-rw-r--r-- deps/v8/src/arm64/assembler-arm64.h | 34
-rw-r--r-- deps/v8/src/arm64/code-stubs-arm64.cc | 38
-rw-r--r-- deps/v8/src/arm64/constants-arm64.h | 11
-rw-r--r-- deps/v8/src/arm64/decoder-arm64-inl.h | 6
-rw-r--r-- deps/v8/src/arm64/deoptimizer-arm64.cc | 7
-rw-r--r-- deps/v8/src/arm64/disasm-arm64.cc | 20
-rw-r--r-- deps/v8/src/arm64/disasm-arm64.h | 6
-rw-r--r-- deps/v8/src/arm64/eh-frame-arm64.cc | 8
-rw-r--r-- deps/v8/src/arm64/frame-constants-arm64.h | 6
-rw-r--r-- deps/v8/src/arm64/instructions-arm64.h | 6
-rw-r--r-- deps/v8/src/arm64/instrument-arm64.cc | 25
-rw-r--r-- deps/v8/src/arm64/interface-descriptors-arm64.cc | 9
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64-inl.h | 137
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64.cc | 388
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64.h | 143
-rw-r--r-- deps/v8/src/arm64/simulator-arm64.cc | 60
-rw-r--r-- deps/v8/src/arm64/simulator-arm64.h | 1
-rw-r--r-- deps/v8/src/asmjs/OWNERS | 2
-rw-r--r-- deps/v8/src/asmjs/asm-js.cc | 44
-rw-r--r-- deps/v8/src/asmjs/asm-parser.cc | 6
-rw-r--r-- deps/v8/src/asmjs/switch-logic.h | 6
-rw-r--r-- deps/v8/src/assembler.cc | 106
-rw-r--r-- deps/v8/src/assembler.h | 107
-rw-r--r-- deps/v8/src/ast/OWNERS | 1
-rw-r--r-- deps/v8/src/ast/ast-function-literal-id-reindexer.h | 6
-rw-r--r-- deps/v8/src/ast/ast-numbering.cc | 410
-rw-r--r-- deps/v8/src/ast/ast-numbering.h | 55
-rw-r--r-- deps/v8/src/ast/ast.cc | 6
-rw-r--r-- deps/v8/src/ast/ast.h | 106
-rw-r--r-- deps/v8/src/ast/compile-time-value.h | 6
-rw-r--r-- deps/v8/src/ast/prettyprinter.cc | 24
-rw-r--r-- deps/v8/src/ast/scopes.cc | 57
-rw-r--r-- deps/v8/src/ast/scopes.h | 18
-rw-r--r-- deps/v8/src/bailout-reason.h | 5
-rw-r--r-- deps/v8/src/base.isolate | 2
-rw-r--r-- deps/v8/src/base/atomic-utils.h | 6
-rw-r--r-- deps/v8/src/base/atomicops_internals_portable.h | 4
-rw-r--r-- deps/v8/src/base/atomicops_internals_std.h | 4
-rw-r--r-- deps/v8/src/base/cpu.cc | 3
-rw-r--r-- deps/v8/src/base/file-utils.h | 2
-rw-r--r-- deps/v8/src/base/format-macros.h | 6
-rw-r--r-- deps/v8/src/base/logging.cc | 38
-rw-r--r-- deps/v8/src/base/logging.h | 12
-rw-r--r-- deps/v8/src/base/macros.h | 2
-rw-r--r-- deps/v8/src/base/optional.h | 8
-rw-r--r-- deps/v8/src/base/platform/platform-cygwin.cc | 2
-rw-r--r-- deps/v8/src/base/platform/platform-fuchsia.cc | 16
-rw-r--r-- deps/v8/src/base/platform/platform-posix.cc | 21
-rw-r--r-- deps/v8/src/base/platform/platform-win32.cc | 9
-rw-r--r-- deps/v8/src/base/platform/time.cc | 370
-rw-r--r-- deps/v8/src/base/platform/time.h | 138
-rw-r--r-- deps/v8/src/base/qnx-math.h | 2
-rw-r--r-- deps/v8/src/base/sys-info.cc | 6
-rw-r--r-- deps/v8/src/base/template-utils.h | 6
-rw-r--r-- deps/v8/src/base/v8-fallthrough.h | 21
-rw-r--r-- deps/v8/src/bit-vector.h | 6
-rw-r--r-- deps/v8/src/bootstrapper.cc | 402
-rw-r--r-- deps/v8/src/bootstrapper.h | 2
-rw-r--r-- deps/v8/src/builtins/arm/builtins-arm.cc | 129
-rw-r--r-- deps/v8/src/builtins/arm64/builtins-arm64.cc | 167
-rw-r--r-- deps/v8/src/builtins/builtins-api.cc | 16
-rw-r--r-- deps/v8/src/builtins/builtins-arguments-gen.cc | 2
-rw-r--r-- deps/v8/src/builtins/builtins-array-gen.cc | 1355
-rw-r--r-- deps/v8/src/builtins/builtins-array-gen.h | 156
-rw-r--r-- deps/v8/src/builtins/builtins-array.cc | 7
-rw-r--r-- deps/v8/src/builtins/builtins-async-function-gen.cc | 96
-rw-r--r-- deps/v8/src/builtins/builtins-async-gen.cc | 211
-rw-r--r-- deps/v8/src/builtins/builtins-async-gen.h | 49
-rw-r--r-- deps/v8/src/builtins/builtins-async-generator-gen.cc | 223
-rw-r--r-- deps/v8/src/builtins/builtins-async-iterator-gen.cc | 39
-rw-r--r-- deps/v8/src/builtins/builtins-bigint.cc | 61
-rw-r--r-- deps/v8/src/builtins/builtins-call-gen.cc | 10
-rw-r--r-- deps/v8/src/builtins/builtins-collections-gen.cc | 405
-rw-r--r-- deps/v8/src/builtins/builtins-constructor-gen.cc | 135
-rw-r--r-- deps/v8/src/builtins/builtins-constructor-gen.h | 2
-rw-r--r-- deps/v8/src/builtins/builtins-conversion-gen.cc | 19
-rw-r--r-- deps/v8/src/builtins/builtins-dataview.cc | 88
-rw-r--r-- deps/v8/src/builtins/builtins-date-gen.cc | 16
-rw-r--r-- deps/v8/src/builtins/builtins-definitions.h | 150
-rw-r--r-- deps/v8/src/builtins/builtins-function.cc | 20
-rw-r--r-- deps/v8/src/builtins/builtins-generator-gen.cc | 10
-rw-r--r-- deps/v8/src/builtins/builtins-internal-gen.cc | 359
-rw-r--r-- deps/v8/src/builtins/builtins-intl.h | 2
-rw-r--r-- deps/v8/src/builtins/builtins-iterator-gen.cc | 23
-rw-r--r-- deps/v8/src/builtins/builtins-iterator-gen.h | 6
-rw-r--r-- deps/v8/src/builtins/builtins-math-gen.cc | 2
-rw-r--r-- deps/v8/src/builtins/builtins-number-gen.cc | 14
-rw-r--r-- deps/v8/src/builtins/builtins-object-gen.cc | 332
-rw-r--r-- deps/v8/src/builtins/builtins-object.cc | 25
-rw-r--r-- deps/v8/src/builtins/builtins-promise-gen.cc | 1755
-rw-r--r-- deps/v8/src/builtins/builtins-promise-gen.h | 81
-rw-r--r-- deps/v8/src/builtins/builtins-proxy-gen.cc | 63
-rw-r--r-- deps/v8/src/builtins/builtins-regexp-gen.cc | 371
-rw-r--r-- deps/v8/src/builtins/builtins-regexp-gen.h | 20
-rw-r--r-- deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | 79
-rw-r--r-- deps/v8/src/builtins/builtins-string-gen.cc | 328
-rw-r--r-- deps/v8/src/builtins/builtins-typedarray-gen.cc | 1278
-rw-r--r-- deps/v8/src/builtins/builtins-typedarray-gen.h | 133
-rw-r--r-- deps/v8/src/builtins/builtins-typedarray.cc | 70
-rw-r--r-- deps/v8/src/builtins/builtins.cc | 1138
-rw-r--r-- deps/v8/src/builtins/builtins.h | 22
-rw-r--r-- deps/v8/src/builtins/constants-table-builder.cc | 83
-rw-r--r-- deps/v8/src/builtins/constants-table-builder.h | 48
-rw-r--r-- deps/v8/src/builtins/growable-fixed-array-gen.cc | 100
-rw-r--r-- deps/v8/src/builtins/growable-fixed-array-gen.h | 56
-rw-r--r-- deps/v8/src/builtins/ia32/builtins-ia32.cc | 107
-rw-r--r-- deps/v8/src/builtins/mips/OWNERS | 1
-rw-r--r-- deps/v8/src/builtins/mips/builtins-mips.cc | 121
-rw-r--r-- deps/v8/src/builtins/mips64/OWNERS | 1
-rw-r--r-- deps/v8/src/builtins/mips64/builtins-mips64.cc | 128
-rw-r--r-- deps/v8/src/builtins/ppc/builtins-ppc.cc | 136
-rw-r--r-- deps/v8/src/builtins/s390/builtins-s390.cc | 134
-rw-r--r-- deps/v8/src/builtins/setup-builtins-internal.cc | 4
-rw-r--r-- deps/v8/src/builtins/x64/builtins-x64.cc | 115
-rw-r--r-- deps/v8/src/code-events.h | 21
-rw-r--r-- deps/v8/src/code-stub-assembler.cc | 1579
-rw-r--r-- deps/v8/src/code-stub-assembler.h | 337
-rw-r--r-- deps/v8/src/code-stubs.cc | 46
-rw-r--r-- deps/v8/src/code-stubs.h | 4
-rw-r--r-- deps/v8/src/compilation-cache.cc | 85
-rw-r--r-- deps/v8/src/compilation-cache.h | 49
-rw-r--r-- deps/v8/src/compilation-dependencies.h | 6
-rw-r--r-- deps/v8/src/compilation-info.cc | 28
-rw-r--r-- deps/v8/src/compilation-info.h | 87
-rw-r--r-- deps/v8/src/compilation-statistics.h | 2
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc | 2
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher.h | 2
-rw-r--r-- deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 6
-rw-r--r-- deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h | 2
-rw-r--r-- deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc | 2
-rw-r--r-- deps/v8/src/compiler.cc | 622
-rw-r--r-- deps/v8/src/compiler.h | 99
-rw-r--r-- deps/v8/src/compiler/OWNERS | 4
-rw-r--r-- deps/v8/src/compiler/access-builder.cc | 16
-rw-r--r-- deps/v8/src/compiler/access-builder.h | 8
-rw-r--r-- deps/v8/src/compiler/access-info.cc | 18
-rw-r--r-- deps/v8/src/compiler/access-info.h | 3
-rw-r--r-- deps/v8/src/compiler/arm/code-generator-arm.cc | 878
-rw-r--r-- deps/v8/src/compiler/arm/instruction-codes-arm.h | 1
-rw-r--r-- deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 1
-rw-r--r-- deps/v8/src/compiler/arm/instruction-selector-arm.cc | 267
-rw-r--r-- deps/v8/src/compiler/arm/unwinding-info-writer-arm.h | 6
-rw-r--r-- deps/v8/src/compiler/arm64/code-generator-arm64.cc | 522
-rw-r--r-- deps/v8/src/compiler/arm64/instruction-codes-arm64.h | 3
-rw-r--r-- deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc | 3
-rw-r--r-- deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 327
-rw-r--r-- deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h | 6
-rw-r--r-- deps/v8/src/compiler/basic-block-instrumentor.h | 2
-rw-r--r-- deps/v8/src/compiler/branch-elimination.cc | 287
-rw-r--r-- deps/v8/src/compiler/branch-elimination.h | 75
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.cc | 334
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.h | 50
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.cc | 308
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.h | 26
-rw-r--r-- deps/v8/src/compiler/c-linkage.cc | 3
-rw-r--r-- deps/v8/src/compiler/code-assembler.cc | 105
-rw-r--r-- deps/v8/src/compiler/code-assembler.h | 61
-rw-r--r-- deps/v8/src/compiler/code-generator-impl.h | 7
-rw-r--r-- deps/v8/src/compiler/code-generator.cc | 228
-rw-r--r-- deps/v8/src/compiler/code-generator.h | 57
-rw-r--r-- deps/v8/src/compiler/common-operator-reducer.cc | 56
-rw-r--r-- deps/v8/src/compiler/common-operator-reducer.h | 4
-rw-r--r-- deps/v8/src/compiler/common-operator.cc | 262
-rw-r--r-- deps/v8/src/compiler/common-operator.h | 64
-rw-r--r-- deps/v8/src/compiler/dead-code-elimination.cc | 10
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.cc | 369
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.h | 7
-rw-r--r-- deps/v8/src/compiler/escape-analysis-reducer.cc | 5
-rw-r--r-- deps/v8/src/compiler/frame-states.cc | 67
-rw-r--r-- deps/v8/src/compiler/frame-states.h | 14
-rw-r--r-- deps/v8/src/compiler/functional-list.h | 122
-rw-r--r-- deps/v8/src/compiler/graph-assembler.cc | 4
-rw-r--r-- deps/v8/src/compiler/graph-assembler.h | 7
-rw-r--r-- deps/v8/src/compiler/graph-reducer.h | 2
-rw-r--r-- deps/v8/src/compiler/ia32/code-generator-ia32.cc | 855
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 12
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc | 10
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 315
-rw-r--r-- deps/v8/src/compiler/instruction-codes.h | 100
-rw-r--r-- deps/v8/src/compiler/instruction-scheduler.cc | 89
-rw-r--r-- deps/v8/src/compiler/instruction-selector-impl.h | 86
-rw-r--r-- deps/v8/src/compiler/instruction-selector.cc | 244
-rw-r--r-- deps/v8/src/compiler/instruction-selector.h | 28
-rw-r--r-- deps/v8/src/compiler/instruction.cc | 24
-rw-r--r-- deps/v8/src/compiler/instruction.h | 12
-rw-r--r-- deps/v8/src/compiler/int64-lowering.cc | 34
-rw-r--r-- deps/v8/src/compiler/js-builtin-reducer.cc | 796
-rw-r--r-- deps/v8/src/compiler/js-builtin-reducer.h | 40
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.cc | 2144
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.h | 70
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.cc | 176
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.h | 2
-rw-r--r-- deps/v8/src/compiler/js-generic-lowering.cc | 76
-rw-r--r-- deps/v8/src/compiler/js-graph.h | 2
-rw-r--r-- deps/v8/src/compiler/js-inlining.cc | 12
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.cc | 84
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.h | 10
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.cc | 187
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.h | 6
-rw-r--r-- deps/v8/src/compiler/js-operator.cc | 103
-rw-r--r-- deps/v8/src/compiler/js-operator.h | 40
-rw-r--r-- deps/v8/src/compiler/js-type-hint-lowering.cc | 38
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.cc | 71
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.h | 3
-rw-r--r-- deps/v8/src/compiler/jump-threading.cc | 94
-rw-r--r-- deps/v8/src/compiler/jump-threading.h | 2
-rw-r--r-- deps/v8/src/compiler/linkage.cc | 6
-rw-r--r-- deps/v8/src/compiler/linkage.h | 5
-rw-r--r-- deps/v8/src/compiler/live-range-separator.h | 6
-rw-r--r-- deps/v8/src/compiler/load-elimination.cc | 10
-rw-r--r-- deps/v8/src/compiler/loop-variable-optimizer.cc | 132
-rw-r--r-- deps/v8/src/compiler/loop-variable-optimizer.h | 19
-rw-r--r-- deps/v8/src/compiler/machine-graph-verifier.cc | 53
-rw-r--r-- deps/v8/src/compiler/machine-operator-reducer.cc | 4
-rw-r--r-- deps/v8/src/compiler/machine-operator.cc | 186
-rw-r--r-- deps/v8/src/compiler/machine-operator.h | 30
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.cc | 32
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.h | 3
-rw-r--r-- deps/v8/src/compiler/mips/OWNERS | 1
-rw-r--r-- deps/v8/src/compiler/mips/code-generator-mips.cc | 332
-rw-r--r-- deps/v8/src/compiler/mips/instruction-selector-mips.cc | 244
-rw-r--r-- deps/v8/src/compiler/mips64/OWNERS | 3
-rw-r--r-- deps/v8/src/compiler/mips64/code-generator-mips64.cc | 404
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 6
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 261
-rw-r--r-- deps/v8/src/compiler/move-optimizer.h | 6
-rw-r--r-- deps/v8/src/compiler/node-aux-data.h | 18
-rw-r--r-- deps/v8/src/compiler/node-properties.cc | 21
-rw-r--r-- deps/v8/src/compiler/node-properties.h | 2
-rw-r--r-- deps/v8/src/compiler/node.cc | 4
-rw-r--r-- deps/v8/src/compiler/node.h | 12
-rw-r--r-- deps/v8/src/compiler/opcodes.h | 181
-rw-r--r-- deps/v8/src/compiler/operation-typer.cc | 2
-rw-r--r-- deps/v8/src/compiler/operator-properties.cc | 3
-rw-r--r-- deps/v8/src/compiler/persistent-map.h | 57
-rw-r--r-- deps/v8/src/compiler/pipeline-statistics.h | 2
-rw-r--r-- deps/v8/src/compiler/pipeline.cc | 103
-rw-r--r-- deps/v8/src/compiler/pipeline.h | 2
-rw-r--r-- deps/v8/src/compiler/ppc/code-generator-ppc.cc | 194
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 264
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.cc | 84
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.h | 20
-rw-r--r-- deps/v8/src/compiler/register-allocator-verifier.h | 6
-rw-r--r-- deps/v8/src/compiler/register-allocator.cc | 6
-rw-r--r-- deps/v8/src/compiler/register-allocator.h | 6
-rw-r--r-- deps/v8/src/compiler/representation-change.cc | 16
-rw-r--r-- deps/v8/src/compiler/representation-change.h | 24
-rw-r--r-- deps/v8/src/compiler/s390/code-generator-s390.cc | 191
-rw-r--r-- deps/v8/src/compiler/s390/instruction-codes-s390.h | 8
-rw-r--r-- deps/v8/src/compiler/s390/instruction-scheduler-s390.cc | 8
-rw-r--r-- deps/v8/src/compiler/s390/instruction-selector-s390.cc | 298
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.cc | 130
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.h | 5
-rw-r--r-- deps/v8/src/compiler/simplified-lowering.cc | 149
-rw-r--r-- deps/v8/src/compiler/simplified-operator.cc | 181
-rw-r--r-- deps/v8/src/compiler/simplified-operator.h | 36
-rw-r--r-- deps/v8/src/compiler/store-store-elimination.cc | 11
-rw-r--r-- deps/v8/src/compiler/type-cache.h | 6
-rw-r--r-- deps/v8/src/compiler/typer.cc | 52
-rw-r--r-- deps/v8/src/compiler/types.cc | 30
-rw-r--r-- deps/v8/src/compiler/unwinding-info-writer.h | 2
-rw-r--r-- deps/v8/src/compiler/verifier.cc | 139
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.cc | 984
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.h | 56
-rw-r--r-- deps/v8/src/compiler/wasm-linkage.cc | 68
-rw-r--r-- deps/v8/src/compiler/x64/code-generator-x64.cc | 650
-rw-r--r-- deps/v8/src/compiler/x64/instruction-codes-x64.h | 5
-rw-r--r-- deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 3
-rw-r--r-- deps/v8/src/compiler/x64/instruction-selector-x64.cc | 255
-rw-r--r-- deps/v8/src/compiler/x64/unwinding-info-writer-x64.h | 6
-rw-r--r-- deps/v8/src/contexts.h | 136
-rw-r--r-- deps/v8/src/conversions-inl.h | 15
-rw-r--r-- deps/v8/src/conversions.cc | 44
-rw-r--r-- deps/v8/src/conversions.h | 3
-rw-r--r-- deps/v8/src/counters.cc | 16
-rw-r--r-- deps/v8/src/counters.h | 59
-rw-r--r-- deps/v8/src/d8-posix.cc | 4
-rw-r--r-- deps/v8/src/d8.cc | 109
-rw-r--r-- deps/v8/src/d8.h | 8
-rw-r--r-- deps/v8/src/date.h | 4
-rw-r--r-- deps/v8/src/debug/arm64/debug-arm64.cc | 3
-rw-r--r-- deps/v8/src/debug/debug-evaluate.cc | 173
-rw-r--r-- deps/v8/src/debug/debug-frames.cc | 5
-rw-r--r-- deps/v8/src/debug/debug-interface.h | 6
-rw-r--r-- deps/v8/src/debug/debug-scope-iterator.cc | 3
-rw-r--r-- deps/v8/src/debug/debug-scopes.cc | 9
-rw-r--r-- deps/v8/src/debug/debug-type-profile.cc | 8
-rw-r--r-- deps/v8/src/debug/debug.cc | 387
-rw-r--r-- deps/v8/src/debug/debug.h | 81
-rw-r--r-- deps/v8/src/debug/debug.js | 633
-rw-r--r-- deps/v8/src/debug/interface-types.h | 30
-rw-r--r-- deps/v8/src/debug/liveedit.cc | 9
-rw-r--r-- deps/v8/src/debug/liveedit.h | 2
-rw-r--r-- deps/v8/src/debug/mips/OWNERS | 1
-rw-r--r-- deps/v8/src/debug/mips64/OWNERS | 1
-rw-r--r-- deps/v8/src/debug/mirrors.js | 12
-rw-r--r-- deps/v8/src/deoptimize-reason.h | 4
-rw-r--r-- deps/v8/src/deoptimizer.cc | 31
-rw-r--r-- deps/v8/src/disasm.h | 8
-rw-r--r-- deps/v8/src/disassembler.cc | 6
-rw-r--r-- deps/v8/src/eh-frame.h | 2
-rw-r--r-- deps/v8/src/elements-kind.cc | 88
-rw-r--r-- deps/v8/src/elements-kind.h | 8
-rw-r--r-- deps/v8/src/elements.cc | 336
-rw-r--r-- deps/v8/src/elements.h | 15
-rw-r--r-- deps/v8/src/execution.h | 3
-rw-r--r-- deps/v8/src/external-reference-table.cc | 2
-rw-r--r-- deps/v8/src/factory.cc | 213
-rw-r--r-- deps/v8/src/factory.h | 50
-rw-r--r-- deps/v8/src/fast-dtoa.cc | 30
-rw-r--r-- deps/v8/src/feedback-vector.cc | 729
-rw-r--r-- deps/v8/src/feedback-vector.h | 460
-rw-r--r-- deps/v8/src/field-index-inl.h | 2
-rw-r--r-- deps/v8/src/field-index.h | 2
-rw-r--r-- deps/v8/src/field-type.h | 1
-rw-r--r-- deps/v8/src/flag-definitions.h | 72
-rw-r--r-- deps/v8/src/frames.cc | 177
-rw-r--r-- deps/v8/src/frames.h | 32
-rw-r--r-- deps/v8/src/gdb-jit.h | 2
-rw-r--r-- deps/v8/src/global-handles.cc | 78
-rw-r--r-- deps/v8/src/global-handles.h | 6
-rw-r--r-- deps/v8/src/globals.h | 241
-rw-r--r-- deps/v8/src/handler-table.cc | 220
-rw-r--r-- deps/v8/src/handler-table.h | 135
-rw-r--r-- deps/v8/src/heap-symbols.h | 487
-rw-r--r-- deps/v8/src/heap/concurrent-marking.cc | 107
-rw-r--r-- deps/v8/src/heap/concurrent-marking.h | 77
-rw-r--r-- deps/v8/src/heap/heap-inl.h | 2
-rw-r--r-- deps/v8/src/heap/heap.cc | 323
-rw-r--r-- deps/v8/src/heap/heap.h | 78
-rw-r--r-- deps/v8/src/heap/incremental-marking.cc | 24
-rw-r--r-- deps/v8/src/heap/invalidated-slots-inl.h | 6
-rw-r--r-- deps/v8/src/heap/invalidated-slots.h | 6
-rw-r--r-- deps/v8/src/heap/item-parallel-job.cc | 130
-rw-r--r-- deps/v8/src/heap/item-parallel-job.h | 134
-rw-r--r-- deps/v8/src/heap/mark-compact.cc | 160
-rw-r--r-- deps/v8/src/heap/mark-compact.h | 5
-rw-r--r-- deps/v8/src/heap/marking.h | 6
-rw-r--r-- deps/v8/src/heap/memory-reducer.cc | 1
-rw-r--r-- deps/v8/src/heap/memory-reducer.h | 6
-rw-r--r-- deps/v8/src/heap/object-stats.cc | 878
-rw-r--r-- deps/v8/src/heap/object-stats.h | 88
-rw-r--r-- deps/v8/src/heap/objects-visiting-inl.h | 6
-rw-r--r-- deps/v8/src/heap/objects-visiting.h | 7
-rw-r--r-- deps/v8/src/heap/remembered-set.h | 9
-rw-r--r-- deps/v8/src/heap/scavenge-job.h | 2
-rw-r--r-- deps/v8/src/heap/scavenger-inl.h | 9
-rw-r--r-- deps/v8/src/heap/scavenger.cc | 9
-rw-r--r-- deps/v8/src/heap/scavenger.h | 5
-rw-r--r-- deps/v8/src/heap/setup-heap-internal.cc | 32
-rw-r--r-- deps/v8/src/heap/slot-set.h | 6
-rw-r--r-- deps/v8/src/heap/spaces-inl.h | 32
-rw-r--r-- deps/v8/src/heap/spaces.cc | 190
-rw-r--r-- deps/v8/src/heap/spaces.h | 95
-rw-r--r-- deps/v8/src/heap/store-buffer.h | 6
-rw-r--r-- deps/v8/src/heap/stress-marking-observer.h | 2
-rw-r--r-- deps/v8/src/heap/stress-scavenge-observer.h | 2
-rw-r--r-- deps/v8/src/heap/sweeper.cc | 8
-rw-r--r-- deps/v8/src/heap/worklist.h | 6
-rw-r--r-- deps/v8/src/ia32/assembler-ia32-inl.h | 40
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc | 550
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 635
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 1
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc | 8
-rw-r--r-- deps/v8/src/ia32/disasm-ia32.cc | 5
-rw-r--r-- deps/v8/src/ia32/frame-constants-ia32.h | 6
-rw-r--r-- deps/v8/src/ia32/interface-descriptors-ia32.cc | 7
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 175
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h | 78
-rw-r--r-- deps/v8/src/ia32/sse-instr.h | 6
-rw-r--r-- deps/v8/src/ic/accessor-assembler.cc | 95
-rw-r--r-- deps/v8/src/ic/accessor-assembler.h | 6
-rw-r--r-- deps/v8/src/ic/binary-op-assembler.h | 6
-rw-r--r-- deps/v8/src/ic/ic-inl.h | 6
-rw-r--r-- deps/v8/src/ic/ic.cc | 206
-rw-r--r-- deps/v8/src/ic/ic.h | 66
-rw-r--r-- deps/v8/src/ic/keyed-store-generic.cc | 30
-rw-r--r-- deps/v8/src/ic/keyed-store-generic.h | 6
-rw-r--r-- deps/v8/src/ic/stub-cache.h | 6
-rw-r--r-- deps/v8/src/identity-map.cc | 13
-rw-r--r-- deps/v8/src/identity-map.h | 11
-rw-r--r-- deps/v8/src/inspector/DEPS | 1
-rw-r--r-- deps/v8/src/inspector/injected-script.cc | 8
-rw-r--r-- deps/v8/src/inspector/injected-script.h | 6
-rw-r--r-- deps/v8/src/inspector/inspected-context.h | 6
-rw-r--r-- deps/v8/src/inspector/inspector.gypi | 90
-rw-r--r-- deps/v8/src/inspector/remote-object-id.h | 6
-rw-r--r-- deps/v8/src/inspector/search-util.h | 6
-rw-r--r-- deps/v8/src/inspector/string-16.cc | 13
-rw-r--r-- deps/v8/src/inspector/string-16.h | 6
-rw-r--r-- deps/v8/src/inspector/string-util.h | 6
-rw-r--r-- deps/v8/src/inspector/test-interface.h | 2
-rw-r--r-- deps/v8/src/inspector/v8-console-agent-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-console-message.cc | 7
-rw-r--r-- deps/v8/src/inspector/v8-console-message.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-console.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.cc | 24
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.h | 10
-rw-r--r-- deps/v8/src/inspector/v8-debugger-script.cc | 18
-rw-r--r-- deps/v8/src/inspector/v8-debugger-script.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-debugger.cc | 1
-rw-r--r-- deps/v8/src/inspector/v8-debugger.h | 7
-rw-r--r-- deps/v8/src/inspector/v8-function-call.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-heap-profiler-agent-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-injected-script-host.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-inspector-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-inspector-session-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-internal-value-type.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-profiler-agent-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-regex.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-runtime-agent-impl.cc | 9
-rw-r--r-- deps/v8/src/inspector/v8-runtime-agent-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-schema-agent-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-stack-trace-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-value-utils.h | 6
-rw-r--r-- deps/v8/src/inspector/wasm-translation.h | 6
-rw-r--r-- deps/v8/src/instruction-stream.cc | 66
-rw-r--r-- deps/v8/src/instruction-stream.h | 48
-rw-r--r-- deps/v8/src/interface-descriptors.cc | 15
-rw-r--r-- deps/v8/src/interface-descriptors.h | 29
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-accessor.cc | 24
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-accessor.h | 3
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-builder.cc | 21
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-builder.h | 9
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-writer.cc | 3
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-writer.h | 2
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.cc | 234
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.h | 28
-rw-r--r-- deps/v8/src/interpreter/bytecodes.cc | 11
-rw-r--r-- deps/v8/src/interpreter/bytecodes.h | 34
-rw-r--r-- deps/v8/src/interpreter/control-flow-builders.cc | 24
-rw-r--r-- deps/v8/src/interpreter/control-flow-builders.h | 13
-rw-r--r-- deps/v8/src/interpreter/handler-table-builder.cc | 18
-rw-r--r-- deps/v8/src/interpreter/handler-table-builder.h | 2
-rw-r--r-- deps/v8/src/interpreter/interpreter-assembler.cc | 339
-rw-r--r-- deps/v8/src/interpreter/interpreter-assembler.h | 177
-rw-r--r-- deps/v8/src/interpreter/interpreter-generator.cc | 514
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics-generator.cc | 398
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics-generator.h | 12
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics.h | 10
-rw-r--r-- deps/v8/src/interpreter/interpreter.cc | 30
-rw-r--r-- deps/v8/src/interpreter/interpreter.h | 17
-rw-r--r-- deps/v8/src/isolate-inl.h | 2
-rw-r--r-- deps/v8/src/isolate.cc | 593
-rw-r--r-- deps/v8/src/isolate.h | 148
-rw-r--r-- deps/v8/src/js/OWNERS | 1
-rw-r--r-- deps/v8/src/js/array.js | 75
-rw-r--r-- deps/v8/src/js/prologue.js | 13
-rw-r--r-- deps/v8/src/js/typedarray.js | 291
-rw-r--r-- deps/v8/src/json-parser.cc | 43
-rw-r--r-- deps/v8/src/json-parser.h | 9
-rw-r--r-- deps/v8/src/libplatform/tracing/trace-buffer.h | 6
-rw-r--r-- deps/v8/src/libplatform/tracing/trace-writer.h | 6
-rw-r--r-- deps/v8/src/libsampler/sampler.cc | 97
-rw-r--r-- deps/v8/src/locked-queue-inl.h | 6
-rw-r--r-- deps/v8/src/locked-queue.h | 6
-rw-r--r-- deps/v8/src/log.cc | 211
-rw-r--r-- deps/v8/src/log.h | 25
-rw-r--r-- deps/v8/src/lookup.cc | 129
-rw-r--r-- deps/v8/src/lookup.h | 23
-rw-r--r-- deps/v8/src/messages.h | 13
-rw-r--r-- deps/v8/src/mips/OWNERS | 1
-rw-r--r-- deps/v8/src/mips/assembler-mips-inl.h | 22
-rw-r--r-- deps/v8/src/mips/assembler-mips.cc | 42
-rw-r--r-- deps/v8/src/mips/assembler-mips.h | 24
-rw-r--r-- deps/v8/src/mips/codegen-mips.cc | 8
-rw-r--r-- deps/v8/src/mips/constants-mips.h | 6
-rw-r--r-- deps/v8/src/mips/cpu-mips.cc | 4
-rw-r--r-- deps/v8/src/mips/disasm-mips.cc | 1
-rw-r--r-- deps/v8/src/mips/frame-constants-mips.h | 6
-rw-r--r-- deps/v8/src/mips/interface-descriptors-mips.cc | 6
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc | 150
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h | 24
-rw-r--r-- deps/v8/src/mips/simulator-mips.cc | 16
-rw-r--r-- deps/v8/src/mips/simulator-mips.h | 17
-rw-r--r-- deps/v8/src/mips64/OWNERS | 3
-rw-r--r-- deps/v8/src/mips64/assembler-mips64-inl.h | 29
-rw-r--r-- deps/v8/src/mips64/assembler-mips64.cc | 40
-rw-r--r-- deps/v8/src/mips64/assembler-mips64.h | 29
-rw-r--r-- deps/v8/src/mips64/code-stubs-mips64.h | 6
-rw-r--r-- deps/v8/src/mips64/codegen-mips64.cc | 8
-rw-r--r-- deps/v8/src/mips64/constants-mips64.h | 6
-rw-r--r-- deps/v8/src/mips64/cpu-mips64.cc | 4
-rw-r--r-- deps/v8/src/mips64/disasm-mips64.cc | 1
-rw-r--r-- deps/v8/src/mips64/frame-constants-mips64.h | 6
-rw-r--r-- deps/v8/src/mips64/interface-descriptors-mips64.cc | 6
-rw-r--r-- deps/v8/src/mips64/macro-assembler-mips64.cc | 212
-rw-r--r-- deps/v8/src/mips64/macro-assembler-mips64.h | 32
-rw-r--r-- deps/v8/src/mips64/simulator-mips64.cc | 16
-rw-r--r-- deps/v8/src/mips64/simulator-mips64.h | 23
-rw-r--r-- deps/v8/src/objects-body-descriptors-inl.h | 9
-rw-r--r-- deps/v8/src/objects-debug.cc | 189
-rw-r--r-- deps/v8/src/objects-inl.h | 145
-rw-r--r-- deps/v8/src/objects-printer.cc | 453
-rw-r--r-- deps/v8/src/objects.cc | 977
-rw-r--r-- deps/v8/src/objects.h | 375
-rw-r--r-- deps/v8/src/objects/bigint.cc | 108
-rw-r--r-- deps/v8/src/objects/bigint.h | 12
-rw-r--r-- deps/v8/src/objects/code-inl.h | 77
-rw-r--r-- deps/v8/src/objects/code.h | 134
-rw-r--r-- deps/v8/src/objects/compilation-cache.h | 31
-rw-r--r-- deps/v8/src/objects/data-handler-inl.h | 6
-rw-r--r-- deps/v8/src/objects/data-handler.h | 6
-rw-r--r-- deps/v8/src/objects/debug-objects-inl.h | 2
-rw-r--r-- deps/v8/src/objects/debug-objects.cc | 159
-rw-r--r-- deps/v8/src/objects/debug-objects.h | 37
-rw-r--r-- deps/v8/src/objects/dictionary.h | 52
-rw-r--r-- deps/v8/src/objects/fixed-array-inl.h | 107
-rw-r--r-- deps/v8/src/objects/fixed-array.h | 30
-rw-r--r-- deps/v8/src/objects/intl-objects.cc | 1
-rw-r--r-- deps/v8/src/objects/js-array-inl.h | 29
-rw-r--r-- deps/v8/src/objects/js-array.h | 13
-rw-r--r-- deps/v8/src/objects/js-promise-inl.h | 40
-rw-r--r-- deps/v8/src/objects/js-promise.h | 105
-rw-r--r-- deps/v8/src/objects/js-regexp.h | 2
-rw-r--r-- deps/v8/src/objects/literal-objects-inl.h | 6
-rw-r--r-- deps/v8/src/objects/literal-objects.cc | 11
-rw-r--r-- deps/v8/src/objects/map-inl.h | 1
-rw-r--r-- deps/v8/src/objects/map.h | 2
-rw-r--r-- deps/v8/src/objects/microtask-inl.h | 31
-rw-r--r-- deps/v8/src/objects/microtask.h | 77
-rw-r--r-- deps/v8/src/objects/module.h | 9
-rw-r--r-- deps/v8/src/objects/name-inl.h | 21
-rw-r--r-- deps/v8/src/objects/name.h | 13
-rw-r--r-- deps/v8/src/objects/object-macros-undef.h | 2
-rw-r--r-- deps/v8/src/objects/object-macros.h | 2
-rw-r--r-- deps/v8/src/objects/promise-inl.h | 48
-rw-r--r-- deps/v8/src/objects/promise.h | 168
-rw-r--r-- deps/v8/src/objects/scope-info.cc | 41
-rw-r--r-- deps/v8/src/objects/scope-info.h | 7
-rw-r--r-- deps/v8/src/objects/shared-function-info-inl.h | 5
-rw-r--r-- deps/v8/src/objects/shared-function-info.h | 20
-rw-r--r-- deps/v8/src/objects/string.h | 2
-rw-r--r-- deps/v8/src/objects/template-objects.cc | 138
-rw-r--r-- deps/v8/src/objects/template-objects.h | 50
-rw-r--r-- deps/v8/src/parsing/OWNERS | 1
-rw-r--r-- deps/v8/src/parsing/background-parsing-task.cc | 112
-rw-r--r-- deps/v8/src/parsing/background-parsing-task.h | 74
-rw-r--r-- deps/v8/src/parsing/expression-classifier.h | 6
-rw-r--r-- deps/v8/src/parsing/parse-info.cc | 2
-rw-r--r-- deps/v8/src/parsing/parse-info.h | 14
-rw-r--r-- deps/v8/src/parsing/parser-base.h | 207
-rw-r--r-- deps/v8/src/parsing/parser.cc | 290
-rw-r--r-- deps/v8/src/parsing/parser.h | 70
-rw-r--r-- deps/v8/src/parsing/parsing.cc | 7
-rw-r--r-- deps/v8/src/parsing/preparse-data-format.h | 32
-rw-r--r-- deps/v8/src/parsing/preparse-data.cc | 43
-rw-r--r-- deps/v8/src/parsing/preparse-data.h | 53
-rw-r--r-- deps/v8/src/parsing/preparsed-scope-data.cc | 19
-rw-r--r-- deps/v8/src/parsing/preparser.cc | 3
-rw-r--r-- deps/v8/src/parsing/preparser.h | 80
-rw-r--r-- deps/v8/src/parsing/scanner.cc | 127
-rw-r--r-- deps/v8/src/parsing/scanner.h | 6
-rw-r--r-- deps/v8/src/parsing/token.h | 2
-rw-r--r-- deps/v8/src/perf-jit.cc | 62
-rw-r--r-- deps/v8/src/perf-jit.h | 20
-rw-r--r-- deps/v8/src/ppc/assembler-ppc-inl.h | 42
-rw-r--r-- deps/v8/src/ppc/assembler-ppc.cc | 23
-rw-r--r-- deps/v8/src/ppc/assembler-ppc.h | 19
-rw-r--r-- deps/v8/src/ppc/code-stubs-ppc.cc | 5
-rw-r--r-- deps/v8/src/ppc/codegen-ppc.cc | 5
-rw-r--r-- deps/v8/src/ppc/disasm-ppc.cc | 1
-rw-r--r-- deps/v8/src/ppc/frame-constants-ppc.h | 6
-rw-r--r-- deps/v8/src/ppc/interface-descriptors-ppc.cc | 10
-rw-r--r-- deps/v8/src/ppc/macro-assembler-ppc.cc | 64
-rw-r--r-- deps/v8/src/ppc/macro-assembler-ppc.h | 46
-rw-r--r-- deps/v8/src/ppc/simulator-ppc.cc | 141
-rw-r--r-- deps/v8/src/ppc/simulator-ppc.h | 4
-rw-r--r-- deps/v8/src/profiler/allocation-tracker.cc | 1
-rw-r--r-- deps/v8/src/profiler/allocation-tracker.h | 2
-rw-r--r-- deps/v8/src/profiler/cpu-profiler.cc | 3
-rw-r--r-- deps/v8/src/profiler/heap-profiler.cc | 36
-rw-r--r-- deps/v8/src/profiler/heap-profiler.h | 21
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.cc | 443
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.h | 23
-rw-r--r-- deps/v8/src/profiler/profile-generator-inl.h | 5
-rw-r--r-- deps/v8/src/profiler/profile-generator.cc | 74
-rw-r--r-- deps/v8/src/profiler/profile-generator.h | 25
-rw-r--r-- deps/v8/src/profiler/profiler-listener.cc | 51
-rw-r--r-- deps/v8/src/profiler/profiler-listener.h | 8
-rw-r--r-- deps/v8/src/profiler/sampling-heap-profiler.cc | 18
-rw-r--r-- deps/v8/src/profiler/sampling-heap-profiler.h | 2
-rw-r--r-- deps/v8/src/profiler/strings-storage.cc | 2
-rw-r--r-- deps/v8/src/profiler/strings-storage.h | 2
-rw-r--r-- deps/v8/src/profiler/tick-sample.cc | 7
-rw-r--r-- deps/v8/src/profiler/tracing-cpu-profiler.h | 6
-rw-r--r-- deps/v8/src/profiler/unbound-queue.h | 6
-rw-r--r-- deps/v8/src/property.h | 6
-rw-r--r-- deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 37
-rw-r--r-- deps/v8/src/regexp/interpreter-irregexp.cc | 2
-rw-r--r-- deps/v8/src/regexp/jsregexp.cc | 32
-rw-r--r-- deps/v8/src/regexp/mips/OWNERS | 3
-rw-r--r-- deps/v8/src/regexp/mips64/OWNERS | 1
-rw-r--r-- deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h | 6
-rw-r--r-- deps/v8/src/regexp/regexp-parser.cc | 22
-rw-r--r-- deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h | 6
-rw-r--r-- deps/v8/src/register-configuration.cc | 89
-rw-r--r-- deps/v8/src/register-configuration.h | 9
-rw-r--r-- deps/v8/src/runtime/runtime-array.cc | 14
-rw-r--r-- deps/v8/src/runtime/runtime-atomics.cc | 24
-rw-r--r-- deps/v8/src/runtime/runtime-bigint.cc | 7
-rw-r--r-- deps/v8/src/runtime/runtime-collections.cc | 29
-rw-r--r-- deps/v8/src/runtime/runtime-date.cc | 9
-rw-r--r-- deps/v8/src/runtime/runtime-debug.cc | 264
-rw-r--r-- deps/v8/src/runtime/runtime-error.cc | 6
-rw-r--r-- deps/v8/src/runtime/runtime-function.cc | 17
-rw-r--r-- deps/v8/src/runtime/runtime-generator.cc | 36
-rw-r--r-- deps/v8/src/runtime/runtime-internal.cc | 81
-rw-r--r-- deps/v8/src/runtime/runtime-interpreter.cc | 15
-rw-r--r-- deps/v8/src/runtime/runtime-intl.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime-module.cc | 18
-rw-r--r-- deps/v8/src/runtime/runtime-object.cc | 154
-rw-r--r-- deps/v8/src/runtime/runtime-operators.cc | 99
-rw-r--r-- deps/v8/src/runtime/runtime-promise.cc | 115
-rw-r--r-- deps/v8/src/runtime/runtime-regexp.cc | 8
-rw-r--r-- deps/v8/src/runtime/runtime-scopes.cc | 36
-rw-r--r-- deps/v8/src/runtime/runtime-strings.cc | 31
-rw-r--r-- deps/v8/src/runtime/runtime-symbol.cc | 25
-rw-r--r-- deps/v8/src/runtime/runtime-test.cc | 178
-rw-r--r-- deps/v8/src/runtime/runtime-typedarray.cc | 81
-rw-r--r-- deps/v8/src/runtime/runtime.h | 220
-rw-r--r-- deps/v8/src/s390/assembler-s390-inl.h | 42
-rw-r--r-- deps/v8/src/s390/assembler-s390.cc | 19
-rw-r--r-- deps/v8/src/s390/assembler-s390.h | 17
-rw-r--r-- deps/v8/src/s390/code-stubs-s390.cc | 5
-rw-r--r-- deps/v8/src/s390/codegen-s390.cc | 5
-rw-r--r-- deps/v8/src/s390/disasm-s390.cc | 1
-rw-r--r-- deps/v8/src/s390/frame-constants-s390.h | 6
-rw-r--r-- deps/v8/src/s390/interface-descriptors-s390.cc | 6
-rw-r--r-- deps/v8/src/s390/macro-assembler-s390.cc | 53
-rw-r--r-- deps/v8/src/s390/macro-assembler-s390.h | 40
-rw-r--r-- deps/v8/src/s390/simulator-s390.cc | 9
-rw-r--r-- deps/v8/src/s390/simulator-s390.h | 4
-rw-r--r-- deps/v8/src/safepoint-table.cc | 2
-rw-r--r-- deps/v8/src/simulator-base.cc | 50
-rw-r--r-- deps/v8/src/simulator-base.h | 32
-rw-r--r-- deps/v8/src/snapshot/builtin-deserializer.cc | 31
-rw-r--r-- deps/v8/src/snapshot/builtin-deserializer.h | 3
-rw-r--r-- deps/v8/src/snapshot/builtin-serializer.cc | 4
-rw-r--r-- deps/v8/src/snapshot/builtin-serializer.h | 3
-rw-r--r-- deps/v8/src/snapshot/code-serializer.cc | 20
-rw-r--r-- deps/v8/src/snapshot/code-serializer.h | 32
-rw-r--r-- deps/v8/src/snapshot/deserializer.cc | 36
-rw-r--r-- deps/v8/src/snapshot/deserializer.h | 3
-rw-r--r-- deps/v8/src/snapshot/object-deserializer.cc | 5
-rw-r--r-- deps/v8/src/snapshot/partial-deserializer.cc | 2
-rw-r--r-- deps/v8/src/snapshot/partial-serializer.cc | 3
-rw-r--r-- deps/v8/src/snapshot/serializer-common.cc | 3
-rw-r--r-- deps/v8/src/snapshot/serializer-common.h | 40
-rw-r--r-- deps/v8/src/snapshot/serializer.cc | 9
-rw-r--r-- deps/v8/src/snapshot/serializer.h | 13
-rw-r--r-- deps/v8/src/snapshot/snapshot-common.cc | 48
-rw-r--r-- deps/v8/src/snapshot/snapshot.h | 1
-rw-r--r-- deps/v8/src/snapshot/startup-deserializer.cc | 3
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.cc | 16
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.h | 6
-rw-r--r-- deps/v8/src/string-case.h | 2
-rw-r--r-- deps/v8/src/third_party/vtune/v8vtune.gyp | 59
-rw-r--r-- deps/v8/src/tracing/trace-event.h | 6
-rw-r--r-- deps/v8/src/trap-handler/handler-outside.cc | 11
-rw-r--r-- deps/v8/src/trap-handler/trap-handler-internal.h | 6
-rw-r--r-- deps/v8/src/trap-handler/trap-handler.h | 6
-rw-r--r-- deps/v8/src/unicode-decoder.cc | 128
-rw-r--r-- deps/v8/src/unicode-decoder.h | 133
-rw-r--r-- deps/v8/src/unicode.h | 4
-rw-r--r-- deps/v8/src/utils.h | 13
-rw-r--r-- deps/v8/src/v8.cc | 1
-rw-r--r-- deps/v8/src/v8.gyp | 2630
-rw-r--r-- deps/v8/src/v8.h | 5
-rw-r--r-- deps/v8/src/v8memory.h | 6
-rw-r--r-- deps/v8/src/value-serializer.cc | 68
-rw-r--r-- deps/v8/src/value-serializer.h | 7
-rw-r--r-- deps/v8/src/version.h | 6
-rw-r--r-- deps/v8/src/visitors.cc | 23
-rw-r--r-- deps/v8/src/visitors.h | 65
-rw-r--r-- deps/v8/src/wasm/OWNERS | 2
-rw-r--r-- deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 182
-rw-r--r-- deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 182
-rw-r--r-- deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 514
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler-defs.h | 61
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler.cc | 348
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler.h | 165
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-compiler.cc | 807
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-register.h | 129
-rw-r--r-- deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 520
-rw-r--r-- deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 465
-rw-r--r-- deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 182
-rw-r--r-- deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 182
-rw-r--r-- deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 431
-rw-r--r-- deps/v8/src/wasm/compilation-manager.cc | 10
-rw-r--r-- deps/v8/src/wasm/compilation-manager.h | 11
-rw-r--r-- deps/v8/src/wasm/function-body-decoder-impl.h | 90
-rw-r--r-- deps/v8/src/wasm/function-body-decoder.cc | 23
-rw-r--r-- deps/v8/src/wasm/module-compiler.cc | 607
-rw-r--r-- deps/v8/src/wasm/module-compiler.h | 32
-rw-r--r-- deps/v8/src/wasm/module-decoder.cc | 13
-rw-r--r-- deps/v8/src/wasm/wasm-api.cc | 31
-rw-r--r-- deps/v8/src/wasm/wasm-api.h | 35
-rw-r--r-- deps/v8/src/wasm/wasm-code-manager.cc | 163
-rw-r--r-- deps/v8/src/wasm/wasm-code-manager.h | 71
-rw-r--r-- deps/v8/src/wasm/wasm-code-specialization.cc | 70
-rw-r--r-- deps/v8/src/wasm/wasm-code-specialization.h | 17
-rw-r--r-- deps/v8/src/wasm/wasm-code-wrapper.cc | 17
-rw-r--r-- deps/v8/src/wasm/wasm-code-wrapper.h | 10
-rw-r--r-- deps/v8/src/wasm/wasm-constants.h | 6
-rw-r--r-- deps/v8/src/wasm/wasm-debug.cc | 21
-rw-r--r-- deps/v8/src/wasm/wasm-engine.cc | 101
-rw-r--r-- deps/v8/src/wasm/wasm-engine.h | 48
-rw-r--r-- deps/v8/src/wasm/wasm-external-refs.h | 7
-rw-r--r-- deps/v8/src/wasm/wasm-interpreter.cc | 181
-rw-r--r-- deps/v8/src/wasm/wasm-interpreter.h | 7
-rw-r--r-- deps/v8/src/wasm/wasm-js.cc | 97
-rw-r--r-- deps/v8/src/wasm/wasm-js.h | 16
-rw-r--r-- deps/v8/src/wasm/wasm-limits.h | 2
-rw-r--r-- deps/v8/src/wasm/wasm-memory.cc | 93
-rw-r--r-- deps/v8/src/wasm/wasm-memory.h | 6
-rw-r--r-- deps/v8/src/wasm/wasm-module.cc | 8
-rw-r--r-- deps/v8/src/wasm/wasm-module.h | 8
-rw-r--r-- deps/v8/src/wasm/wasm-objects-inl.h | 6
-rw-r--r-- deps/v8/src/wasm/wasm-objects.cc | 467
-rw-r--r-- deps/v8/src/wasm/wasm-objects.h | 59
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes.cc | 20
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes.h | 43
-rw-r--r-- deps/v8/src/wasm/wasm-result.h | 6
-rw-r--r-- deps/v8/src/wasm/wasm-serialization.cc | 87
-rw-r--r-- deps/v8/src/wasm/wasm-serialization.h | 6
-rw-r--r-- deps/v8/src/wasm/wasm-text.cc | 3
-rw-r--r-- deps/v8/src/wasm/wasm-text.h | 6
-rw-r--r-- deps/v8/src/wasm/wasm-value.h | 6
-rw-r--r-- deps/v8/src/x64/assembler-x64-inl.h | 134
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 763
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 746
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 9
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc | 4
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 13
-rw-r--r-- deps/v8/src/x64/frame-constants-x64.h | 6
-rw-r--r-- deps/v8/src/x64/interface-descriptors-x64.cc | 7
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 441
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h | 283
-rw-r--r-- deps/v8/src/x64/sse-instr.h | 6
-rw-r--r-- deps/v8/src/zone/accounting-allocator.h | 1
-rw-r--r-- deps/v8/src/zone/zone-chunk-list.h | 281
-rw-r--r-- deps/v8/src/zone/zone-containers.h | 6
-rw-r--r-- deps/v8/src/zone/zone-handle-set.h | 4
-rw-r--r-- deps/v8/test/benchmarks/benchmarks.gyp | 26
-rw-r--r-- deps/v8/test/benchmarks/benchmarks.status | 13
-rw-r--r-- deps/v8/test/benchmarks/testcfg.py | 25
-rw-r--r-- deps/v8/test/bot_default.gyp | 36
-rw-r--r-- deps/v8/test/cctest/BUILD.gn | 2
-rw-r--r-- deps/v8/test/cctest/OWNERS | 1
-rw-r--r-- deps/v8/test/cctest/cctest.gyp | 523
-rw-r--r-- deps/v8/test/cctest/cctest.h | 19
-rw-r--r-- deps/v8/test/cctest/cctest.status | 66
-rw-r--r-- deps/v8/test/cctest/compiler/codegen-tester.h | 2
-rw-r--r-- deps/v8/test/cctest/compiler/function-tester.cc | 3
-rw-r--r-- deps/v8/test/cctest/compiler/graph-builder-tester.h | 6
-rw-r--r-- deps/v8/test/cctest/compiler/test-code-assembler.cc | 6
-rw-r--r-- deps/v8/test/cctest/compiler/test-code-generator.cc | 31
-rw-r--r-- deps/v8/test/cctest/compiler/test-js-typed-lowering.cc | 1
-rw-r--r-- deps/v8/test/cctest/compiler/test-linkage.cc | 46
-rw-r--r-- deps/v8/test/cctest/compiler/test-representation-change.cc | 12
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc | 1
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-machops.cc | 6
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-native-calls.cc | 35
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-retpoline.cc | 10
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-tail-calls.cc | 10
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-wasm-machops.cc | 6
-rw-r--r-- deps/v8/test/cctest/heap/test-concurrent-marking.cc | 43
-rw-r--r-- deps/v8/test/cctest/heap/test-heap.cc | 78
-rw-r--r-- deps/v8/test/cctest/heap/test-spaces.cc | 50
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden | 783
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden | 797
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden | 16
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden | 477
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden | 349
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden | 544
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden | 257
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden (renamed from deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden) | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden | 227
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden | 6
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden | 6
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc | 11
-rw-r--r-- deps/v8/test/cctest/interpreter/interpreter-tester.h | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/test-bytecode-generator.cc | 64
-rw-r--r-- deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc | 21
-rw-r--r-- deps/v8/test/cctest/parsing/test-preparser.cc | 15
-rw-r--r-- deps/v8/test/cctest/print-extension.h | 2
-rw-r--r-- deps/v8/test/cctest/test-accessors.cc | 35
-rw-r--r-- deps/v8/test/cctest/test-api.cc | 726
-rw-r--r-- deps/v8/test/cctest/test-assembler-arm.cc | 95
-rw-r--r-- deps/v8/test/cctest/test-assembler-arm64.cc | 216
-rw-r--r-- deps/v8/test/cctest/test-assembler-ia32.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-assembler-s390.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-code-stub-assembler.cc | 118
-rw-r--r-- deps/v8/test/cctest/test-code-stubs-arm.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-code-stubs-arm64.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-code-stubs-mips.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-code-stubs-mips64.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-compiler.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-debug.cc | 1313
-rw-r--r-- deps/v8/test/cctest/test-disasm-arm.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-disasm-arm64.cc | 389
-rw-r--r-- deps/v8/test/cctest/test-disasm-ia32.cc | 25
-rw-r--r-- deps/v8/test/cctest/test-disasm-x64.cc | 22
-rw-r--r-- deps/v8/test/cctest/test-extra.js | 20
-rw-r--r-- deps/v8/test/cctest/test-feedback-vector.cc | 32
-rw-r--r-- deps/v8/test/cctest/test-heap-profiler.cc | 310
-rw-r--r-- deps/v8/test/cctest/test-inobject-slack-tracking.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-isolate-independent-builtins.cc | 369
-rw-r--r-- deps/v8/test/cctest/test-log.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-x64.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-parsing.cc | 449
-rw-r--r-- deps/v8/test/cctest/test-run-wasm-relocation-arm.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-run-wasm-relocation-x64.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-serialize.cc | 148
-rw-r--r-- deps/v8/test/cctest/test-strings.cc | 154
-rw-r--r-- deps/v8/test/cctest/test-thread-termination.cc | 42
-rw-r--r-- deps/v8/test/cctest/test-typedarrays.cc | 10
-rw-r--r-- deps/v8/test/cctest/test-utils-arm64.cc | 12
-rw-r--r-- deps/v8/test/cctest/test-utils-arm64.h | 3
-rw-r--r-- deps/v8/test/cctest/testcfg.py | 14
-rw-r--r-- deps/v8/test/cctest/trace-extension.h | 3
-rw-r--r-- deps/v8/test/cctest/wasm/OWNERS | 2
-rw-r--r-- deps/v8/test/cctest/wasm/test-c-wasm-entry.cc | 1
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-64.cc | 308
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-module.cc | 160
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc | 5
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc | 74
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-simd.cc | 50
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm.cc | 3
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc | 30
-rw-r--r-- deps/v8/test/cctest/wasm/wasm-run-utils.cc | 44
-rw-r--r-- deps/v8/test/cctest/wasm/wasm-run-utils.h | 6
-rw-r--r-- deps/v8/test/common/wasm/wasm-macro-gen.h | 13
-rw-r--r-- deps/v8/test/common/wasm/wasm-module-runner.cc | 31
-rw-r--r-- deps/v8/test/common/wasm/wasm-module-runner.h | 4
-rw-r--r-- deps/v8/test/d8_default.gyp | 31
-rw-r--r-- deps/v8/test/debugger/debug/debug-scopes-suspended-generators.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/debug-setbreakpoint.js | 3
-rw-r--r-- deps/v8/test/debugger/debug/deserialize-script-id.js (renamed from deps/v8/test/mjsunit/deserialize-script-id.js) | 2
-rw-r--r-- deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js | 42
-rw-r--r-- deps/v8/test/debugger/debug/es6/debug-promises/throw-with-throw-in-reject.js | 27
-rw-r--r-- deps/v8/test/debugger/debug/regress/regress-3960.js | 35
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-async.js (renamed from deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-async.js) | 11
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins-2.js (renamed from deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-builtins-2.js) | 42
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js (renamed from deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-builtins.js) | 65
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-control.js (renamed from deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-control.js) | 0
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-iife.js (renamed from deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-iife.js) | 0
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-ops.js (renamed from deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-ops.js) | 0
-rw-r--r-- deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect.js (renamed from deps/v8/test/debugger/debug/debug-evaluate-no-side-effect.js) | 10
-rw-r--r-- deps/v8/test/debugger/debugger.gyp | 26
-rw-r--r-- deps/v8/test/debugger/regress/regress-7421.js | 78
-rw-r--r-- deps/v8/test/debugger/regress/regress-crbug-507070.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-507070.js) | 2
-rw-r--r-- deps/v8/test/debugger/regress/regress-crbug-808973.js | 18
-rw-r--r-- deps/v8/test/debugger/test-api.js | 12
-rw-r--r-- deps/v8/test/debugger/testcfg.py | 12
-rw-r--r-- deps/v8/test/default.gyp | 35
-rw-r--r-- deps/v8/test/fuzzer/README.md | 12
-rw-r--r-- deps/v8/test/fuzzer/fuzzer.isolate | 4
-rw-r--r-- deps/v8/test/fuzzer/multi-return.cc | 7
-rw-r--r-- deps/v8/test/fuzzer/regexp-builtins.cc | 448
-rw-r--r-- deps/v8/test/fuzzer/regexp_builtins/mjsunit.js | 188
-rw-r--r-- deps/v8/test/fuzzer/regexp_builtins/mjsunit.js.h | 496
-rw-r--r-- deps/v8/test/fuzzer/testcfg.py | 17
-rw-r--r-- deps/v8/test/fuzzer/wasm-async.cc | 8
-rw-r--r-- deps/v8/test/fuzzer/wasm-call.cc | 134
-rw-r--r-- deps/v8/test/fuzzer/wasm-compile.cc | 141
-rw-r--r-- deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 97
-rw-r--r-- deps/v8/test/fuzzer/wasm.cc | 7
-rw-r--r-- deps/v8/test/fuzzer/wasm_call/foo | 0
-rw-r--r-- deps/v8/test/inspector/debugger/break-locations-await-expected.txt | 4
-rw-r--r-- deps/v8/test/inspector/debugger/cleanup-old-failed-to-parse-anonymous-scripts-expected.txt | 17
-rw-r--r-- deps/v8/test/inspector/debugger/cleanup-old-failed-to-parse-anonymous-scripts.js | 49
-rw-r--r-- deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt | 4
-rw-r--r-- deps/v8/test/inspector/debugger/eval-without-codegen-expected.txt | 32
-rw-r--r-- deps/v8/test/inspector/debugger/eval-without-codegen.js | 38
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt | 8
-rw-r--r-- deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt | 78
-rw-r--r-- deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt | 18
-rw-r--r-- deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/script-unique-hash-expected.txt | 169
-rw-r--r-- deps/v8/test/inspector/debugger/script-unique-hash.js | 36
-rw-r--r-- deps/v8/test/inspector/debugger/step-into-external-async-task.js | 5
-rw-r--r-- deps/v8/test/inspector/debugger/step-into-optimized-blackbox-expected.txt | 9
-rw-r--r-- deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js | 46
-rw-r--r-- deps/v8/test/inspector/heap-profiler/console-retaining-path-expected.txt | 5
-rw-r--r-- deps/v8/test/inspector/heap-profiler/console-retaining-path.js | 97
-rw-r--r-- deps/v8/test/inspector/inspector-test.cc | 15
-rw-r--r-- deps/v8/test/inspector/inspector.gyp | 58
-rw-r--r-- deps/v8/test/inspector/runtime/es6-module-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt | 39
-rw-r--r-- deps/v8/test/inspector/runtime/evaluate-run-microtasks.js | 13
-rw-r--r-- deps/v8/test/inspector/testcfg.py | 8
-rw-r--r-- deps/v8/test/intl/date-format/month-far-future.js | 30
-rw-r--r-- deps/v8/test/intl/intl.gyp | 26
-rw-r--r-- deps/v8/test/intl/testcfg.py | 12
-rw-r--r-- deps/v8/test/js-perf-test/Array/from.js | 153
-rw-r--r-- deps/v8/test/js-perf-test/Array/run.js | 3
-rw-r--r-- deps/v8/test/js-perf-test/ArrayLiteralSpread/run.js | 97
-rw-r--r-- deps/v8/test/js-perf-test/Generators/generators.js | 2
-rw-r--r-- deps/v8/test/js-perf-test/JSTests.json | 36
-rw-r--r-- deps/v8/test/js-perf-test/OWNERS | 1
-rw-r--r-- deps/v8/test/js-perf-test/Strings/string-indexof.js | 97
-rw-r--r-- deps/v8/test/message/fail/function-sent-escaped.js | 10
-rw-r--r-- deps/v8/test/message/fail/function-sent-escaped.out | 4
-rw-r--r-- deps/v8/test/message/fail/paren_in_arg_string.out | 8
-rw-r--r-- deps/v8/test/message/fail/redeclaration4.js | 10
-rw-r--r-- deps/v8/test/message/fail/redeclaration4.out | 5
-rw-r--r-- deps/v8/test/message/fail/redeclaration5.js | 8
-rw-r--r-- deps/v8/test/message/fail/redeclaration5.out | 5
-rw-r--r-- deps/v8/test/message/message.gyp | 26
-rw-r--r-- deps/v8/test/message/mjsunit/fail/assert_true.js | 7
-rw-r--r-- deps/v8/test/message/mjsunit/fail/assert_true.out | 10
-rw-r--r-- deps/v8/test/message/testcfg.py | 19
-rw-r--r-- deps/v8/test/mjsunit/array-reduce.js | 59
-rw-r--r-- deps/v8/test/mjsunit/array-sort.js | 13
-rw-r--r-- deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js | 10
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js | 47
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-constructor.js | 198
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js | 19
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js | 19
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js | 27
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-catch.js | 29
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js | 19
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js | 19
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js | 27
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-finally.js | 41
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-prototype-then.js | 50
-rw-r--r-- deps/v8/test/mjsunit/compiler/promise-resolve.js | 119
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-803022.js | 16
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-808472.js | 12
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-815392.js | 13
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-817225.js | 27
-rw-r--r-- deps/v8/test/mjsunit/constant-folding-2.js | 220
-rw-r--r-- deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js | 10
-rw-r--r-- deps/v8/test/mjsunit/default-nospec.js | 84
-rw-r--r-- deps/v8/test/mjsunit/es6/array-from.js | 8
-rw-r--r-- deps/v8/test/mjsunit/es6/array-iterator.js | 4
-rw-r--r-- deps/v8/test/mjsunit/es6/classof-proxy.js | 27
-rw-r--r-- deps/v8/test/mjsunit/es6/collection-iterator.js | 2
-rw-r--r-- deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js | 65
-rw-r--r-- deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js | 80
-rw-r--r-- deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js | 65
-rw-r--r-- deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js | 76
-rw-r--r-- deps/v8/test/mjsunit/es6/collections.js | 11
-rw-r--r-- deps/v8/test/mjsunit/es6/generators-objects.js | 1
-rw-r--r-- deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js | 48
-rw-r--r-- deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js | 53
-rw-r--r-- deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js | 43
-rw-r--r-- deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js | 53
-rw-r--r-- deps/v8/test/mjsunit/es6/promise-resolve-thenable-job.js | 127
-rw-r--r-- deps/v8/test/mjsunit/es6/proxies.js | 4
-rw-r--r-- deps/v8/test/mjsunit/es6/proxy-function-tostring.js | 7
-rw-r--r-- deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js | 236
-rw-r--r-- deps/v8/test/mjsunit/es6/spread-array-pristine-prototype.js (renamed from deps/v8/test/mjsunit/es6/spread-array.js) | 8
-rw-r--r-- deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js | 21
-rw-r--r-- deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js | 22
-rw-r--r-- deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js | 22
-rw-r--r-- deps/v8/test/mjsunit/es6/symbols.js | 2
-rw-r--r-- deps/v8/test/mjsunit/es6/templates.js | 162
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js | 63
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-every.js | 1
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-filter.js | 111
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-foreach.js | 1
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-from.js | 140
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-of.js | 16
-rw-r--r-- deps/v8/test/mjsunit/es6/unicode-regexp-restricted-syntax.js | 1
-rw-r--r-- deps/v8/test/mjsunit/es8/object-entries.js | 94
-rw-r--r-- deps/v8/test/mjsunit/es9/regexp-lookbehind.js | 7
-rw-r--r-- deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js | 4
-rw-r--r-- deps/v8/test/mjsunit/harmony/async-generators-basic.js | 6
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/add.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/and.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/as-int-n.js | 4
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/basics.js | 42
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/comparisons.js | 12
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/dataview.js | 78
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/dec.js | 87
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/div.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/inc.js | 87
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/mod.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/mul.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/neg.js | 87
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/not.js | 87
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/or.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/sar.js | 134
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/shl.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/sub.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/typedarray.js | 240
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/xor.js | 130
-rw-r--r-- deps/v8/test/mjsunit/harmony/function-sent.js | 90
-rw-r--r-- deps/v8/test/mjsunit/harmony/function-tostring.js | 22
-rw-r--r-- deps/v8/test/mjsunit/harmony/private-fields-ic.js | 294
-rw-r--r-- deps/v8/test/mjsunit/harmony/private-fields-special-object.js | 49
-rw-r--r-- deps/v8/test/mjsunit/harmony/private-fields.js | 441
-rw-r--r-- deps/v8/test/mjsunit/harmony/private.js | 2
-rw-r--r-- deps/v8/test/mjsunit/keyed-store-array-literal.js | 75
-rw-r--r-- deps/v8/test/mjsunit/math-deopt.js | 70
-rw-r--r-- deps/v8/test/mjsunit/mjsunit.gyp | 26
-rw-r--r-- deps/v8/test/mjsunit/mjsunit.js | 51
-rw-r--r-- deps/v8/test/mjsunit/mjsunit.status | 58
-rw-r--r-- deps/v8/test/mjsunit/mjsunit_suppressions.js | 9
-rw-r--r-- deps/v8/test/mjsunit/optimized-reduce.js | 49
-rw-r--r-- deps/v8/test/mjsunit/optimized-reduceright.js | 49
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-2470.js | 2
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-5010.js | 9
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-5691.js | 23
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-6703.js | 23
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-7369.js | 8
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-800651.js | 27
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-803750.js | 7
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-804096.js | 12
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-804176.js | 13
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-804177.js | 38
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-804188.js | 14
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-804288.js | 9
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-804801.js | 34
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-804837.js | 14
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-805729.js | 7
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-805768.js | 19
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-812451.js | 14
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-814643.js | 6
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-816289.js | 6
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-816317.js | 12
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-821137.js | 27
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-995.js | 8
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-charat-empty.js | 14
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-109362.js | 11
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-474297.js | 12
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-663410.js | 2
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-802333.js | 26
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-805765.js | 13
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-806200.js | 7
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-808192.js | 3
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-813450.js | 23
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-816961.js | 18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-822284.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js18
-rw-r--r--deps/v8/test/mjsunit/regress/string-next-encoding.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7353.js29
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7364.js31
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7366.js33
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7422.js27
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-7499.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-801785.js22
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-803427.js13
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-803788.js27
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-808012.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-810973.js32
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-812005.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-816226.js5
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-819869.js12
-rw-r--r--deps/v8/test/mjsunit/skipping-inner-functions.js12
-rw-r--r--deps/v8/test/mjsunit/string-charcodeat-external.js17
-rw-r--r--deps/v8/test/mjsunit/string-deopt.js57
-rw-r--r--deps/v8/test/mjsunit/string-trim.js11
-rw-r--r--deps/v8/test/mjsunit/testcfg.py191
-rw-r--r--deps/v8/test/mjsunit/wasm/OWNERS2
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f32.js19
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f64.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/errors.js27
-rw-r--r--deps/v8/test/mjsunit/wasm/function-prototype.js5
-rw-r--r--deps/v8/test/mjsunit/wasm/import-function.js (renamed from deps/v8/test/mjsunit/wasm/import-table.js)0
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js130
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js31
-rw-r--r--deps/v8/test/mjsunit/wasm/liftoff.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-size.js46
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js326
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js10
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.gyp46
-rw-r--r--deps/v8/test/mkgrokdump/testcfg.py8
-rw-r--r--deps/v8/test/mozilla/mozilla.gyp26
-rw-r--r--deps/v8/test/mozilla/mozilla.status4
-rw-r--r--deps/v8/test/mozilla/testcfg.py16
-rw-r--r--deps/v8/test/optimize_for_size.gyp30
-rw-r--r--deps/v8/test/perf.gyp27
-rw-r--r--deps/v8/test/preparser/preparser.gyp26
-rw-r--r--deps/v8/test/preparser/testcfg.py23
-rw-r--r--deps/v8/test/test262/local-tests/test/intl402/DateTimeFormat/12.1.1_1.js7
-rw-r--r--deps/v8/test/test262/local-tests/test/intl402/NumberFormat/11.1.1_1.js7
-rw-r--r--deps/v8/test/test262/test262.gyp34
-rw-r--r--deps/v8/test/test262/test262.status103
-rw-r--r--deps/v8/test/test262/testcfg.py52
-rw-r--r--deps/v8/test/unittests/BUILD.gn2
-rw-r--r--deps/v8/test/unittests/allocation-unittest.cc5
-rw-r--r--deps/v8/test/unittests/base/platform/time-unittest.cc216
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc48
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc48
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc20
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc31
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc1254
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc444
-rw-r--r--deps/v8/test/unittests/compiler/mips/OWNERS3
-rw-r--r--deps/v8/test/unittests/compiler/mips64/OWNERS1
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc27
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h6
-rw-r--r--deps/v8/test/unittests/compiler/persistent-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/OWNERS1
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc19
-rw-r--r--deps/v8/test/unittests/counters-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc152
-rw-r--r--deps/v8/test/unittests/heap/object-stats-unittest.cc38
-rw-r--r--deps/v8/test/unittests/heap/scavenge-job-unittest.cc3
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc19
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc16
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc164
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h17
-rw-r--r--deps/v8/test/unittests/object-unittest.cc9
-rw-r--r--deps/v8/test/unittests/test-utils.cc2
-rw-r--r--deps/v8/test/unittests/testcfg.py25
-rw-r--r--deps/v8/test/unittests/unicode-unittest.cc93
-rw-r--r--deps/v8/test/unittests/unittests.gyp305
-rw-r--r--deps/v8/test/unittests/unittests.status4
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc3046
-rw-r--r--deps/v8/test/unittests/wasm/OWNERS2
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc1
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc322
-rw-r--r--deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc78
-rw-r--r--deps/v8/test/wasm-spec-tests/OWNERS1
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py8
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.gyp26
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check.js2
-rw-r--r--deps/v8/test/webkit/string-trim-expected.txt102
-rw-r--r--deps/v8/test/webkit/string-trim.js113
-rw-r--r--deps/v8/test/webkit/testcfg.py10
-rw-r--r--deps/v8/test/webkit/webkit.gyp26
-rw-r--r--deps/v8/testing/gmock.gyp72
-rw-r--r--deps/v8/third_party/binutils/detect_v8_host_arch.py (renamed from deps/v8/gypfiles/detect_v8_host_arch.py)3
-rwxr-xr-xdeps/v8/third_party/binutils/download.py6
-rw-r--r--deps/v8/tools/BUILD.gn9
-rw-r--r--deps/v8/tools/Makefile.tags30
-rwxr-xr-xdeps/v8/tools/bigint-tester.py54
-rw-r--r--deps/v8/tools/check-static-initializers.gyp26
-rw-r--r--deps/v8/tools/clusterfuzz/BUILD.gn (renamed from deps/v8/tools/foozzie/BUILD.gn)2
-rw-r--r--deps/v8/tools/clusterfuzz/PRESUBMIT.py8
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/failure_output.txt (renamed from deps/v8/tools/foozzie/testdata/failure_output.txt)0
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/fuzz-123.js (renamed from deps/v8/tools/foozzie/testdata/fuzz-123.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/test_d8_1.py (renamed from deps/v8/tools/foozzie/testdata/test_d8_1.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/test_d8_2.py (renamed from deps/v8/tools/foozzie/testdata/test_d8_2.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/test_d8_3.py (renamed from deps/v8/tools/foozzie/testdata/test_d8_3.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/v8_build_config.json (renamed from deps/v8/tools/foozzie/testdata/v8_build_config.json)0
-rw-r--r--deps/v8/tools/clusterfuzz/v8_commands.py (renamed from deps/v8/tools/foozzie/v8_commands.py)0
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie.py (renamed from deps/v8/tools/foozzie/v8_foozzie.py)0
-rw-r--r--deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js100
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/clusterfuzz/v8_foozzie_test.py (renamed from deps/v8/tools/foozzie/v8_foozzie_test.py)36
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_config.py45
-rw-r--r--deps/v8/tools/clusterfuzz/v8_mock.js (renamed from deps/v8/tools/foozzie/v8_mock.js)11
-rw-r--r--deps/v8/tools/clusterfuzz/v8_mock_archs.js (renamed from deps/v8/tools/foozzie/v8_mock_archs.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/v8_suppressions.js (renamed from deps/v8/tools/foozzie/v8_suppressions.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/v8_suppressions.py (renamed from deps/v8/tools/foozzie/v8_suppressions.py)8
-rwxr-xr-xdeps/v8/tools/dev/gm.py15
-rwxr-xr-xdeps/v8/tools/gcmole/download_gcmole_tools.py20
-rw-r--r--deps/v8/tools/gcmole/gcmole.lua54
-rw-r--r--deps/v8/tools/gcmole/run_gcmole.gyp23
-rwxr-xr-xdeps/v8/tools/gcov.sh67
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py5
-rwxr-xr-xdeps/v8/tools/get_landmines.py (renamed from deps/v8/gypfiles/get_landmines.py)13
-rwxr-xr-xdeps/v8/tools/grokdump.py80
-rwxr-xr-xdeps/v8/tools/gyp_flag_compare.py280
-rw-r--r--deps/v8/tools/heap-stats/README.md5
-rw-r--r--deps/v8/tools/heap-stats/categories.js98
-rw-r--r--deps/v8/tools/heap-stats/details-selection.html100
-rw-r--r--deps/v8/tools/heap-stats/details-selection.js157
-rw-r--r--deps/v8/tools/heap-stats/global-timeline.js1
-rw-r--r--deps/v8/tools/heap-stats/histogram-viewer.html19
-rw-r--r--deps/v8/tools/heap-stats/histogram-viewer.js152
-rw-r--r--deps/v8/tools/heap-stats/index.html37
-rw-r--r--deps/v8/tools/heap-stats/model.js77
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.html63
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.js231
-rw-r--r--deps/v8/tools/isolate_driver.py2
-rw-r--r--deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py20
-rw-r--r--deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp26
-rwxr-xr-xdeps/v8/tools/mb/mb.py520
-rwxr-xr-xdeps/v8/tools/mb/mb_unittest.py281
-rwxr-xr-xdeps/v8/tools/mingw-generate-makefiles.sh97
-rw-r--r--deps/v8/tools/mips_toolchain.tar.gz.sha11
-rwxr-xr-xdeps/v8/tools/node/build_gn.py94
-rwxr-xr-xdeps/v8/tools/node/fetch_deps.py6
-rwxr-xr-xdeps/v8/tools/node/update_node.py35
-rw-r--r--deps/v8/tools/parser-shell.cc63
-rwxr-xr-xdeps/v8/tools/presubmit.py6
-rw-r--r--deps/v8/tools/release/common_includes.py1
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py9
-rwxr-xr-xdeps/v8/tools/release/roll_merge.py10
-rwxr-xr-xdeps/v8/tools/release/update_node.py176
-rw-r--r--deps/v8/tools/run-deopt-fuzzer.gyp26
-rw-r--r--deps/v8/tools/run-deopt-fuzzer.isolate19
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py14
-rw-r--r--deps/v8/tools/run-num-fuzzer.gyp26
-rw-r--r--deps/v8/tools/run-num-fuzzer.isolate5
-rwxr-xr-xdeps/v8/tools/run-num-fuzzer.py (renamed from deps/v8/tools/run-gc-fuzzer.py)6
-rwxr-xr-xdeps/v8/tools/run_perf.py3
-rw-r--r--deps/v8/tools/testrunner/base_runner.py245
-rwxr-xr-xdeps/v8/tools/testrunner/deopt_fuzzer.py336
-rwxr-xr-xdeps/v8/tools/testrunner/gc_fuzzer.py280
-rw-r--r--deps/v8/tools/testrunner/local/command.py31
-rw-r--r--deps/v8/tools/testrunner/local/execution.py293
-rw-r--r--deps/v8/tools/testrunner/local/perfdata.py141
-rw-r--r--deps/v8/tools/testrunner/local/pool.py201
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/testrunner/local/pool_unittest.py22
-rw-r--r--deps/v8/tools/testrunner/local/progress.py452
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py4
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py105
-rw-r--r--deps/v8/tools/testrunner/local/utils.py15
-rw-r--r--deps/v8/tools/testrunner/local/variants.py27
-rwxr-xr-xdeps/v8/tools/testrunner/num_fuzzer.py225
-rw-r--r--deps/v8/tools/testrunner/objects/context.py51
-rw-r--r--deps/v8/tools/testrunner/objects/output.py9
-rw-r--r--deps/v8/tools/testrunner/objects/predictable.py20
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py117
-rw-r--r--deps/v8/tools/testrunner/outproc/base.py27
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py505
-rw-r--r--deps/v8/tools/testrunner/test_config.py32
-rw-r--r--deps/v8/tools/testrunner/testproc/base.py51
-rw-r--r--deps/v8/tools/testrunner/testproc/combiner.py124
-rw-r--r--deps/v8/tools/testrunner/testproc/execution.py65
-rw-r--r--deps/v8/tools/testrunner/testproc/expectation.py27
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py287
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py26
-rw-r--r--deps/v8/tools/testrunner/testproc/rerun.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/seed.py58
-rw-r--r--deps/v8/tools/testrunner/testproc/sigproc.py31
-rw-r--r--deps/v8/tools/testrunner/testproc/timeout.py28
-rw-r--r--deps/v8/tools/testrunner/trycatch_loader.js42
-rw-r--r--deps/v8/tools/testrunner/utils/__init__.py3
-rw-r--r--deps/v8/tools/testrunner/utils/random_utils.py13
-rw-r--r--deps/v8/tools/toolchain/BUILD.gn23
-rwxr-xr-xdeps/v8/tools/try_perf.py32
-rw-r--r--deps/v8/tools/turbolizer/index.html6
-rw-r--r--deps/v8/tools/turbolizer/monkey.js19
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.css11
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.js3
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py61
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results1.json30
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results2.json20
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py8
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py8
-rw-r--r--deps/v8/tools/v8heapconst.py383
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh10
-rw-r--r--deps/v8/tools/whitespace.txt4
1331 files changed, 64332 insertions, 50163 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index f07fc1cb62..4ea2ead23d 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -36,8 +36,6 @@
/_*
/build
/buildtools
-/gypfiles/.gold_plugin
-/gypfiles/win_toolchain.json
/hydrogen.cfg
/obj
/out
@@ -76,6 +74,8 @@
/tools/jsfunfuzz/jsfunfuzz
/tools/jsfunfuzz/jsfunfuzz.tar.gz
/tools/luci-go
+/tools/mips_toolchain
+/tools/mips_toolchain.tar.gz
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/swarming_client
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index dfd3eef878..b2b01df888 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -136,6 +136,7 @@ Sanjoy Das <sanjoy@playingwithpointers.com>
Seo Sanghyeon <sanxiyn@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
Sylvestre Ledru <sledru@mozilla.com>
+Taketoshi Aono <brn@b6n.ch>
Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 4970765972..72a19b2ca4 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -64,6 +64,10 @@ declare_args() {
# Enable fast mksnapshot runs.
v8_enable_fast_mksnapshot = false
+ # Enable embedded builtins.
+ # TODO(jgruber,v8:6666): Support ia32.
+ v8_enable_embedded_builtins = false
+
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
@@ -319,6 +323,9 @@ config("features") {
if (v8_check_microtasks_scopes_consistency) {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
+ if (v8_enable_embedded_builtins) {
+ defines += [ "V8_EMBEDDED_BUILTINS" ]
+ }
}
config("toolchain") {
@@ -387,6 +394,9 @@ config("toolchain") {
"_MIPS_ARCH_MIPS32R6",
"FPU_MODE_FP64",
]
+ if (mips_use_msa) {
+ defines += [ "_MIPS_MSA" ]
+ }
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS32R2" ]
if (mips_fpu_mode == "fp64") {
@@ -424,6 +434,9 @@ config("toolchain") {
}
if (mips_arch_variant == "r6") {
defines += [ "_MIPS_ARCH_MIPS64R6" ]
+ if (mips_use_msa) {
+ defines += [ "_MIPS_MSA" ]
+ }
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS64R2" ]
}
@@ -514,8 +527,6 @@ config("toolchain") {
if (is_clang) {
cflags += [
- "-Wsign-compare",
-
# TODO(hans): Remove once http://crbug.com/428099 is resolved.
"-Winconsistent-missing-override",
]
@@ -883,6 +894,14 @@ action("v8_dump_build_config") {
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_use_snapshot=$v8_use_snapshot",
]
+
+ if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
+ v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
+ args += [
+ "mips_arch_variant=\"$mips_arch_variant\"",
+ "mips_use_msa=$mips_use_msa",
+ ]
+ }
}
###############################################################################
@@ -1018,6 +1037,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-arguments-gen.cc",
"src/builtins/builtins-arguments-gen.h",
"src/builtins/builtins-array-gen.cc",
+ "src/builtins/builtins-array-gen.h",
"src/builtins/builtins-async-function-gen.cc",
"src/builtins/builtins-async-gen.cc",
"src/builtins/builtins-async-gen.h",
@@ -1060,8 +1080,11 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-string-gen.h",
"src/builtins/builtins-symbol-gen.cc",
"src/builtins/builtins-typedarray-gen.cc",
+ "src/builtins/builtins-typedarray-gen.h",
"src/builtins/builtins-utils-gen.h",
"src/builtins/builtins-wasm-gen.cc",
+ "src/builtins/growable-fixed-array-gen.cc",
+ "src/builtins/growable-fixed-array-gen.h",
"src/builtins/setup-builtins-internal.cc",
"src/heap/setup-heap-internal.cc",
"src/ic/accessor-assembler.cc",
@@ -1193,7 +1216,6 @@ v8_source_set("v8_base") {
"//base/trace_event/common/trace_event_common.h",
### gcmole(all) ###
- "include/v8-debug.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
"include/v8-platform.h",
@@ -1236,8 +1258,6 @@ v8_source_set("v8_base") {
"src/assert-scope.h",
"src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
- "src/ast/ast-numbering.cc",
- "src/ast/ast-numbering.h",
"src/ast/ast-source-ranges.h",
"src/ast/ast-traversal-visitor.h",
"src/ast/ast-value-factory.cc",
@@ -1304,6 +1324,8 @@ v8_source_set("v8_base") {
"src/builtins/builtins-utils.h",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
+ "src/builtins/constants-table-builder.cc",
+ "src/builtins/constants-table-builder.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/callable.h",
@@ -1396,6 +1418,7 @@ v8_source_set("v8_base") {
"src/compiler/frame-states.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
+ "src/compiler/functional-list.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/graph-assembler.cc",
@@ -1639,6 +1662,8 @@ v8_source_set("v8_base") {
"src/global-handles.cc",
"src/global-handles.h",
"src/globals.h",
+ "src/handler-table.cc",
+ "src/handler-table.h",
"src/handles-inl.h",
"src/handles.cc",
"src/handles.h",
@@ -1670,6 +1695,7 @@ v8_source_set("v8_base") {
"src/heap/invalidated-slots-inl.h",
"src/heap/invalidated-slots.cc",
"src/heap/invalidated-slots.h",
+ "src/heap/item-parallel-job.cc",
"src/heap/item-parallel-job.h",
"src/heap/local-allocator.h",
"src/heap/mark-compact-inl.h",
@@ -1719,6 +1745,8 @@ v8_source_set("v8_base") {
"src/icu_util.h",
"src/identity-map.cc",
"src/identity-map.h",
+ "src/instruction-stream.cc",
+ "src/instruction-stream.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter/block-coverage-builder.h",
@@ -1835,6 +1863,8 @@ v8_source_set("v8_base") {
"src/objects/js-array.h",
"src/objects/js-collection-inl.h",
"src/objects/js-collection.h",
+ "src/objects/js-promise-inl.h",
+ "src/objects/js-promise.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/literal-objects-inl.h",
@@ -1842,6 +1872,8 @@ v8_source_set("v8_base") {
"src/objects/literal-objects.h",
"src/objects/map-inl.h",
"src/objects/map.h",
+ "src/objects/microtask-inl.h",
+ "src/objects/microtask.h",
"src/objects/module-inl.h",
"src/objects/module.cc",
"src/objects/module.h",
@@ -1849,6 +1881,8 @@ v8_source_set("v8_base") {
"src/objects/name.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
+ "src/objects/promise-inl.h",
+ "src/objects/promise.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
"src/objects/regexp-match-info.h",
@@ -1865,8 +1899,6 @@ v8_source_set("v8_base") {
"src/objects/template-objects.h",
"src/ostreams.cc",
"src/ostreams.h",
- "src/parsing/background-parsing-task.cc",
- "src/parsing/background-parsing-task.h",
"src/parsing/duplicate-finder.h",
"src/parsing/expression-classifier.h",
"src/parsing/expression-scope-reparenter.cc",
@@ -2126,8 +2158,6 @@ v8_source_set("v8_base") {
"src/wasm/signature-map.h",
"src/wasm/streaming-decoder.cc",
"src/wasm/streaming-decoder.h",
- "src/wasm/wasm-api.cc",
- "src/wasm/wasm-api.h",
"src/wasm/wasm-code-manager.cc",
"src/wasm/wasm-code-manager.h",
"src/wasm/wasm-code-specialization.cc",
@@ -2570,11 +2600,15 @@ v8_component("v8_libbase") {
if (is_posix) {
sources += [
- "src/base/platform/platform-posix-time.cc",
- "src/base/platform/platform-posix-time.h",
"src/base/platform/platform-posix.cc",
"src/base/platform/platform-posix.h",
]
+ if (current_os != "aix") {
+ sources += [
+ "src/base/platform/platform-posix-time.cc",
+ "src/base/platform/platform-posix-time.h",
+ ]
+ }
}
if (is_linux) {
@@ -2824,7 +2858,7 @@ group("v8_clusterfuzz") {
if (v8_test_isolation_mode != "noop") {
deps += [
- "tools:run-deopt-fuzzer_run",
+ "test:d8_default_run",
"tools:run-num-fuzzer_run",
]
}
@@ -2842,9 +2876,9 @@ group("v8_fuzzers") {
":v8_simple_json_fuzzer",
":v8_simple_multi_return_fuzzer",
":v8_simple_parser_fuzzer",
+ ":v8_simple_regexp_builtins_fuzzer",
":v8_simple_regexp_fuzzer",
":v8_simple_wasm_async_fuzzer",
- ":v8_simple_wasm_call_fuzzer",
":v8_simple_wasm_code_fuzzer",
":v8_simple_wasm_compile_fuzzer",
":v8_simple_wasm_data_section_fuzzer",
@@ -2952,7 +2986,7 @@ v8_executable("d8") {
}
if (v8_correctness_fuzzer) {
- deps += [ "tools/foozzie:v8_correctness_fuzzer_resources" ]
+ deps += [ "tools/clusterfuzz:v8_correctness_fuzzer_resources" ]
}
defines = []
@@ -3127,6 +3161,25 @@ v8_source_set("parser_fuzzer") {
v8_fuzzer("parser_fuzzer") {
}
+v8_source_set("regexp_builtins_fuzzer") {
+ sources = [
+ "test/fuzzer/regexp-builtins.cc",
+ "test/fuzzer/regexp_builtins/mjsunit.js.h",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("regexp_builtins_fuzzer") {
+}
+
v8_source_set("regexp_fuzzer") {
sources = [
"test/fuzzer/regexp.cc",
@@ -3218,27 +3271,6 @@ v8_source_set("wasm_code_fuzzer") {
v8_fuzzer("wasm_code_fuzzer") {
}
-v8_source_set("wasm_call_fuzzer") {
- sources = [
- "test/common/wasm/test-signatures.h",
- "test/fuzzer/wasm-call.cc",
- ]
-
- deps = [
- ":fuzzer_support",
- ":lib_wasm_fuzzer_common",
- ":wasm_module_runner",
- ]
-
- configs = [
- ":external_config",
- ":internal_config_base",
- ]
-}
-
-v8_fuzzer("wasm_call_fuzzer") {
-}
-
v8_source_set("lib_wasm_fuzzer_common") {
sources = [
"test/fuzzer/wasm-fuzzer-common.cc",
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 1fe3135a01..cfeeb0e08b 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1748 @@
+2018-02-28: Version 6.6.346
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-28: Version 6.6.345
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-28: Version 6.6.344
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-28: Version 6.6.343
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-27: Version 6.6.342
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-27: Version 6.6.341
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-27: Version 6.6.340
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-27: Version 6.6.339
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-27: Version 6.6.338
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-27: Version 6.6.337
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-27: Version 6.6.336
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.335
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.334
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.333
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.332
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.331
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.330
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.329
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.328
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.327
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.326
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.325
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.324
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.323
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.322
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.321
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.320
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-26: Version 6.6.319
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-25: Version 6.6.318
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-25: Version 6.6.317
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-25: Version 6.6.316
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-25: Version 6.6.315
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-24: Version 6.6.314
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-24: Version 6.6.313
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.312
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.311
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.310
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.309
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.308
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.307
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.306
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.305
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.304
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.303
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.302
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.301
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.300
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.299
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-23: Version 6.6.298
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.297
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.296
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.295
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.294
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.293
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.292
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.291
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.290
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.289
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.288
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.287
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.286
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.285
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.284
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-22: Version 6.6.283
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-21: Version 6.6.282
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-21: Version 6.6.281
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-21: Version 6.6.280
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.279
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.278
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.277
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.276
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.275
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.274
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.273
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.272
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.271
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.270
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.269
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.268
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-20: Version 6.6.267
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-19: Version 6.6.266
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-19: Version 6.6.265
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-19: Version 6.6.264
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-19: Version 6.6.263
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-19: Version 6.6.262
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-19: Version 6.6.261
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-17: Version 6.6.260
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-17: Version 6.6.259
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-16: Version 6.6.258
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-16: Version 6.6.257
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-16: Version 6.6.256
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-16: Version 6.6.255
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-15: Version 6.6.254
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-15: Version 6.6.253
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-15: Version 6.6.252
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.251
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.250
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.249
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.248
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.247
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.246
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.245
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.244
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.243
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.242
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.241
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-14: Version 6.6.240
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-13: Version 6.6.239
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-13: Version 6.6.238
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-13: Version 6.6.237
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-13: Version 6.6.236
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-13: Version 6.6.235
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-13: Version 6.6.234
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-12: Version 6.6.233
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-12: Version 6.6.232
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-12: Version 6.6.231
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-12: Version 6.6.230
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-12: Version 6.6.229
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.228
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.227
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.226
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.225
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.224
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.223
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.222
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.221
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.220
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.219
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.218
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.217
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.216
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.215
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.214
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.213
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-09: Version 6.6.212
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.211
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.210
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.209
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.208
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.207
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.206
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.205
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.204
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.203
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.202
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.201
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.200
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.199
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.198
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.197
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-08: Version 6.6.196
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.195
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.194
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.193
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.192
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.191
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.190
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.189
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.188
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.187
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.186
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.185
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.184
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.183
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.182
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.181
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-07: Version 6.6.180
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.179
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.178
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.177
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.176
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.175
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.174
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.173
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.172
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.171
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.170
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.169
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.168
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.167
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-06: Version 6.6.166
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.165
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.164
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.163
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.162
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.161
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.160
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.159
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.158
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.157
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-05: Version 6.6.156
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.155
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.154
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.153
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.152
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.151
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.150
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.149
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.148
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.147
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.146
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.145
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.144
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.143
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.142
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-02: Version 6.6.141
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.140
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.139
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.138
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.137
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.136
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.135
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.134
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.133
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.132
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.131
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.130
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.129
+
+ Performance and stability improvements on all platforms.
+
+
+2018-02-01: Version 6.6.128
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.127
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.126
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.125
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.124
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.123
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.122
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.121
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.120
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.119
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.118
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.117
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-31: Version 6.6.116
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.115
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.114
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.113
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.112
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.111
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.110
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.109
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.108
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.107
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-30: Version 6.6.106
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.105
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.104
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.103
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.102
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.101
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.100
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.99
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.98
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.97
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.96
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.95
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.94
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.93
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.92
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-29: Version 6.6.91
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-27: Version 6.6.90
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-27: Version 6.6.89
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-26: Version 6.6.88
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-26: Version 6.6.87
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-26: Version 6.6.86
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-26: Version 6.6.85
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-26: Version 6.6.84
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-26: Version 6.6.83
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.82
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.81
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.80
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.79
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.78
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.77
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.76
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.75
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.74
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.73
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.72
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.71
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.70
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.69
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.68
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-25: Version 6.6.67
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.66
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.65
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.64
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.63
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.62
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.61
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.60
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.59
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.58
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.57
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.56
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.55
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.54
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.53
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-24: Version 6.6.52
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.51
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.50
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.49
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.48
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.47
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.46
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.45
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.44
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.43
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.42
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.41
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.40
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.39
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.38
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.37
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.36
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.35
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.34
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-23: Version 6.6.33
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.32
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.31
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.30
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.29
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.28
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.27
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.26
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.25
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.24
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.23
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.22
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.21
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.20
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.19
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.18
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.17
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.16
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.15
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.14
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.13
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-22: Version 6.6.12
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-21: Version 6.6.11
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-20: Version 6.6.10
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-20: Version 6.6.9
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-19: Version 6.6.8
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-19: Version 6.6.7
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-19: Version 6.6.6
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-19: Version 6.6.5
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-19: Version 6.6.4
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-19: Version 6.6.3
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-19: Version 6.6.2
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-19: Version 6.6.1
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-18: Version 6.5.257
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-18: Version 6.5.256
+
+ Performance and stability improvements on all platforms.
+
+
+2018-01-18: Version 6.5.255
+
+ Performance and stability improvements on all platforms.
+
+
2018-01-17: Version 6.5.254

        Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index bc9e4e0a90..4c67868d52 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -5,20 +5,22 @@
vars = {
'checkout_instrumented_libraries': False,
'chromium_url': 'https://chromium.googlesource.com',
- 'build_for_node': False,
+ 'download_gcmole': False,
+ 'download_jsfunfuzz': False,
+ 'download_mips_toolchain': False,
}
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b3a78cd03a95c30ff10f863f736249eb04f0f34d',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b1d6c28b4a64128ad856d9da458afda2861fddab',
'v8/tools/gyp':
Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c8ca2962b46670ec89071ffd1291688983cd319c',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'd888fd2a1be890f4d35e43f68d6d79f42519a357',
'v8/third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b7578b4132cf73ca3265e2ee0b7bd0a422a54ebf',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b745ddca2c63719167c0f2008ae19e667c5e9952',
'v8/buildtools':
- Var('chromium_url') + '/chromium/buildtools.git' + '@' + '6fe4a3251488f7af86d64fc25cf442e817cf6133',
+ Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2888931260f2a32bc583f005bd807a561b2fa6af',
'v8/base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '0e9a47d74970bee1bbfc063c47215406f8918699',
'v8/third_party/android_ndk': {
@@ -26,11 +28,11 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/android_tools': {
- 'url': Var('chromium_url') + '/android_tools.git' + '@' + 'c78b25872734e0038ae2a333edc645cd96bc232d',
+ 'url': Var('chromium_url') + '/android_tools.git' + '@' + '9a70d48fcdd68cd0e7e968f342bd767ee6323bd1',
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'b4826a52853c9c2778d496f6c6fa853f777f94df',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '8a42ad3cb185e340c32b20f657980fd057e3769f',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -52,15 +54,15 @@ deps = {
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '8311965251953d4745aeb68c98fb71fab2eac1d0',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'b59d956b3c268abd0875aeb87d6688f4c7aafc9b',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '27088876ff821e8a1518383576a43662a3255d56',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b3d3f5920b161f95f1a8ffe08b75c695e0edf350',
'v8/tools/luci-go':
- Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'd882048313f6f51df29856406fa03b620c1d0205',
+ Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'ff0709d4283b1f233dcf0c9fec1672c6ecaed2f1',
'v8/test/wasm-js':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'a25083ac7076b05e3f304ec9e093ef1b1ee09422',
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '4653fc002a510b4f207af07f2c7c61b13dba78d9',
}
recursedeps = [
@@ -78,7 +80,6 @@ include_rules = [
# checkdeps.py shouldn't check for includes in these directories:
skip_child_includes = [
'build',
- 'gypfiles',
'third_party',
]
@@ -91,14 +92,16 @@ hooks = [
'pattern': '.',
'action': [
'python',
- 'v8/gypfiles/landmines.py',
+ 'v8/build/landmines.py',
+ '--landmine-scripts',
+ 'v8/tools/get_landmines.py',
],
},
# Pull clang-format binaries using checked-in hashes.
{
'name': 'clang_format_win',
'pattern': '.',
- 'condition': 'host_os == "win" and build_for_node != True',
+ 'condition': 'host_os == "win"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=win32',
@@ -110,7 +113,7 @@ hooks = [
{
'name': 'clang_format_mac',
'pattern': '.',
- 'condition': 'host_os == "mac" and build_for_node != True',
+ 'condition': 'host_os == "mac"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=darwin',
@@ -122,7 +125,7 @@ hooks = [
{
'name': 'clang_format_linux',
'pattern': '.',
- 'condition': 'host_os == "linux" and build_for_node != True',
+ 'condition': 'host_os == "linux"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=linux*',
@@ -134,28 +137,30 @@ hooks = [
{
'name': 'gcmole',
'pattern': '.',
- 'condition': 'build_for_node != True',
- # TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
- 'action': [
- 'python',
- 'v8/tools/gcmole/download_gcmole_tools.py',
+ 'condition': 'download_gcmole',
+ 'action': [ 'download_from_google_storage',
+ '--bucket', 'chrome-v8-gcmole',
+ '-u', '--no_resume',
+ '-s', 'v8/tools/gcmole/gcmole-tools.tar.gz.sha1',
+ '--platform=linux*',
],
},
{
'name': 'jsfunfuzz',
'pattern': '.',
- 'condition': 'build_for_node != True',
- # TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
- 'action': [
- 'python',
- 'v8/tools/jsfunfuzz/download_jsfunfuzz.py',
+ 'condition': 'download_jsfunfuzz',
+ 'action': [ 'download_from_google_storage',
+ '--bucket', 'chrome-v8-jsfunfuzz',
+ '-u', '--no_resume',
+ '-s', 'v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1',
+ '--platform=linux*',
],
},
# Pull luci-go binaries (isolate, swarming) using checked-in hashes.
{
'name': 'luci-go_win',
'pattern': '.',
- 'condition': 'host_os == "win" and build_for_node != True',
+ 'condition': 'host_os == "win"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=win32',
@@ -167,7 +172,7 @@ hooks = [
{
'name': 'luci-go_mac',
'pattern': '.',
- 'condition': 'host_os == "mac" and build_for_node != True',
+ 'condition': 'host_os == "mac"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=darwin',
@@ -179,7 +184,7 @@ hooks = [
{
'name': 'luci-go_linux',
'pattern': '.',
- 'condition': 'host_os == "linux" and build_for_node != True',
+ 'condition': 'host_os == "linux"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=linux*',
@@ -228,7 +233,6 @@ hooks = [
{
'name': 'wasm_spec_tests',
'pattern': '.',
- 'condition': 'build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -240,7 +244,6 @@ hooks = [
{
'name': 'closure_compiler',
'pattern': '.',
- 'condition': 'build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -250,17 +253,39 @@ hooks = [
],
},
{
- # Downloads the current stable linux sysroot to build/linux/ if needed.
- # This sysroot updates at about the same rate that the chrome build deps
- # change.
- 'name': 'sysroot',
+ 'name': 'sysroot_arm',
'pattern': '.',
- 'condition': 'build_for_node != True',
- 'action': [
- 'python',
- 'v8/build/linux/sysroot_scripts/install-sysroot.py',
- '--running-as-hook',
- ],
+ 'condition': 'checkout_linux and checkout_arm',
+ 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
+ '--arch=arm'],
+ },
+ {
+ 'name': 'sysroot_arm64',
+ 'pattern': '.',
+ 'condition': 'checkout_linux and checkout_arm64',
+ 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
+ '--arch=arm64'],
+ },
+ {
+ 'name': 'sysroot_x86',
+ 'pattern': '.',
+ 'condition': 'checkout_linux and (checkout_x86 or checkout_x64)',
+ 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
+ '--arch=x86'],
+ },
+ {
+ 'name': 'sysroot_mips',
+ 'pattern': '.',
+ 'condition': 'checkout_linux and checkout_mips',
+ 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
+ '--arch=mips'],
+ },
+ {
+ 'name': 'sysroot_x64',
+ 'pattern': '.',
+ 'condition': 'checkout_linux and checkout_x64',
+ 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
+ '--arch=x64'],
},
{
'name': 'msan_chained_origins',
@@ -297,7 +322,7 @@ hooks = [
{
'name': 'binutils',
'pattern': 'v8/third_party/binutils',
- 'condition': 'host_os == "linux" and build_for_node != True',
+ 'condition': 'host_os == "linux"',
'action': [
'python',
'v8/third_party/binutils/download.py',
@@ -307,6 +332,8 @@ hooks = [
# Note: On Win, this should run after win_toolchain, as it may use it.
'name': 'clang',
'pattern': '.',
+ # clang not supported on aix
+ 'condition': 'host_os != "aix"',
'action': ['python', 'v8/tools/clang/scripts/update.py'],
},
{
@@ -316,15 +343,20 @@ hooks = [
'action': [
'python',
'v8/build/fuchsia/update_sdk.py',
- '226f6dd0cad1d6be63a353ce2649423470729ae9',
],
},
{
- # A change to a .gyp, .gypi, or to GYP itself should run the generator.
- 'name': 'regyp_if_needed',
+ 'name': 'mips_toolchain',
'pattern': '.',
- 'condition': 'build_for_node != True',
- 'action': ['python', 'v8/gypfiles/gyp_v8', '--running-as-hook'],
+ 'condition': 'download_mips_toolchain',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=linux',
+ '--no_auth',
+ '-u',
+ '--bucket', 'chromium-v8',
+ '-s', 'v8/tools/mips_toolchain.tar.gz.sha1',
+ ],
},
# Download and initialize "vpython" VirtualEnv environment packages.
{
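
The DEPS hunks above replace the single `build_for_node` switch with narrow opt-in variables (`download_gcmole`, `download_jsfunfuzz`, `download_mips_toolchain`) plus per-platform `checkout_*` conditions, so optional payloads are fetched only when a checkout asks for them. A minimal sketch of how a checkout might opt into one of these via `custom_vars` in its `.gclient` file (the file below is illustrative, not part of this commit; gclient evaluates it as Python):

    # .gclient -- hypothetical standalone V8 checkout configuration.
    # gclient feeds 'custom_vars' into the DEPS 'condition' expressions,
    # so the gcmole hook above fires on the next 'gclient runhooks'.
    solutions = [
      {
        'name': 'v8',
        'url': 'https://chromium.googlesource.com/v8/v8.git',
        'deps_file': 'DEPS',
        'managed': False,
        'custom_vars': {
          'download_gcmole': True,       # enable the gcmole download hook
          'download_jsfunfuzz': False,   # leave the fuzzer payload off
        },
      },
    ]
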
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
deleted file mode 100644
index 167ebf8c08..0000000000
--- a/deps/v8/Makefile
+++ /dev/null
@@ -1,493 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-# Variable default definitions. Override them by exporting them in your shell.
-OUTDIR ?= out
-TESTJOBS ?=
-GYPFLAGS ?=
-TESTFLAGS ?=
-ANDROID_NDK_HOST_ARCH ?=
-ANDROID_V8 ?= /data/local/tmp/v8
-
-# Special build flags. Use them like this: "make library=shared"
-
-# library=shared || component=shared_library
-ifeq ($(library), shared)
- GYPFLAGS += -Dcomponent=shared_library
-endif
-ifdef component
- GYPFLAGS += -Dcomponent=$(component)
-endif
-# disassembler=on
-ifeq ($(disassembler), on)
- GYPFLAGS += -Dv8_enable_disassembler=1
-endif
-# objectprint=on
-ifeq ($(objectprint), on)
- GYPFLAGS += -Dv8_object_print=1
-endif
-# verifycsa=on
-ifeq ($(verifycsa), on)
- GYPFLAGS += -Dv8_enable_verify_csa=1
-endif
-# verifyheap=on
-ifeq ($(verifyheap), on)
- GYPFLAGS += -Dv8_enable_verify_heap=1
-endif
-# tracemaps=on
-ifeq ($(tracemaps), on)
- GYPFLAGS += -Dv8_trace_maps=1
-endif
-# concurrentmarking=on
-ifeq ($(concurrentmarking), on)
- GYPFLAGS += -Dv8_enable_concurrent_marking=1
-endif
-# backtrace=off
-ifeq ($(backtrace), off)
- GYPFLAGS += -Dv8_enable_backtrace=0
-else
- GYPFLAGS += -Dv8_enable_backtrace=1
-endif
-# verifypredictable=on
-ifeq ($(verifypredictable), on)
- GYPFLAGS += -Dv8_enable_verify_predictable=1
-endif
-# snapshot=off
-ifeq ($(snapshot), off)
- GYPFLAGS += -Dv8_use_snapshot='false'
-endif
-ifeq ($(snapshot), external)
- GYPFLAGS += -Dv8_use_external_startup_data=1
-endif
-# extrachecks=on/off
-ifeq ($(extrachecks), on)
- GYPFLAGS += -Ddcheck_always_on=1 -Dv8_enable_handle_zapping=1
-endif
-ifeq ($(extrachecks), off)
- GYPFLAGS += -Ddcheck_always_on=0 -Dv8_enable_handle_zapping=0
-endif
-# slowdchecks=on/off
-ifeq ($(slowdchecks), on)
- GYPFLAGS += -Dv8_enable_slow_dchecks=1
-endif
-ifeq ($(slowdchecks), off)
- GYPFLAGS += -Dv8_enable_slow_dchecks=0
-endif
-# debugsymbols=on
-ifeq ($(debugsymbols), on)
- GYPFLAGS += -Drelease_extra_cflags=-ggdb3
-endif
-# gdbjit=on/off
-ifeq ($(gdbjit), on)
- GYPFLAGS += -Dv8_enable_gdbjit=1
-endif
-ifeq ($(gdbjit), off)
- GYPFLAGS += -Dv8_enable_gdbjit=0
-endif
-# vtunejit=on
-ifeq ($(vtunejit), on)
- GYPFLAGS += -Dv8_enable_vtunejit=1
-endif
-# unalignedaccess=on
-ifeq ($(unalignedaccess), on)
- GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
-endif
-# randomseed=12345, disable random seed via randomseed=0
-ifdef randomseed
- GYPFLAGS += -Dv8_random_seed=$(randomseed)
-endif
-# soname_version=1.2.3
-ifdef soname_version
- GYPFLAGS += -Dsoname_version=$(soname_version)
-endif
-# werror=no
-ifeq ($(werror), no)
- GYPFLAGS += -Dwerror=''
-endif
-# strictaliasing=off (workaround for GCC-4.5)
-ifeq ($(strictaliasing), off)
- GYPFLAGS += -Dv8_no_strict_aliasing=1
-endif
-# regexp=interpreted
-ifeq ($(regexp), interpreted)
- GYPFLAGS += -Dv8_interpreted_regexp=1
-endif
-# i18nsupport=off
-ifeq ($(i18nsupport), off)
- GYPFLAGS += -Dv8_enable_i18n_support=0
- TESTFLAGS += --noi18n
-endif
-# deprecationwarnings=on
-ifeq ($(deprecationwarnings), on)
- GYPFLAGS += -Dv8_deprecation_warnings=1
-endif
-# vectorstores=on
-ifeq ($(vectorstores), on)
- GYPFLAGS += -Dv8_vector_stores=1
-endif
-# imminentdeprecationwarnings=on
-ifeq ($(imminentdeprecationwarnings), on)
- GYPFLAGS += -Dv8_imminent_deprecation_warnings=1
-endif
-# asan=on
-ifeq ($(asan), on)
- GYPFLAGS += -Dasan=1 -Dclang=1
- TESTFLAGS += --asan
- ifeq ($(lsan), on)
- GYPFLAGS += -Dlsan=1
- endif
-endif
-ifdef embedscript
- GYPFLAGS += -Dembed_script=$(embedscript)
-endif
-ifdef warmupscript
- GYPFLAGS += -Dwarmup_script=$(warmupscript)
-endif
-ifeq ($(goma), on)
- GYPFLAGS += -Duse_goma=1
-endif
-# v8_os_page_size=0, when 0 or not specified use build OS page size
-ifdef v8_os_page_size
- ifneq ($(v8_os_page_size), 0)
- ifneq ($(snapshot), off)
- GYPFLAGS += -Dv8_os_page_size=$(v8_os_page_size)
- endif
- endif
-endif
-# arm specific flags.
-# arm_version=<number | "default">
-ifneq ($(strip $(arm_version)),)
- GYPFLAGS += -Darm_version=$(arm_version)
-else
-# Deprecated (use arm_version instead): armv7=false/true
-ifeq ($(armv7), false)
- GYPFLAGS += -Darm_version=6
-else
-ifeq ($(armv7), true)
- GYPFLAGS += -Darm_version=7
-endif
-endif
-endif
-# hardfp=on/off. Deprecated, use armfloatabi
-ifeq ($(hardfp),on)
- GYPFLAGS += -Darm_float_abi=hard
-else
-ifeq ($(hardfp),off)
- GYPFLAGS += -Darm_float_abi=softfp
-endif
-endif
-# fpu: armfpu=xxx
-# xxx: vfp, vfpv3-d16, vfpv3, neon.
-ifeq ($(armfpu),)
- GYPFLAGS += -Darm_fpu=default
-else
- GYPFLAGS += -Darm_fpu=$(armfpu)
-endif
-# float abi: armfloatabi=softfp/hard
-ifeq ($(armfloatabi),)
-ifeq ($(hardfp),)
- GYPFLAGS += -Darm_float_abi=default
-endif
-else
- GYPFLAGS += -Darm_float_abi=$(armfloatabi)
-endif
-# armthumb=on/off
-ifeq ($(armthumb), off)
- GYPFLAGS += -Darm_thumb=0
-else
-ifeq ($(armthumb), on)
- GYPFLAGS += -Darm_thumb=1
-endif
-endif
-# arm_test_noprobe=on
-# With this flag set, by default v8 will only use features implied
-# by the compiler (no probe). This is done by modifying the default
-# values of enable_armv7, enable_vfp3, enable_32dregs and enable_neon.
-# Modifying these flags when launching v8 will enable the probing for
-# the specified values.
-ifeq ($(arm_test_noprobe), on)
- GYPFLAGS += -Darm_test_noprobe=on
-endif
-# Do not omit the frame pointer, needed for profiling with perf
-ifeq ($(no_omit_framepointer), on)
- GYPFLAGS += -Drelease_extra_cflags=-fno-omit-frame-pointer
-endif
-
-ifdef android_ndk_root
- GYPFLAGS += -Dandroid_ndk_root=$(android_ndk_root)
- export ANDROID_NDK_ROOT = $(android_ndk_root)
-endif
-
-# ----------------- available targets: --------------------
-# - any arch listed in ARCHES (see below)
-# - any mode listed in MODES
-# - every combination <arch>.<mode>, e.g. "ia32.release"
-# - "native": current host's architecture, release mode
-# - any of the above with .check appended, e.g. "ia32.release.check"
-# - "android": cross-compile for Android/ARM
-# - default (no target specified): build all DEFAULT_ARCHES and MODES
-# - "check": build all targets and run all tests
-# - "<arch>.clean" for any <arch> in ARCHES
-# - "clean": clean all ARCHES
-
-# ----------------- internal stuff ------------------------
-
-# Architectures and modes to be compiled. Consider these to be internal
-# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el ppc ppc64 s390 s390x
-ARCHES32 = ia32 arm mips mipsel ppc s390
-DEFAULT_ARCHES = ia32 x64 arm
-MODES = release debug optdebug
-DEFAULT_MODES = release debug
-ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
- android_mipsel
-
-# List of files that trigger Makefile regeneration:
-GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
- gypfiles/shim_headers.gypi gypfiles/features.gypi \
- gypfiles/standalone.gypi \
- gypfiles/toolchain.gypi gypfiles/all.gyp gypfiles/mac/asan.gyp \
- test/cctest/cctest.gyp test/fuzzer/fuzzer.gyp \
- test/unittests/unittests.gyp src/v8.gyp \
- tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
- samples/samples.gyp src/third_party/vtune/v8vtune.gyp src/d8.gyp
-
-# If vtunejit=on, the v8vtune.gyp will be appended.
-ifeq ($(vtunejit), on)
- GYPFILES += src/third_party/vtune/v8vtune.gyp
-endif
-# Generates all combinations of ARCHES and MODES, e.g. "ia32.release".
-BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
-ANDROID_BUILDS = $(foreach mode,$(MODES), \
- $(addsuffix .$(mode),$(ANDROID_ARCHES)))
-# Generates corresponding test targets, e.g. "ia32.release.check".
-CHECKS = $(addsuffix .check,$(BUILDS))
-QUICKCHECKS = $(addsuffix .quickcheck,$(BUILDS))
-ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
-# File where previously used GYPFLAGS are stored.
-ENVFILE = $(OUTDIR)/environment
-
-.PHONY: all check clean builddeps dependencies $(ENVFILE).new native \
- qc quickcheck $(QUICKCHECKS) turbocheck \
- $(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
- $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
- $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
- $(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS)
-
-# Target definitions. "all" is the default.
-all: $(DEFAULT_MODES)
-
-# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
-# having been created before.
-buildbot:
- $(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
- builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
-
-# Compile targets. MODES and ARCHES are convenience targets.
-.SECONDEXPANSION:
-$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
-
-$(ARCHES): $(addprefix $$@.,$(DEFAULT_MODES))
-
-# Defines how to build a particular target (e.g. ia32.release).
-$(BUILDS): $(OUTDIR)/Makefile.$$@
- @$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
- BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
- python -c "print \
- raw_input().replace('opt', '').capitalize()") \
- builddir="$(shell pwd)/$(OUTDIR)/$@"
-
-native: $(OUTDIR)/Makefile.native
- @$(MAKE) -C "$(OUTDIR)" -f Makefile.native \
- BUILDTYPE=Release \
- builddir="$(shell pwd)/$(OUTDIR)/$@"
-
-$(ANDROID_ARCHES): $(addprefix $$@.,$(MODES))
-
-$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) Makefile.android
- @$(MAKE) -f Makefile.android $@ \
- ARCH="$(basename $@)" \
- MODE="$(subst .,,$(suffix $@))" \
- OUTDIR="$(OUTDIR)" \
- GYPFLAGS="$(GYPFLAGS)"
-
-# Test targets.
-check: all
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch=$(shell echo $(DEFAULT_ARCHES) | sed -e 's/ /,/g') \
- $(TESTFLAGS)
-
-$(addsuffix .check,$(MODES)): $$(basename $$@)
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --mode=$(basename $@) $(TESTFLAGS)
-
-$(addsuffix .check,$(ARCHES)): $$(basename $$@)
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch=$(basename $@) $(TESTFLAGS)
-
-$(CHECKS): $$(basename $$@)
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(basename $@) $(TESTFLAGS)
-
-$(addsuffix .quickcheck,$(MODES)): $$(basename $$@)
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --mode=$(basename $@) $(TESTFLAGS) --quickcheck
-
-$(addsuffix .quickcheck,$(ARCHES)): $$(basename $$@)
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch=$(basename $@) $(TESTFLAGS) --quickcheck
-
-$(QUICKCHECKS): $$(basename $$@)
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(basename $@) $(TESTFLAGS) --quickcheck
-
-$(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
- @tools/android-sync.sh $(basename $@) $(OUTDIR) \
- $(shell pwd) $(ANDROID_V8)
-
-$(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(basename $@) \
- --timeout=600 \
- --command-prefix="tools/android-run.py" $(TESTFLAGS)
-
-$(addsuffix .check, $(ANDROID_ARCHES)): \
- $(addprefix $$(basename $$@).,$(MODES)).check
-
-native.check: native
- @gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
- --arch-and-mode=. $(TESTFLAGS)
-
-SUPERFASTTESTMODES = ia32.release
-FASTTESTMODES = $(SUPERFASTTESTMODES),x64.release,ia32.optdebug,x64.optdebug,arm.optdebug,arm64.release
-FASTCOMPILEMODES = $(FASTTESTMODES),arm64.optdebug
-
-COMMA = ,
-EMPTY =
-SPACE = $(EMPTY) $(EMPTY)
-quickcheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
- gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) --quickcheck \
- --download-data mozilla webkit
- gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
-qc: quickcheck
-
-turbocheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
- gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) \
- --quickcheck --variants=turbofan --download-data mozilla webkit
- gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) \
- --quickcheck --variants=turbofan
-tc: turbocheck
-
-# Clean targets. You can clean each architecture individually, or everything.
-$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)):
- rm -f $(OUTDIR)/Makefile.$(basename $@)*
- rm -rf $(OUTDIR)/$(basename $@).release
- rm -rf $(OUTDIR)/$(basename $@).debug
- rm -rf $(OUTDIR)/$(basename $@).optdebug
- find $(OUTDIR) -regex '.*\(host\|target\)\.$(basename $@).*\.mk' -delete
-
-native.clean:
- rm -f $(OUTDIR)/Makefile.native
- rm -rf $(OUTDIR)/native
- find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete
-
-clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean gtags.clean tags.clean
-
-# GYP file generation targets.
-OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
-$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
- $(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \
- cut -f 2 -d " " | cut -f 1 -d "-" ))
- $(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
- $(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
- $(eval CXX_TARGET_ARCH:=$(subst s390x,s390,$(CXX_TARGET_ARCH)))
- $(eval CXX_TARGET_ARCH:=$(subst powerpc,ppc,$(CXX_TARGET_ARCH)))
- $(eval CXX_TARGET_ARCH:=$(subst ppc64,ppc,$(CXX_TARGET_ARCH)))
- $(eval CXX_TARGET_ARCH:=$(subst ppcle,ppc,$(CXX_TARGET_ARCH)))
- $(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@))))
- PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \
- GYP_GENERATORS=make \
- tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \
- -Igypfiles/standalone.gypi --depth=. \
- -Dv8_target_arch=$(V8_TARGET_ARCH) \
- $(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \
- -Dtarget_arch=$(V8_TARGET_ARCH), \
- $(if $(shell echo $(ARCHES32) | grep $(V8_TARGET_ARCH)), \
- -Dtarget_arch=ia32,)) \
- $(if $(findstring optdebug,$@),-Dv8_optimized_debug=1,) \
- -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
-
-$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
- PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \
- GYP_GENERATORS=make \
- tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \
- -Igypfiles/standalone.gypi --depth=. -S.native $(GYPFLAGS)
-
-# Replaces the old with the new environment file if they're different, which
-# will trigger GYP to regenerate Makefiles.
-$(ENVFILE): $(ENVFILE).new
- @if test -r $(ENVFILE) && cmp $(ENVFILE).new $(ENVFILE) > /dev/null; \
- then rm $(ENVFILE).new; \
- else mv $(ENVFILE).new $(ENVFILE); fi
-
-# Stores current GYPFLAGS in a file.
-$(ENVFILE).new:
- $(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \
- cut -f 2 -d " " | cut -f 1 -d "-" ))
- $(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
- $(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
- @mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS) -Dtarget_arch=$(CXX_TARGET_ARCH)" > $(ENVFILE).new;
-
-# Support for the GNU GLOBAL Source Code Tag System.
-gtags.files: $(GYPFILES) $(ENVFILE)
- @find include src test -name '*.h' -o -name '*.cc' -o -name '*.c' > $@
-
-# We need to manually set the stack limit here, to work around bugs in
-# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
-# Using $(wildcard ...) gracefully ignores non-existing files, so that stale
-# gtags.files after switching branches don't cause recipe failures.
-GPATH GRTAGS GSYMS GTAGS: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null))
- @bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
-
-gtags.clean:
- rm -f gtags.files GPATH GRTAGS GSYMS GTAGS
-
-tags: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null))
- @(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \
- (echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false)
- ctags --fields=+l -L $<
-
-tags.clean:
- rm -r tags
-
-dependencies builddeps:
- $(error Use 'gclient sync' instead)
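
With the top-level Makefile deleted, its convenience targets go away; the file's own final rule already pointed users at `gclient sync` instead. For readers not fluent in GNU Make, the BUILDS/CHECKS machinery it used was simply a cross product of architectures and modes with suffixes appended. A rough Python rendering of that expansion (illustration only, not part of the commit):

    # Equivalent of the deleted Makefile's:
    #   BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
    #   CHECKS = $(addsuffix .check,$(BUILDS))
    ARCHES = ['ia32', 'x64', 'arm', 'arm64', 'mips', 'mipsel', 'mips64',
              'mips64el', 'ppc', 'ppc64', 's390', 's390x']
    MODES = ['release', 'debug', 'optdebug']

    builds = ['%s.%s' % (arch, mode) for mode in MODES for arch in ARCHES]
    checks = [build + '.check' for build in builds]

    print(builds[:3])  # ['ia32.release', 'x64.release', 'arm.release']
    print(checks[0])   # ia32.release.check
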
diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android
deleted file mode 100644
index 417152177d..0000000000
--- a/deps/v8/Makefile.android
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
- android_mipsel android_x87
-MODES = release debug
-
-# Generates all combinations of ANDROID ARCHES and MODES,
-# e.g. "android_ia32.release" or "android_arm.release"
-ANDROID_BUILDS = $(foreach mode,$(MODES), \
- $(addsuffix .$(mode),$(ANDROID_ARCHES)))
-
-ifeq ($(ARCH), android_arm)
- DEFINES = target_arch=arm v8_target_arch=arm
-else ifeq ($(ARCH), android_arm64)
- DEFINES = target_arch=arm64 v8_target_arch=arm64
-else ifeq ($(ARCH), android_mipsel)
- DEFINES = target_arch=mipsel v8_target_arch=mipsel
-else ifeq ($(ARCH), android_ia32)
- DEFINES = target_arch=ia32 v8_target_arch=ia32
-else ifeq ($(ARCH), android_x64)
- DEFINES = target_arch=x64 v8_target_arch=x64
-else ifeq ($(ARCH), android_x87)
- DEFINES = target_arch=ia32 v8_target_arch=x87
-else
- $(error Target architecture "${ARCH}" is not supported)
-endif
-
-# Common flags.
-DEFINES += OS=android
-
-.SECONDEXPANSION:
-$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
- @$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
- BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
- python -c "print raw_input().capitalize()") \
- builddir="$(shell pwd)/$(OUTDIR)/$@"
-
-# Android GYP file generation targets.
-ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
-$(ANDROID_MAKEFILES):
- GYP_GENERATORS=make-android \
- GYP_DEFINES="${DEFINES}" \
- PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH)" \
- tools/gyp/gyp --generator-output="${OUTDIR}" gypfiles/all.gyp \
- -Igypfiles/standalone.gypi --depth=. \
- -S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 2583a229b6..d8e0fc60de 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -7,6 +7,7 @@ bradnelson@chromium.org
cbruni@chromium.org
clemensh@chromium.org
danno@chromium.org
+delphick@chromium.org
eholk@chromium.org
franzih@chromium.org
gdeepti@chromium.org
@@ -17,26 +18,22 @@ ishell@chromium.org
jarin@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
-jochen@chromium.org
leszeks@chromium.org
-littledan@chromium.org
machenbach@chromium.org
marja@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
-mtrofin@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org
petermarshall@chromium.org
rmcilroy@chromium.org
-rossberg@chromium.org
sergiyb@chromium.org
+sigurds@chromium.org
tebbi@chromium.org
titzer@chromium.org
ulan@chromium.org
verwaest@chromium.org
-vogelheim@chromium.org
yangguo@chromium.org
# TEAM: v8-dev@googlegroups.com
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index b69e8f5089..e6dbb79395 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -153,6 +153,62 @@ def _CheckUnwantedDependencies(input_api, output_api):
return results
+def _CheckHeadersHaveIncludeGuards(input_api, output_api):
+ """Ensures that all header files have include guards."""
+ file_inclusion_pattern = r'src/.+\.h'
+
+ def FilterFile(affected_file):
+ black_list = (_EXCLUDED_PATHS +
+ input_api.DEFAULT_BLACK_LIST)
+ return input_api.FilterSourceFile(
+ affected_file,
+ white_list=(file_inclusion_pattern, ),
+ black_list=black_list)
+
+ leading_src_pattern = input_api.re.compile(r'^src/')
+ dash_dot_slash_pattern = input_api.re.compile(r'[-./]')
+ def PathToGuardMacro(path):
+ """Guards should be of the form V8_PATH_TO_FILE_WITHOUT_SRC_H_."""
+ x = input_api.re.sub(leading_src_pattern, 'v8_', path)
+ x = input_api.re.sub(dash_dot_slash_pattern, '_', x)
+ x = x.upper() + "_"
+ return x
+
+ problems = []
+ for f in input_api.AffectedSourceFiles(FilterFile):
+ local_path = f.LocalPath()
+ guard_macro = PathToGuardMacro(local_path)
+ guard_patterns = [
+ input_api.re.compile(r'^#ifndef ' + guard_macro + '$'),
+ input_api.re.compile(r'^#define ' + guard_macro + '$'),
+ input_api.re.compile(r'^#endif // ' + guard_macro + '$')]
+ skip_check_pattern = input_api.re.compile(
+ r'^// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD')
+ found_patterns = [ False, False, False ]
+ file_omitted = False
+
+ for line in f.NewContents():
+ for i in range(len(guard_patterns)):
+ if guard_patterns[i].match(line):
+ found_patterns[i] = True
+ if skip_check_pattern.match(line):
+ file_omitted = True
+ break
+
+ if not file_omitted and not all(found_patterns):
+ problems.append(
+ '%s: Missing include guard \'%s\'' % (local_path, guard_macro))
+
+ if problems:
+ return [output_api.PresubmitError(
+ 'You added one or more header files without an appropriate\n'
+ 'include guard. Add the include guard {#ifndef,#define,#endif}\n'
+ 'triplet or omit the check entirely through the magic comment:\n'
+ '"// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD".', problems)]
+ else:
+ return []
+
+
# TODO(mstarzinger): Similar checking should be made available as part of
# tools/presubmit.py (note that tools/check-inline-includes.sh exists).
def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
@@ -230,44 +286,10 @@ def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
return []
-def _CheckMissingFiles(input_api, output_api):
- """Runs verify_source_deps.py to ensure no files were added that are not in
- GN.
- """
- # We need to wait until we have an input_api object and use this
- # roundabout construct to import checkdeps because this file is
- # eval-ed and thus doesn't have __file__.
- original_sys_path = sys.path
- try:
- sys.path = sys.path + [input_api.os_path.join(
- input_api.PresubmitLocalPath(), 'tools')]
- from verify_source_deps import missing_gn_files, missing_gyp_files
- finally:
- # Restore sys.path to what it was before.
- sys.path = original_sys_path
-
- gn_files = missing_gn_files()
- gyp_files = missing_gyp_files()
- results = []
- if gn_files:
- results.append(output_api.PresubmitError(
- "You added one or more source files but didn't update the\n"
- "corresponding BUILD.gn files:\n",
- gn_files))
- if gyp_files:
- results.append(output_api.PresubmitError(
- "You added one or more source files but didn't update the\n"
- "corresponding gyp files:\n",
- gyp_files))
- return results
-
-
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckCommitMessageBugEntry(input_api, output_api))
- results.extend(input_api.canned_checks.CheckOwners(
- input_api, output_api, source_file_filter=None))
results.extend(input_api.canned_checks.CheckPatchFormatted(
input_api, output_api))
results.extend(input_api.canned_checks.CheckGenderNeutral(
@@ -276,9 +298,9 @@ def _CommonChecks(input_api, output_api):
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
+ results.extend(_CheckHeadersHaveIncludeGuards(input_api, output_api))
results.extend(
_CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
- results.extend(_CheckMissingFiles(input_api, output_api))
results.extend(_CheckJSONFiles(input_api, output_api))
results.extend(_CheckMacroUndefs(input_api, output_api))
results.extend(input_api.RunTests(
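
The new `_CheckHeadersHaveIncludeGuards` check derives the expected guard macro purely from the header's path, then requires the matching `#ifndef`/`#define`/`#endif` triplet (or the magic skip comment). A standalone sketch of the same path-to-macro transformation, mirroring the two regexes in the hunk above, handy for predicting what the presubmit will demand:

    import re

    def path_to_guard_macro(path):
        # 'src/' -> 'v8_', then '-', '.' and '/' -> '_', then uppercase
        # with a trailing underscore, as in PathToGuardMacro above.
        x = re.sub(r'^src/', 'v8_', path)
        x = re.sub(r'[-./]', '_', x)
        return x.upper() + '_'

    print(path_to_guard_macro('src/heap/heap-inl.h'))
    # -> V8_HEAP_HEAP_INL_H_
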
diff --git a/deps/v8/gypfiles/win/msvs_dependencies.isolate b/deps/v8/gni/msvs_dependencies.isolate
index 2859126659..2859126659 100644
--- a/deps/v8/gypfiles/win/msvs_dependencies.isolate
+++ b/deps/v8/gni/msvs_dependencies.isolate
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index ce9953ac68..16a124dce8 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -33,7 +33,9 @@ declare_args() {
# Enable the snapshot feature, for fast context creation.
# http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
- v8_use_snapshot = true
+ # TODO(thakis): Make snapshots work in 64-bit win/cross builds,
+ # https://803591
+ v8_use_snapshot = !(is_win && host_os != "win" && target_cpu == "x64")
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
@@ -105,6 +107,12 @@ if (is_posix && (v8_enable_backtrace || v8_monolithic)) {
v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ]
}
+# On MIPS gcc_target_rpath and ldso_path might be needed for all builds.
+if (target_cpu == "mipsel" || target_cpu == "mips64el" ||
+ target_cpu == "mips" || target_cpu == "mips64") {
+ v8_add_configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
+}
+
# All templates should be kept in sync.
template("v8_source_set") {
if (defined(invoker.split_count) && invoker.split_count > 1 &&
diff --git a/deps/v8/gypfiles/all.gyp b/deps/v8/gypfiles/all.gyp
index 2f9cf858c0..5592ee1b36 100644
--- a/deps/v8/gypfiles/all.gyp
+++ b/deps/v8/gypfiles/all.gyp
@@ -8,46 +8,23 @@
'target_name': 'All',
'type': 'none',
'dependencies': [
- '../src/d8.gyp:d8',
- '../test/inspector/inspector.gyp:*',
- '../test/mkgrokdump/mkgrokdump.gyp:*',
+ 'd8.gyp:d8',
+ 'inspector-test.gyp:*',
+ 'mkgrokdump.gyp:*',
],
'conditions': [
['component!="shared_library"', {
'dependencies': [
- '../tools/parser-shell.gyp:parser-shell',
+ 'parser-shell.gyp:parser-shell',
],
}],
# These items don't compile for Android on Mac.
['host_os!="mac" or OS!="android"', {
'dependencies': [
- '../samples/samples.gyp:*',
- '../test/cctest/cctest.gyp:*',
- '../test/fuzzer/fuzzer.gyp:*',
- '../test/unittests/unittests.gyp:*',
- ],
- }],
- ['test_isolation_mode != "noop"', {
- 'dependencies': [
- '../test/bot_default.gyp:*',
- '../test/benchmarks/benchmarks.gyp:*',
- '../test/debugger/debugger.gyp:*',
- '../test/default.gyp:*',
- '../test/d8_default.gyp:*',
- '../test/intl/intl.gyp:*',
- '../test/message/message.gyp:*',
- '../test/mjsunit/mjsunit.gyp:*',
- '../test/mozilla/mozilla.gyp:*',
- '../test/optimize_for_size.gyp:*',
- '../test/perf.gyp:*',
- '../test/preparser/preparser.gyp:*',
- '../test/test262/test262.gyp:*',
- '../test/webkit/webkit.gyp:*',
- '../tools/check-static-initializers.gyp:*',
- '../tools/gcmole/run_gcmole.gyp:*',
- '../tools/jsfunfuzz/jsfunfuzz.gyp:*',
- '../tools/run-deopt-fuzzer.gyp:*',
- '../tools/run-num-fuzzer.gyp:*',
+ 'samples.gyp:*',
+ 'cctest.gyp:*',
+ 'fuzzer.gyp:*',
+ 'unittests.gyp:*',
],
}],
]
diff --git a/deps/v8/gypfiles/cctest.gyp b/deps/v8/gypfiles/cctest.gyp
new file mode 100644
index 0000000000..6e7fddae0b
--- /dev/null
+++ b/deps/v8/gypfiles/cctest.gyp
@@ -0,0 +1,468 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ 'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
+ 'cctest_sources': [
+ '../test/cctest/compiler/c-signature.h',
+ '../test/cctest/compiler/call-tester.h',
+ '../test/cctest/compiler/codegen-tester.cc',
+ '../test/cctest/compiler/codegen-tester.h',
+ '../test/cctest/compiler/code-assembler-tester.h',
+ '../test/cctest/compiler/function-tester.cc',
+ '../test/cctest/compiler/function-tester.h',
+ '../test/cctest/compiler/graph-builder-tester.h',
+ '../test/cctest/compiler/test-basic-block-profiler.cc',
+ '../test/cctest/compiler/test-branch-combine.cc',
+ '../test/cctest/compiler/test-run-unwinding-info.cc',
+ '../test/cctest/compiler/test-gap-resolver.cc',
+ '../test/cctest/compiler/test-graph-visualizer.cc',
+ '../test/cctest/compiler/test-code-generator.cc',
+ '../test/cctest/compiler/test-code-assembler.cc',
+ '../test/cctest/compiler/test-instruction.cc',
+ '../test/cctest/compiler/test-js-context-specialization.cc',
+ '../test/cctest/compiler/test-js-constant-cache.cc',
+ '../test/cctest/compiler/test-js-typed-lowering.cc',
+ '../test/cctest/compiler/test-jump-threading.cc',
+ '../test/cctest/compiler/test-linkage.cc',
+ '../test/cctest/compiler/test-loop-analysis.cc',
+ '../test/cctest/compiler/test-machine-operator-reducer.cc',
+ '../test/cctest/compiler/test-multiple-return.cc',
+ '../test/cctest/compiler/test-node.cc',
+ '../test/cctest/compiler/test-operator.cc',
+ '../test/cctest/compiler/test-representation-change.cc',
+ '../test/cctest/compiler/test-run-bytecode-graph-builder.cc',
+ '../test/cctest/compiler/test-run-calls-to-external-references.cc',
+ '../test/cctest/compiler/test-run-deopt.cc',
+ '../test/cctest/compiler/test-run-intrinsics.cc',
+ '../test/cctest/compiler/test-run-jsbranches.cc',
+ '../test/cctest/compiler/test-run-jscalls.cc',
+ '../test/cctest/compiler/test-run-jsexceptions.cc',
+ '../test/cctest/compiler/test-run-jsobjects.cc',
+ '../test/cctest/compiler/test-run-jsops.cc',
+ '../test/cctest/compiler/test-run-load-store.cc',
+ '../test/cctest/compiler/test-run-machops.cc',
+ '../test/cctest/compiler/test-run-native-calls.cc',
+ '../test/cctest/compiler/test-run-retpoline.cc',
+ '../test/cctest/compiler/test-run-stackcheck.cc',
+ '../test/cctest/compiler/test-run-stubs.cc',
+ '../test/cctest/compiler/test-run-tail-calls.cc',
+ '../test/cctest/compiler/test-run-variables.cc',
+ '../test/cctest/compiler/test-run-wasm-machops.cc',
+ '../test/cctest/compiler/value-helper.cc',
+ '../test/cctest/compiler/value-helper.h',
+ '../test/cctest/cctest.cc',
+ '../test/cctest/cctest.h',
+ '../test/cctest/expression-type-collector-macros.h',
+ '../test/cctest/gay-fixed.cc',
+ '../test/cctest/gay-fixed.h',
+ '../test/cctest/gay-precision.cc',
+ '../test/cctest/gay-precision.h',
+ '../test/cctest/gay-shortest.cc',
+ '../test/cctest/gay-shortest.h',
+ '../test/cctest/heap/heap-tester.h',
+ '../test/cctest/heap/heap-utils.cc',
+ '../test/cctest/heap/heap-utils.h',
+ '../test/cctest/heap/test-alloc.cc',
+ '../test/cctest/heap/test-array-buffer-tracker.cc',
+ '../test/cctest/heap/test-compaction.cc',
+ '../test/cctest/heap/test-concurrent-marking.cc',
+ '../test/cctest/heap/test-embedder-tracing.cc',
+ '../test/cctest/heap/test-heap.cc',
+ '../test/cctest/heap/test-incremental-marking.cc',
+ '../test/cctest/heap/test-invalidated-slots.cc',
+ '../test/cctest/heap/test-lab.cc',
+ '../test/cctest/heap/test-mark-compact.cc',
+ '../test/cctest/heap/test-page-promotion.cc',
+ '../test/cctest/heap/test-spaces.cc',
+ '../test/cctest/interpreter/interpreter-tester.cc',
+ '../test/cctest/interpreter/interpreter-tester.h',
+ '../test/cctest/interpreter/source-position-matcher.cc',
+ '../test/cctest/interpreter/source-position-matcher.h',
+ '../test/cctest/interpreter/test-bytecode-generator.cc',
+ '../test/cctest/interpreter/test-interpreter.cc',
+ '../test/cctest/interpreter/test-interpreter-intrinsics.cc',
+ '../test/cctest/interpreter/test-source-positions.cc',
+ '../test/cctest/interpreter/bytecode-expectations-printer.cc',
+ '../test/cctest/interpreter/bytecode-expectations-printer.h',
+ '../test/cctest/libplatform/test-tracing.cc',
+ '../test/cctest/libsampler/test-sampler.cc',
+ '../test/cctest/parsing/test-parse-decision.cc',
+ '../test/cctest/parsing/test-preparser.cc',
+ '../test/cctest/parsing/test-scanner-streams.cc',
+ '../test/cctest/parsing/test-scanner.cc',
+ '../test/cctest/print-extension.cc',
+ '../test/cctest/print-extension.h',
+ '../test/cctest/profiler-extension.cc',
+ '../test/cctest/profiler-extension.h',
+ '../test/cctest/scope-test-helper.h',
+ '../test/cctest/setup-isolate-for-tests.cc',
+ '../test/cctest/setup-isolate-for-tests.h',
+ '../test/cctest/test-access-checks.cc',
+ '../test/cctest/test-accessor-assembler.cc',
+ '../test/cctest/test-accessors.cc',
+ '../test/cctest/test-allocation.cc',
+ '../test/cctest/test-api.cc',
+ '../test/cctest/test-api.h',
+ '../test/cctest/test-api-accessors.cc',
+ '../test/cctest/test-api-interceptors.cc',
+ '../test/cctest/test-array-list.cc',
+ '../test/cctest/test-atomicops.cc',
+ '../test/cctest/test-bignum.cc',
+ '../test/cctest/test-bignum-dtoa.cc',
+ '../test/cctest/test-bit-vector.cc',
+ '../test/cctest/test-circular-queue.cc',
+ '../test/cctest/test-code-layout.cc',
+ '../test/cctest/test-code-stub-assembler.cc',
+ '../test/cctest/test-compiler.cc',
+ '../test/cctest/test-constantpool.cc',
+ '../test/cctest/test-conversions.cc',
+ '../test/cctest/test-cpu-profiler.cc',
+ '../test/cctest/test-date.cc',
+ '../test/cctest/test-debug.cc',
+ '../test/cctest/test-decls.cc',
+ '../test/cctest/test-deoptimization.cc',
+ '../test/cctest/test-dictionary.cc',
+ '../test/cctest/test-diy-fp.cc',
+ '../test/cctest/test-double.cc',
+ '../test/cctest/test-dtoa.cc',
+ '../test/cctest/test-elements-kind.cc',
+ '../test/cctest/test-fast-dtoa.cc',
+ '../test/cctest/test-feedback-vector.cc',
+ '../test/cctest/test-feedback-vector.h',
+ '../test/cctest/test-field-type-tracking.cc',
+ '../test/cctest/test-fixed-dtoa.cc',
+ '../test/cctest/test-flags.cc',
+ '../test/cctest/test-func-name-inference.cc',
+ '../test/cctest/test-global-handles.cc',
+ '../test/cctest/test-global-object.cc',
+ '../test/cctest/test-hashcode.cc',
+ '../test/cctest/test-hashmap.cc',
+ '../test/cctest/test-heap-profiler.cc',
+ '../test/cctest/test-identity-map.cc',
+ '../test/cctest/test-intl.cc',
+ '../test/cctest/test-inobject-slack-tracking.cc',
+ '../test/cctest/test-isolate-independent-builtins.cc',
+ '../test/cctest/test-liveedit.cc',
+ '../test/cctest/test-lockers.cc',
+ '../test/cctest/test-log.cc',
+ '../test/cctest/test-managed.cc',
+ '../test/cctest/test-mementos.cc',
+ '../test/cctest/test-modules.cc',
+ '../test/cctest/test-object.cc',
+ '../test/cctest/test-orderedhashtable.cc',
+ '../test/cctest/test-parsing.cc',
+ '../test/cctest/test-platform.cc',
+ '../test/cctest/test-profile-generator.cc',
+ '../test/cctest/test-random-number-generator.cc',
+ '../test/cctest/test-regexp.cc',
+ '../test/cctest/test-representation.cc',
+ '../test/cctest/test-sampler-api.cc',
+ '../test/cctest/test-serialize.cc',
+ '../test/cctest/test-strings.cc',
+ '../test/cctest/test-symbols.cc',
+ '../test/cctest/test-strtod.cc',
+ '../test/cctest/test-thread-termination.cc',
+ '../test/cctest/test-threads.cc',
+ '../test/cctest/test-trace-event.cc',
+ '../test/cctest/test-traced-value.cc',
+ '../test/cctest/test-transitions.cc',
+ '../test/cctest/test-transitions.h',
+ '../test/cctest/test-typedarrays.cc',
+ '../test/cctest/test-types.cc',
+ '../test/cctest/test-unbound-queue.cc',
+ '../test/cctest/test-unboxed-doubles.cc',
+ '../test/cctest/test-unscopables-hidden-prototype.cc',
+ '../test/cctest/test-usecounters.cc',
+ '../test/cctest/test-utils.cc',
+ '../test/cctest/test-version.cc',
+ '../test/cctest/test-weakmaps.cc',
+ '../test/cctest/test-weaksets.cc',
+ '../test/cctest/trace-extension.cc',
+ '../test/cctest/trace-extension.h',
+ '../test/cctest/types-fuzz.h',
+ '../test/cctest/unicode-helpers.h',
+ '../test/cctest/wasm/test-c-wasm-entry.cc',
+ '../test/cctest/wasm/test-streaming-compilation.cc',
+ '../test/cctest/wasm/test-run-wasm.cc',
+ '../test/cctest/wasm/test-run-wasm-64.cc',
+ '../test/cctest/wasm/test-run-wasm-asmjs.cc',
+ '../test/cctest/wasm/test-run-wasm-atomics.cc',
+ '../test/cctest/wasm/test-run-wasm-interpreter.cc',
+ '../test/cctest/wasm/test-run-wasm-js.cc',
+ '../test/cctest/wasm/test-run-wasm-module.cc',
+ '../test/cctest/wasm/test-run-wasm-relocation.cc',
+ '../test/cctest/wasm/test-run-wasm-sign-extension.cc',
+ '../test/cctest/wasm/test-run-wasm-simd.cc',
+ '../test/cctest/wasm/test-wasm-breakpoints.cc',
+    '../test/cctest/wasm/test-wasm-codegen.cc',
+ '../test/cctest/wasm/test-wasm-interpreter-entry.cc',
+ '../test/cctest/wasm/test-wasm-stack.cc',
+ '../test/cctest/wasm/test-wasm-trap-position.cc',
+ '../test/cctest/wasm/wasm-run-utils.cc',
+ '../test/cctest/wasm/wasm-run-utils.h',
+ ],
+ 'cctest_sources_ia32': [
+ '../test/cctest/test-assembler-ia32.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-code-stubs-ia32.cc',
+ '../test/cctest/test-disasm-ia32.cc',
+ '../test/cctest/test-log-stack-tracer.cc',
+ '../test/cctest/test-run-wasm-relocation-ia32.cc',
+ ],
+ 'cctest_sources_x64': [
+ '../test/cctest/test-assembler-x64.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-code-stubs-x64.cc',
+ '../test/cctest/test-disasm-x64.cc',
+ '../test/cctest/test-macro-assembler-x64.cc',
+ '../test/cctest/test-log-stack-tracer.cc',
+ '../test/cctest/test-run-wasm-relocation-x64.cc',
+ ],
+ 'cctest_sources_arm': [
+ '../test/cctest/assembler-helper-arm.cc',
+ '../test/cctest/assembler-helper-arm.h',
+ '../test/cctest/test-assembler-arm.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-code-stubs-arm.cc',
+ '../test/cctest/test-disasm-arm.cc',
+ '../test/cctest/test-macro-assembler-arm.cc',
+ '../test/cctest/test-run-wasm-relocation-arm.cc',
+ '../test/cctest/test-sync-primitives-arm.cc',
+ ],
+ 'cctest_sources_arm64': [
+ '../test/cctest/test-utils-arm64.cc',
+ '../test/cctest/test-utils-arm64.h',
+ '../test/cctest/test-assembler-arm64.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-code-stubs-arm64.cc',
+ '../test/cctest/test-disasm-arm64.cc',
+ '../test/cctest/test-fuzz-arm64.cc',
+ '../test/cctest/test-javascript-arm64.cc',
+ '../test/cctest/test-js-arm64-variables.cc',
+ '../test/cctest/test-run-wasm-relocation-arm64.cc',
+ '../test/cctest/test-sync-primitives-arm64.cc',
+ ],
+ 'cctest_sources_s390': [
+ '../test/cctest/test-assembler-s390.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-disasm-s390.cc',
+ ],
+ 'cctest_sources_ppc': [
+ '../test/cctest/test-assembler-ppc.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-disasm-ppc.cc',
+ ],
+ 'cctest_sources_mips': [
+ '../test/cctest/test-assembler-mips.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-code-stubs-mips.cc',
+ '../test/cctest/test-disasm-mips.cc',
+ '../test/cctest/test-macro-assembler-mips.cc',
+ ],
+ 'cctest_sources_mipsel': [
+ '../test/cctest/test-assembler-mips.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-code-stubs-mips.cc',
+ '../test/cctest/test-disasm-mips.cc',
+ '../test/cctest/test-macro-assembler-mips.cc',
+ ],
+ 'cctest_sources_mips64': [
+ '../test/cctest/test-assembler-mips64.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-code-stubs-mips64.cc',
+ '../test/cctest/test-disasm-mips64.cc',
+ '../test/cctest/test-macro-assembler-mips64.cc',
+ ],
+ 'cctest_sources_mips64el': [
+ '../test/cctest/test-assembler-mips64.cc',
+ '../test/cctest/test-code-stubs.cc',
+ '../test/cctest/test-code-stubs.h',
+ '../test/cctest/test-code-stubs-mips64.cc',
+ '../test/cctest/test-disasm-mips64.cc',
+ '../test/cctest/test-macro-assembler-mips64.cc',
+ ],
+ },
+ 'includes': ['toolchain.gypi', 'features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'cctest',
+ 'type': 'executable',
+ 'dependencies': [
+ 'resources',
+ 'v8.gyp:v8_libbase',
+ 'v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ '../test/common/wasm/flag-utils.h',
+ '../test/common/wasm/test-signatures.h',
+ '../test/common/wasm/wasm-macro-gen.h',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '<@(cctest_sources)',
+ '<(generated_file)',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="ia32"', {
+ 'sources': [
+ '<@(cctest_sources_ia32)',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'sources': [
+ '<@(cctest_sources_x64)',
+ ],
+ }],
+ ['v8_target_arch=="arm"', {
+ 'sources': [
+ '<@(cctest_sources_arm)',
+ ],
+ }],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [
+ '<@(cctest_sources_arm64)',
+ ],
+ }],
+ ['v8_target_arch=="s390"', {
+ 'sources': [
+ '<@(cctest_sources_s390)',
+ ],
+ }],
+ ['v8_target_arch=="s390x"', {
+ 'sources': [
+ '<@(cctest_sources_s390)',
+ ],
+ }],
+ ['v8_target_arch=="ppc"', {
+ 'sources': [
+ '<@(cctest_sources_ppc)',
+ ],
+ }],
+ ['v8_target_arch=="ppc64"', {
+ 'sources': [
+ '<@(cctest_sources_ppc)',
+ ],
+ }],
+ ['v8_target_arch=="mips"', {
+ 'sources': [
+ '<@(cctest_sources_mips)',
+ ],
+ }],
+ ['v8_target_arch=="mipsel"', {
+ 'sources': [
+ '<@(cctest_sources_mipsel)',
+ ],
+ }],
+ ['v8_target_arch=="mips64"', {
+ 'sources': [
+ '<@(cctest_sources_mips64)',
+ ],
+ }],
+ ['v8_target_arch=="mips64el"', {
+ 'sources': [
+ '<@(cctest_sources_mips64el)',
+ ],
+ }],
+ [ 'OS=="win"', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ # MSVS wants this for gay-{precision,shortest}.cc.
+ 'AdditionalOptions': ['/bigobj'],
+ },
+ },
+ }],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64" \
+ or v8_target_arch=="arm" or v8_target_arch=="arm64" \
+ or v8_target_arch=="s390" or v8_target_arch=="s390x" \
+ or v8_target_arch=="mips" or v8_target_arch=="mips64" \
+ or v8_target_arch=="mipsel" or v8_target_arch=="mips64el"', {
+ # disable fmadd/fmsub so that expected results match generated code in
+ # RunFloat64MulAndFloat64Add1 and friends.
+ 'cflags': ['-ffp-contract=off'],
+ }],
+ ['OS=="aix"', {
+ 'ldflags': [ '-Wl,-bbigtoc' ],
+ }],
+ ['component=="shared_library"', {
+ # cctest can't be built against a shared library, so we need to
+ # depend on the underlying static target in that case.
+ 'dependencies': ['v8.gyp:v8_maybe_snapshot'],
+ 'defines': [ 'BUILDING_V8_SHARED', ]
+ }, {
+ 'dependencies': ['v8.gyp:v8'],
+ }],
+ ['v8_use_snapshot=="true"', {
+ 'dependencies': ['v8.gyp:v8_initializers'],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'resources',
+ 'type': 'none',
+ 'variables': {
+ 'file_list': [
+ '../tools/splaytree.js',
+ '../tools/codemap.js',
+ '../tools/csvparser.js',
+ '../tools/consarray.js',
+ '../tools/profile.js',
+ '../tools/profile_view.js',
+ '../tools/arguments.js',
+ '../tools/logreader.js',
+ '../test/cctest/log-eq-of-logging-and-traversal.js',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'js2c',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(file_list)',
+ ],
+ 'outputs': [
+ '<(generated_file)',
+ ],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<@(_outputs)',
+ 'TEST', # type
+ '<@(file_list)',
+ ],
+ }
+ ],
+ },
+ {
+ 'target_name': 'generate-bytecode-expectations',
+ 'type': 'executable',
+ 'dependencies': [
+ 'v8.gyp:v8',
+ 'v8.gyp:v8_libbase',
+ 'v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs+': [
+ '..',
+ ],
+ 'sources': [
+ '../test/cctest/interpreter/bytecode-expectations-printer.cc',
+ '../test/cctest/interpreter/bytecode-expectations-printer.h',
+ '../test/cctest/interpreter/generate-bytecode-expectations.cc',
+ ],
+ },
+ ],
+}
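
Note: the 'resources' target above is a thin wrapper around tools/js2c.py; the action's argv is exactly output file, the literal type 'TEST', then the input list. A minimal sketch of the equivalent manual invocation, run from deps/v8/gypfiles — 'out/resources.cc' is a hypothetical stand-in for GYP's <(generated_file) substitution:

    # Hedged sketch: manual equivalent of the 'js2c' action above.
    # 'out/resources.cc' stands in for <(generated_file); file_list is
    # abbreviated -- the full list is in the 'resources' target.
    import subprocess

    file_list = [
        '../tools/splaytree.js',
        '../tools/codemap.js',
        # ...remaining entries from 'file_list' above...
        '../test/cctest/log-eq-of-logging-and-traversal.js',
    ]
    subprocess.check_call(
        ['python', '../tools/js2c.py', 'out/resources.cc', 'TEST'] + file_list)
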
diff --git a/deps/v8/gypfiles/coverage_wrapper.py b/deps/v8/gypfiles/coverage_wrapper.py
index d5fdee43cf..d5fdee43cf 100755..100644
--- a/deps/v8/gypfiles/coverage_wrapper.py
+++ b/deps/v8/gypfiles/coverage_wrapper.py
diff --git a/deps/v8/src/d8.gyp b/deps/v8/gypfiles/d8.gyp
index e6a40212cd..f593a2b044 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/gypfiles/d8.gyp
@@ -1,29 +1,6 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
{
'variables': {
@@ -32,7 +9,7 @@
'v8_enable_vtunejit%': 0,
'v8_enable_i18n_support%': 1,
},
- 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
+ 'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'd8',
@@ -48,10 +25,10 @@
'<(DEPTH)',
],
'sources': [
- 'd8.h',
- 'd8.cc',
- 'd8-console.h',
- 'd8-console.cc',
+ '../src/d8.h',
+ '../src/d8.cc',
+ '../src/d8-console.h',
+ '../src/d8-console.cc',
'<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
],
'conditions': [
@@ -68,10 +45,10 @@
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android" \
or OS=="qnx" or OS=="aix")', {
- 'sources': [ 'd8-posix.cc', ]
+ 'sources': [ '../src/d8-posix.cc', ]
}],
[ 'OS=="win"', {
- 'sources': [ 'd8-windows.cc', ]
+ 'sources': [ '../src/d8-windows.cc', ]
}],
[ 'component!="shared_library"', {
'conditions': [
@@ -86,7 +63,7 @@
}],
['v8_enable_vtunejit==1', {
'dependencies': [
- '../src/third_party/vtune/v8vtune.gyp:v8_vtune',
+ 'v8vtune.gyp:v8_vtune',
],
}],
['v8_enable_i18n_support==1', {
@@ -107,8 +84,8 @@
'type': 'none',
'variables': {
'js_files': [
- 'd8.js',
- 'js/macros.py',
+ '../src/d8.js',
+ '../src/js/macros.py',
],
},
'conditions': [
@@ -139,23 +116,4 @@
],
},
],
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'd8_run',
- 'type': 'none',
- 'dependencies': [
- 'd8',
- ],
- 'includes': [
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'd8.isolate',
- ],
- },
- ],
- }],
- ],
}
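
Note: GYP resolves relative paths against the directory of the .gyp file that declares them, which is why moving d8.gyp from src/ to gypfiles/ forces every source entry to be rebased ('d8.cc' becomes '../src/d8.cc'). A small illustrative helper (the function name is mine, not part of the tree):

    # Illustrative only: rebase a path declared relative to the old .gyp
    # location so it stays correct from the new location.
    import os

    def rebase(path, old_gyp_dir, new_gyp_dir):
        absolute = os.path.normpath(os.path.join(old_gyp_dir, path))
        return os.path.relpath(absolute, new_gyp_dir)

    print(rebase('d8.cc', 'deps/v8/src', 'deps/v8/gypfiles'))  # -> ../src/d8.cc
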
diff --git a/deps/v8/gypfiles/features.gypi b/deps/v8/gypfiles/features.gypi
index 8c99b4f086..69ff763be0 100644
--- a/deps/v8/gypfiles/features.gypi
+++ b/deps/v8/gypfiles/features.gypi
@@ -87,6 +87,9 @@
# Enable concurrent marking.
'v8_enable_concurrent_marking%': 1,
+ # Enables various testing features.
+ 'v8_enable_test_features%': 0,
+
# Controls the threshold for on-heap/off-heap Typed Arrays.
'v8_typed_array_max_size_in_heap%': 64,
@@ -119,6 +122,9 @@
['v8_trace_maps==1', {
'defines': ['V8_TRACE_MAPS',],
}],
+ ['v8_enable_test_features==1', {
+ 'defines': ['V8_ENABLE_ALLOCATION_TIMEOUT', 'V8_ENABLE_FORCE_SLOW_PATH'],
+ }],
['v8_enable_verify_predictable==1', {
'defines': ['VERIFY_PREDICTABLE',],
}],
@@ -164,7 +170,7 @@
}, # Debug
'Release': {
'variables': {
- 'v8_enable_handle_zapping%': 0,
+ 'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/gypfiles/fuzzer.gyp
index 0c54211290..f65b7dc6d0 100644
--- a/deps/v8/test/fuzzer/fuzzer.gyp
+++ b/deps/v8/gypfiles/fuzzer.gyp
@@ -6,7 +6,7 @@
'variables': {
'v8_code': 1,
},
- 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
+ 'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'v8_simple_json_fuzzer',
@@ -15,24 +15,24 @@
'json_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'json_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'json.cc',
+ 'sources': [
+ '../test/fuzzer/json.cc',
],
},
{
@@ -42,175 +42,171 @@
'parser_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'parser_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'parser.cc',
+ 'sources': [
+ '../test/fuzzer/parser.cc',
],
},
{
- 'target_name': 'v8_simple_regexp_fuzzer',
+ 'target_name': 'v8_simple_regexp_builtins_fuzzer',
'type': 'executable',
'dependencies': [
- 'regexp_fuzzer_lib',
+ 'regexp_builtins_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
- 'target_name': 'regexp_fuzzer_lib',
+ 'target_name': 'regexp_builtins_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'regexp.cc',
+ 'sources': [
+ '../test/fuzzer/regexp-builtins.cc',
+ '../test/fuzzer/regexp_builtins/mjsunit.js.h',
],
},
{
- 'target_name': 'v8_simple_multi_return_fuzzer',
+ 'target_name': 'v8_simple_regexp_fuzzer',
'type': 'executable',
'dependencies': [
- 'multi_return_fuzzer_lib',
+ 'regexp_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
- 'target_name': 'multi_return_fuzzer_lib',
+ 'target_name': 'regexp_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- '../compiler/c-signature.h',
- '../compiler/call-helper.h',
- '../compiler/raw-machine-assembler-tester.h',
- 'multi-return.cc',
+ 'sources': [
+ '../test/fuzzer/regexp.cc',
],
},
{
- 'target_name': 'v8_simple_wasm_fuzzer',
+ 'target_name': 'v8_simple_multi_return_fuzzer',
'type': 'executable',
'dependencies': [
- 'wasm_fuzzer_lib',
+ 'multi_return_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
- 'target_name': 'wasm_fuzzer_lib',
+ 'target_name': 'multi_return_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/cctest/compiler/c-signature.h',
+ '../test/cctest/compiler/call-helper.h',
+ '../test/cctest/compiler/raw-machine-assembler-tester.h',
+ '../test/fuzzer/multi-return.cc',
],
},
{
- 'target_name': 'v8_simple_wasm_async_fuzzer',
+ 'target_name': 'v8_simple_wasm_fuzzer',
'type': 'executable',
'dependencies': [
- 'wasm_async_fuzzer_lib',
+ 'wasm_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
- 'target_name': 'wasm_async_fuzzer_lib',
+ 'target_name': 'wasm_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-async.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
- 'target_name': 'v8_simple_wasm_call_fuzzer',
+ 'target_name': 'v8_simple_wasm_async_fuzzer',
'type': 'executable',
'dependencies': [
- 'wasm_call_fuzzer_lib',
+ 'wasm_async_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
- 'target_name': 'wasm_call_fuzzer_lib',
+ 'target_name': 'wasm_async_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-call.cc',
- '../common/wasm/test-signatures.h',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-async.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -220,29 +216,29 @@
'wasm_code_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_code_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-code.cc',
- '../common/wasm/test-signatures.h',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-code.cc',
+ '../test/common/wasm/test-signatures.h',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -252,29 +248,29 @@
'wasm_compile_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_compile_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-compile.cc',
- '../common/wasm/test-signatures.h',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-compile.cc',
+ '../test/common/wasm/test-signatures.h',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -284,28 +280,28 @@
'wasm_data_section_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_data_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-data-section.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-data-section.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -315,28 +311,28 @@
'wasm_function_sigs_section_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_function_sigs_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-function-sigs-section.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-function-sigs-section.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -346,28 +342,28 @@
'wasm_globals_section_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_globals_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-globals-section.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-globals-section.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -377,28 +373,28 @@
'wasm_imports_section_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_imports_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-imports-section.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-imports-section.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -408,28 +404,28 @@
'wasm_memory_section_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_memory_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-memory-section.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-memory-section.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -439,28 +435,28 @@
'wasm_names_section_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_names_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-names-section.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-names-section.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@@ -470,44 +466,44 @@
'wasm_types_section_fuzzer_lib',
],
'include_dirs': [
- '../..',
+ '..',
],
'sources': [
- 'fuzzer.cc',
+ '../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_types_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'wasm-types-section.cc',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- 'wasm-fuzzer-common.cc',
- 'wasm-fuzzer-common.h',
+ 'sources': [
+ '../test/fuzzer/wasm-types-section.cc',
+ '../test/common/wasm/wasm-module-runner.cc',
+ '../test/common/wasm/wasm-module-runner.h',
+ '../test/fuzzer/wasm-fuzzer-common.cc',
+ '../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
'target_name': 'fuzzer_support',
'type': 'static_library',
'dependencies': [
- '../../src/v8.gyp:v8',
- '../../src/v8.gyp:v8_libbase',
- '../../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8',
+ 'v8.gyp:v8_libbase',
+ 'v8.gyp:v8_libplatform',
],
'include_dirs': [
- '../..',
+ '..',
],
- 'sources': [ ### gcmole(all) ###
- 'fuzzer-support.cc',
- 'fuzzer-support.h',
+ 'sources': [
+ '../test/fuzzer/fuzzer-support.cc',
+ '../test/fuzzer/fuzzer-support.h',
],
'conditions': [
['v8_enable_i18n_support==1', {
@@ -519,26 +515,4 @@
],
},
],
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'fuzzer_run',
- 'type': 'none',
- 'dependencies': [
- 'v8_simple_json_fuzzer',
- 'v8_simple_parser_fuzzer',
- 'v8_simple_regexp_fuzzer',
- 'v8_simple_wasm_fuzzer',
- ],
- 'includes': [
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'fuzzer.isolate',
- ],
- },
- ],
- }],
- ],
}
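
Note: every fuzzer in this file follows the same two-target pattern — a 'v8_simple_<name>_fuzzer' executable compiling the shared driver '../test/fuzzer/fuzzer.cc', paired with a '<name>_fuzzer_lib' static library holding the fuzzer body. A sketch of a generator for such pairs (hypothetical helper, shown only to make the repetition explicit):

    # Hypothetical helper: emit the executable/static_library pair used
    # for each fuzzer target in this file.
    def fuzzer_targets(name, lib_sources):
        return [
            {
                'target_name': 'v8_simple_%s_fuzzer' % name,
                'type': 'executable',
                'dependencies': ['%s_fuzzer_lib' % name],
                'include_dirs': ['..'],
                'sources': ['../test/fuzzer/fuzzer.cc'],
            },
            {
                'target_name': '%s_fuzzer_lib' % name,
                'type': 'static_library',
                'dependencies': ['v8.gyp:v8_libplatform', 'fuzzer_support'],
                'include_dirs': ['..'],
                'sources': lib_sources,
            },
        ]

    targets = fuzzer_targets('json', ['../test/fuzzer/json.cc'])
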
diff --git a/deps/v8/gypfiles/gmock.gyp b/deps/v8/gypfiles/gmock.gyp
new file mode 100644
index 0000000000..a80387298b
--- /dev/null
+++ b/deps/v8/gypfiles/gmock.gyp
@@ -0,0 +1,72 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'gmock',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'gtest.gyp:gtest',
+ ],
+ 'sources': [
+ # Sources based on files in r173 of gmock.
+ '../testing/gmock/include/gmock/gmock-actions.h',
+ '../testing/gmock/include/gmock/gmock-cardinalities.h',
+ '../testing/gmock/include/gmock/gmock-generated-actions.h',
+ '../testing/gmock/include/gmock/gmock-generated-function-mockers.h',
+ '../testing/gmock/include/gmock/gmock-generated-matchers.h',
+ '../testing/gmock/include/gmock/gmock-generated-nice-strict.h',
+ '../testing/gmock/include/gmock/gmock-matchers.h',
+ '../testing/gmock/include/gmock/gmock-spec-builders.h',
+ '../testing/gmock/include/gmock/gmock.h',
+ '../testing/gmock/include/gmock/internal/gmock-generated-internal-utils.h',
+ '../testing/gmock/include/gmock/internal/gmock-internal-utils.h',
+ '../testing/gmock/include/gmock/internal/gmock-port.h',
+ '../testing/gmock/src/gmock-all.cc',
+ '../testing/gmock/src/gmock-cardinalities.cc',
+ '../testing/gmock/src/gmock-internal-utils.cc',
+ '../testing/gmock/src/gmock-matchers.cc',
+ '../testing/gmock/src/gmock-spec-builders.cc',
+ '../testing/gmock/src/gmock.cc',
+ '../testing/gmock-support.h', # gMock helpers
+ '../testing/gmock_custom/gmock/internal/custom/gmock-port.h',
+ ],
+ 'sources!': [
+ '../testing/gmock/src/gmock-all.cc', # Not needed by our build.
+ ],
+ 'include_dirs': [
+ '../testing/gmock_custom',
+ '../testing/gmock',
+ '../testing/gmock/include',
+ ],
+ 'all_dependent_settings': {
+ 'include_dirs': [
+ '../testing/gmock_custom',
+ '../testing/gmock/include', # So that gmock headers can find themselves.
+ ],
+ },
+ 'export_dependent_settings': [
+ 'gtest.gyp:gtest',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'gmock_main',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'gmock',
+ ],
+ 'sources': [
+ '../testing/gmock/src/gmock_main.cc',
+ ],
+ },
+ ],
+}
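
Note the 'sources!' key above: GYP first merges the 'sources' list, then removes any entries listed under 'sources!', which is how gmock-all.cc can be listed for completeness yet kept out of the build. A rough model:

    # Rough model of GYP's sources/sources! merge for the gmock target.
    sources = ['../testing/gmock/src/gmock-all.cc',
               '../testing/gmock/src/gmock.cc']
    sources_excluded = ['../testing/gmock/src/gmock-all.cc']
    final_sources = [s for s in sources if s not in sources_excluded]
    assert final_sources == ['../testing/gmock/src/gmock.cc']
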
diff --git a/deps/v8/testing/gtest.gyp b/deps/v8/gypfiles/gtest.gyp
index a94ee884fe..82ae105293 100644
--- a/deps/v8/testing/gtest.gyp
+++ b/deps/v8/gypfiles/gtest.gyp
@@ -9,42 +9,42 @@
'toolsets': ['host', 'target'],
'type': 'static_library',
'sources': [
- 'gtest/include/gtest/gtest-death-test.h',
- 'gtest/include/gtest/gtest-message.h',
- 'gtest/include/gtest/gtest-param-test.h',
- 'gtest/include/gtest/gtest-printers.h',
- 'gtest/include/gtest/gtest-spi.h',
- 'gtest/include/gtest/gtest-test-part.h',
- 'gtest/include/gtest/gtest-typed-test.h',
- 'gtest/include/gtest/gtest.h',
- 'gtest/include/gtest/gtest_pred_impl.h',
- 'gtest/include/gtest/internal/gtest-death-test-internal.h',
- 'gtest/include/gtest/internal/gtest-filepath.h',
- 'gtest/include/gtest/internal/gtest-internal.h',
- 'gtest/include/gtest/internal/gtest-linked_ptr.h',
- 'gtest/include/gtest/internal/gtest-param-util-generated.h',
- 'gtest/include/gtest/internal/gtest-param-util.h',
- 'gtest/include/gtest/internal/gtest-port.h',
- 'gtest/include/gtest/internal/gtest-string.h',
- 'gtest/include/gtest/internal/gtest-tuple.h',
- 'gtest/include/gtest/internal/gtest-type-util.h',
- 'gtest/src/gtest-all.cc',
- 'gtest/src/gtest-death-test.cc',
- 'gtest/src/gtest-filepath.cc',
- 'gtest/src/gtest-internal-inl.h',
- 'gtest/src/gtest-port.cc',
- 'gtest/src/gtest-printers.cc',
- 'gtest/src/gtest-test-part.cc',
- 'gtest/src/gtest-typed-test.cc',
- 'gtest/src/gtest.cc',
- 'gtest-support.h',
+ '../testing/gtest/include/gtest/gtest-death-test.h',
+ '../testing/gtest/include/gtest/gtest-message.h',
+ '../testing/gtest/include/gtest/gtest-param-test.h',
+ '../testing/gtest/include/gtest/gtest-printers.h',
+ '../testing/gtest/include/gtest/gtest-spi.h',
+ '../testing/gtest/include/gtest/gtest-test-part.h',
+ '../testing/gtest/include/gtest/gtest-typed-test.h',
+ '../testing/gtest/include/gtest/gtest.h',
+ '../testing/gtest/include/gtest/gtest_pred_impl.h',
+ '../testing/gtest/include/gtest/internal/gtest-death-test-internal.h',
+ '../testing/gtest/include/gtest/internal/gtest-filepath.h',
+ '../testing/gtest/include/gtest/internal/gtest-internal.h',
+ '../testing/gtest/include/gtest/internal/gtest-linked_ptr.h',
+ '../testing/gtest/include/gtest/internal/gtest-param-util-generated.h',
+ '../testing/gtest/include/gtest/internal/gtest-param-util.h',
+ '../testing/gtest/include/gtest/internal/gtest-port.h',
+ '../testing/gtest/include/gtest/internal/gtest-string.h',
+ '../testing/gtest/include/gtest/internal/gtest-tuple.h',
+ '../testing/gtest/include/gtest/internal/gtest-type-util.h',
+ '../testing/gtest/src/gtest-all.cc',
+ '../testing/gtest/src/gtest-death-test.cc',
+ '../testing/gtest/src/gtest-filepath.cc',
+ '../testing/gtest/src/gtest-internal-inl.h',
+ '../testing/gtest/src/gtest-port.cc',
+ '../testing/gtest/src/gtest-printers.cc',
+ '../testing/gtest/src/gtest-test-part.cc',
+ '../testing/gtest/src/gtest-typed-test.cc',
+ '../testing/gtest/src/gtest.cc',
+ '../testing/gtest-support.h',
],
'sources!': [
- 'gtest/src/gtest-all.cc', # Not needed by our build.
+ '../testing/gtest/src/gtest-all.cc', # Not needed by our build.
],
'include_dirs': [
- 'gtest',
- 'gtest/include',
+ '../testing/gtest',
+ '../testing/gtest/include',
],
'dependencies': [
'gtest_prod',
@@ -78,7 +78,7 @@
'UNIT_TEST',
],
'include_dirs': [
- 'gtest/include', # So that gtest headers can find themselves.
+ '../testing/gtest/include', # So that gtest headers can find themselves.
],
'target_conditions': [
['_type=="executable"', {
@@ -107,7 +107,7 @@
'gtest',
],
'sources': [
- 'gtest/src/gtest_main.cc',
+ '../testing/gtest/src/gtest_main.cc',
],
},
{
@@ -115,7 +115,7 @@
'toolsets': ['host', 'target'],
'type': 'none',
'sources': [
- 'gtest/include/gtest/gtest_prod.h',
+ '../testing/gtest/include/gtest/gtest_prod.h',
],
},
],
diff --git a/deps/v8/gypfiles/gyp_v8 b/deps/v8/gypfiles/gyp_v8
index 5215f51352..62e13d86e6 100755..100644
--- a/deps/v8/gypfiles/gyp_v8
+++ b/deps/v8/gypfiles/gyp_v8
@@ -48,7 +48,8 @@ import gyp
# Add paths so that pymod_do_main(...) can import files.
sys.path.insert(
1, os.path.abspath(os.path.join(v8_root, 'tools', 'generate_shim_headers')))
-
+sys.path.append(
+ os.path.abspath(os.path.join(v8_root, 'third_party', 'binutils')))
def GetOutputDirectory():
"""Returns the output directory that GYP will use."""
@@ -108,14 +109,19 @@ def run_gyp(args):
if __name__ == '__main__':
args = sys.argv[1:]
- gyp_chromium_no_action = os.environ.get('GYP_CHROMIUM_NO_ACTION', '1')
- if gyp_chromium_no_action != '0':
- print 'GYP is now disabled by default.\n'
- print 'If you really want to run this, set the environment variable '
- print 'GYP_CHROMIUM_NO_ACTION=0.'
+ gyp_chromium_no_action = os.environ.get('GYP_CHROMIUM_NO_ACTION')
+ if gyp_chromium_no_action == '1':
+ print 'Skipping gyp_v8 due to GYP_CHROMIUM_NO_ACTION env var.'
sys.exit(0)
running_as_hook = '--running-as-hook'
+ if running_as_hook in args and gyp_chromium_no_action != '0':
+ print 'GYP is now disabled by default in runhooks.\n'
+ print 'If you really want to run this, either run '
+ print '`python gypfiles/gyp_v8` explicitly by hand '
+ print 'or set the environment variable GYP_CHROMIUM_NO_ACTION=0.'
+ sys.exit(0)
+
if running_as_hook in args:
args.remove(running_as_hook)
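
Note: the new gyp_v8 gating distinguishes two cases — an explicit GYP_CHROMIUM_NO_ACTION=1 always skips, while hook-driven runs (--running-as-hook) also skip unless the variable is explicitly set to 0. The decision table, restated as a standalone sketch (prints omitted):

    # Sketch of the gyp_v8 gating logic above, simplified.
    import os

    def should_run_gyp(args):
        no_action = os.environ.get('GYP_CHROMIUM_NO_ACTION')
        if no_action == '1':
            return False                      # explicitly disabled
        if '--running-as-hook' in args and no_action != '0':
            return False                      # hook runs are opt-in via =0
        return True
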
diff --git a/deps/v8/gypfiles/inspector-test.gyp b/deps/v8/gypfiles/inspector-test.gyp
new file mode 100644
index 0000000000..c8cc5561a4
--- /dev/null
+++ b/deps/v8/gypfiles/inspector-test.gyp
@@ -0,0 +1,39 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['toolchain.gypi', 'features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'inspector-test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_libbase',
+ 'v8.gyp:v8',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ '../test/inspector/inspector-test.cc',
+ '../test/inspector/isolate-data.cc',
+ '../test/inspector/isolate-data.h',
+ '../test/inspector/task-runner.cc',
+ '../test/inspector/task-runner.h',
+ ],
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/src/inspector/inspector.gyp b/deps/v8/gypfiles/inspector.gyp
index 3d59cc089d..c0b503d9ce 100644
--- a/deps/v8/src/inspector/inspector.gyp
+++ b/deps/v8/gypfiles/inspector.gyp
@@ -4,11 +4,12 @@
{
'variables': {
- 'protocol_path': '../../third_party/inspector_protocol',
+ 'protocol_path': '../third_party/inspector_protocol',
+ 'inspector_path': '../src/inspector',
},
'includes': [
'inspector.gypi',
- '<(PRODUCT_DIR)/../../../third_party/inspector_protocol/inspector_protocol.gypi',
+ '../third_party/inspector_protocol/inspector_protocol.gypi',
],
'targets': [
{ 'target_name': 'inspector_injected_script',
@@ -18,7 +19,7 @@
{
'action_name': 'convert_js_to_cpp_char_array',
'inputs': [
- 'build/xxd.py',
+ '<(inspector_path)/build/xxd.py',
'<(inspector_injected_script_source)',
],
'outputs': [
@@ -26,9 +27,9 @@
],
'action': [
'python',
- 'build/xxd.py',
+ '<(inspector_path)/build/xxd.py',
'InjectedScriptSource_js',
- 'injected-script-source.js',
+ '<(inspector_path)/injected-script-source.js',
'<@(_outputs)'
],
},
@@ -43,7 +44,7 @@
{
'action_name': 'protocol_compatibility',
'inputs': [
- 'js_protocol.json',
+ '<(inspector_path)/js_protocol.json',
],
'outputs': [
'<@(SHARED_INTERMEDIATE_DIR)/src/js_protocol.stamp',
@@ -52,7 +53,7 @@
'python',
'<(protocol_path)/CheckProtocolCompatibility.py',
'--stamp', '<@(_outputs)',
- 'js_protocol.json',
+ '<(inspector_path)/js_protocol.json',
],
'message': 'Generating inspector protocol sources from protocol json definition',
},
@@ -66,8 +67,8 @@
{
'action_name': 'protocol_generated_sources',
'inputs': [
- 'js_protocol.json',
- 'inspector_protocol_config.json',
+ '<(inspector_path)/js_protocol.json',
+ '<(inspector_path)/inspector_protocol_config.json',
'<@(inspector_protocol_files)',
],
'outputs': [
@@ -76,9 +77,9 @@
'action': [
'python',
'<(protocol_path)/CodeGenerator.py',
- '--jinja_dir', '../../third_party',
+ '--jinja_dir', '../third_party',
'--output_base', '<(SHARED_INTERMEDIATE_DIR)/src/inspector',
- '--config', 'inspector_protocol_config.json',
+ '--config', '<(inspector_path)/inspector_protocol_config.json',
],
'message': 'Generating inspector protocol sources from protocol json',
},
diff --git a/deps/v8/gypfiles/inspector.gypi b/deps/v8/gypfiles/inspector.gypi
new file mode 100644
index 0000000000..00cbbfaecb
--- /dev/null
+++ b/deps/v8/gypfiles/inspector.gypi
@@ -0,0 +1,90 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'inspector_generated_sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Forward.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.h',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.h',
+ '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Debugger.h',
+ '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Runtime.h',
+ '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Schema.h',
+ ],
+
+ 'inspector_injected_script_source': '../src/inspector/injected-script-source.js',
+ 'inspector_generated_injected_script': '<(SHARED_INTERMEDIATE_DIR)/src/inspector/injected-script-source.h',
+
+ 'inspector_all_sources': [
+ '<@(inspector_generated_sources)',
+ '<(inspector_generated_injected_script)',
+ '../include/v8-inspector.h',
+ '../include/v8-inspector-protocol.h',
+ '../src/inspector/injected-script.cc',
+ '../src/inspector/injected-script.h',
+ '../src/inspector/inspected-context.cc',
+ '../src/inspector/inspected-context.h',
+ '../src/inspector/remote-object-id.cc',
+ '../src/inspector/remote-object-id.h',
+ '../src/inspector/search-util.cc',
+ '../src/inspector/search-util.h',
+ '../src/inspector/string-16.cc',
+ '../src/inspector/string-16.h',
+ '../src/inspector/string-util.cc',
+ '../src/inspector/string-util.h',
+ '../src/inspector/test-interface.cc',
+ '../src/inspector/test-interface.h',
+ '../src/inspector/v8-console.cc',
+ '../src/inspector/v8-console.h',
+ '../src/inspector/v8-console-agent-impl.cc',
+ '../src/inspector/v8-console-agent-impl.h',
+ '../src/inspector/v8-console-message.cc',
+ '../src/inspector/v8-console-message.h',
+ '../src/inspector/v8-debugger.cc',
+ '../src/inspector/v8-debugger.h',
+ '../src/inspector/v8-debugger-agent-impl.cc',
+ '../src/inspector/v8-debugger-agent-impl.h',
+ '../src/inspector/v8-debugger-script.cc',
+ '../src/inspector/v8-debugger-script.h',
+ '../src/inspector/v8-function-call.cc',
+ '../src/inspector/v8-function-call.h',
+ '../src/inspector/v8-heap-profiler-agent-impl.cc',
+ '../src/inspector/v8-heap-profiler-agent-impl.h',
+ '../src/inspector/v8-injected-script-host.cc',
+ '../src/inspector/v8-injected-script-host.h',
+ '../src/inspector/v8-inspector-impl.cc',
+ '../src/inspector/v8-inspector-impl.h',
+ '../src/inspector/v8-inspector-session-impl.cc',
+ '../src/inspector/v8-inspector-session-impl.h',
+ '../src/inspector/v8-internal-value-type.cc',
+ '../src/inspector/v8-internal-value-type.h',
+ '../src/inspector/v8-profiler-agent-impl.cc',
+ '../src/inspector/v8-profiler-agent-impl.h',
+ '../src/inspector/v8-regex.cc',
+ '../src/inspector/v8-regex.h',
+ '../src/inspector/v8-runtime-agent-impl.cc',
+ '../src/inspector/v8-runtime-agent-impl.h',
+ '../src/inspector/v8-schema-agent-impl.cc',
+ '../src/inspector/v8-schema-agent-impl.h',
+ '../src/inspector/v8-stack-trace-impl.cc',
+ '../src/inspector/v8-stack-trace-impl.h',
+ '../src/inspector/v8-value-utils.cc',
+ '../src/inspector/v8-value-utils.h',
+ '../src/inspector/wasm-translation.cc',
+ '../src/inspector/wasm-translation.h',
+ ]
+ }
+}
diff --git a/deps/v8/gypfiles/isolate.gypi b/deps/v8/gypfiles/isolate.gypi
deleted file mode 100644
index 3e85b530e2..0000000000
--- a/deps/v8/gypfiles/isolate.gypi
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# This file is meant to be included into a target to provide a rule
-# to "build" .isolate files into a .isolated file.
-#
-# To use this, create a gyp target with the following form:
-# 'conditions': [
-# ['test_isolation_mode != "noop"', {
-# 'targets': [
-# {
-# 'target_name': 'foo_test_run',
-# 'type': 'none',
-# 'dependencies': [
-# 'foo_test',
-# ],
-# 'includes': [
-# '../gypfiles/isolate.gypi',
-# ],
-# 'sources': [
-# 'foo_test.isolate',
-# ],
-# },
-# ],
-# }],
-# ],
-#
-# Note: foo_test.isolate is included and a source file. It is an inherent
-# property of the .isolate format. This permits to define GYP variables but is
-# a stricter format than GYP so isolate.py can read it.
-#
-# The generated .isolated file will be:
-# <(PRODUCT_DIR)/foo_test.isolated
-#
-# See http://dev.chromium.org/developers/testing/isolated-testing/for-swes
-# for more information.
-
-{
- 'rules': [
- {
- 'rule_name': 'isolate',
- 'extension': 'isolate',
- 'inputs': [
- # Files that are known to be involved in this step.
- '<(DEPTH)/tools/isolate_driver.py',
- '<(DEPTH)/tools/swarming_client/isolate.py',
- '<(DEPTH)/tools/swarming_client/run_isolated.py',
- ],
- 'outputs': [
- '<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).isolated',
- ],
- 'action': [
- 'python',
- '<(DEPTH)/tools/isolate_driver.py',
- '<(test_isolation_mode)',
- '--isolated', '<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).isolated',
- '--isolate', '<(RULE_INPUT_PATH)',
-
- # Variables should use the -V FOO=<(FOO) form so frequent values,
- # like '0' or '1', aren't stripped out by GYP. Run 'isolate.py help'
- # for more details.
-
- # Path variables are used to replace file paths when loading a .isolate
- # file
- '--path-variable', 'DEPTH', '<(DEPTH)',
- '--path-variable', 'PRODUCT_DIR', '<(PRODUCT_DIR)',
-
- '--config-variable', 'CONFIGURATION_NAME=<(CONFIGURATION_NAME)',
- '--config-variable', 'OS=<(OS)',
- '--config-variable', 'asan=<(asan)',
- '--config-variable', 'cfi_vptr=<(cfi_vptr)',
- '--config-variable', 'gcmole=<(gcmole)',
- '--config-variable', 'has_valgrind=<(has_valgrind)',
- '--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
- '--config-variable', 'msan=<(msan)',
- '--config-variable', 'tsan=<(tsan)',
- '--config-variable', 'coverage=<(coverage)',
- '--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
- '--config-variable', 'component=<(component)',
- '--config-variable', 'target_arch=<(target_arch)',
- '--config-variable', 'ubsan_vptr=0',
- '--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
- '--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
- ],
- 'conditions': [
- ['OS=="win"', {
- 'action': [
- '--config-variable', 'msvs_version=2013',
- ],
- }, {
- 'action': [
- '--config-variable', 'msvs_version=0',
- ],
- }],
- ],
- },
- ],
-}
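
Note: with the swarming *_run targets gone (d8_run above, fuzzer_run earlier), this rule had no remaining users. For orientation, per foo_test.isolate source it expanded into roughly this invocation — a sketch where every value after a flag is a placeholder for a GYP substitution:

    # Sketch of the per-.isolate invocation the deleted rule generated.
    # All values after the flags are placeholders, not real settings.
    import subprocess

    subprocess.check_call([
        'python', 'tools/isolate_driver.py', 'check',   # <(test_isolation_mode)
        '--isolated', 'out/Release/foo_test.isolated',  # <(PRODUCT_DIR)/...
        '--isolate', 'foo_test.isolate',                # <(RULE_INPUT_PATH)
        '--path-variable', 'DEPTH', '.',
        '--config-variable', 'OS=linux',
    ])
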
diff --git a/deps/v8/gypfiles/landmine_utils.py b/deps/v8/gypfiles/landmine_utils.py
deleted file mode 100644
index 8bdc2b648b..0000000000
--- a/deps/v8/gypfiles/landmine_utils.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import functools
-import logging
-import os
-import shlex
-import sys
-
-
-def memoize(default=None):
- """This decorator caches the return value of a parameterless pure function"""
- def memoizer(func):
- val = []
- @functools.wraps(func)
- def inner():
- if not val:
- ret = func()
- val.append(ret if ret is not None else default)
- if logging.getLogger().isEnabledFor(logging.INFO):
- print '%s -> %r' % (func.__name__, val[0])
- return val[0]
- return inner
- return memoizer
-
-
-@memoize()
-def IsWindows():
- return sys.platform in ['win32', 'cygwin']
-
-
-@memoize()
-def IsLinux():
- return sys.platform.startswith(('linux', 'freebsd'))
-
-
-@memoize()
-def IsMac():
- return sys.platform == 'darwin'
-
-
-@memoize()
-def gyp_defines():
- """Parses and returns GYP_DEFINES env var as a dictionary."""
- return dict(arg.split('=', 1)
- for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
-
-
-@memoize()
-def gyp_generator_flags():
- """Parses and returns GYP_GENERATOR_FLAGS env var as a dictionary."""
- return dict(arg.split('=', 1)
- for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', '')))
-
-
-@memoize()
-def gyp_msvs_version():
- return os.environ.get('GYP_MSVS_VERSION', '')
-
-
-@memoize()
-def distributor():
- """
- Returns a string which is the distributed build engine in use (if any).
- Possible values: 'goma', 'ib', ''
- """
- if 'goma' in gyp_defines():
- return 'goma'
- elif IsWindows():
- if 'CHROME_HEADLESS' in os.environ:
- return 'ib' # use (win and !goma and headless) as approximation of ib
-
-
-@memoize()
-def platform():
- """
- Returns a string representing the platform this build is targeted for.
- Possible values: 'win', 'mac', 'linux', 'ios', 'android'
- """
- if 'OS' in gyp_defines():
- if 'android' in gyp_defines()['OS']:
- return 'android'
- else:
- return gyp_defines()['OS']
- elif IsWindows():
- return 'win'
- elif IsLinux():
- return 'linux'
- else:
- return 'mac'
-
-
-@memoize()
-def builder():
- """
- Returns a string representing the build engine (not compiler) to use.
- Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
- """
- if 'GYP_GENERATORS' in os.environ:
- # for simplicity, only support the first explicit generator
- generator = os.environ['GYP_GENERATORS'].split(',')[0]
- if generator.endswith('-android'):
- return generator.split('-')[0]
- elif generator.endswith('-ninja'):
- return 'ninja'
- else:
- return generator
- else:
- if platform() == 'android':
- # Good enough for now? Do any android bots use make?
- return 'make'
- elif platform() == 'ios':
- return 'xcode'
- elif IsWindows():
- return 'msvs'
- elif IsLinux():
- return 'make'
- elif IsMac():
- return 'xcode'
- else:
- assert False, 'Don\'t know what builder we\'re using!'
diff --git a/deps/v8/gypfiles/landmines.py b/deps/v8/gypfiles/landmines.py
deleted file mode 100755
index 2a81c66d1a..0000000000
--- a/deps/v8/gypfiles/landmines.py
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-This script runs every build as the first hook (See DEPS). If it detects that
-the build should be clobbered, it will delete the contents of the build
-directory.
-
-A landmine is tripped when a builder checks out a different revision, and the
-diff between the new landmines and the old ones is non-null. At this point, the
-build is clobbered.
-"""
-
-import difflib
-import errno
-import gyp_environment
-import logging
-import optparse
-import os
-import re
-import shutil
-import sys
-import subprocess
-import time
-
-import landmine_utils
-
-
-SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-
-
-def get_build_dir(build_tool, is_iphone=False):
- """
- Returns output directory absolute path dependent on build and targets.
- Examples:
- r'c:\b\build\slave\win\build\src\out'
- '/mnt/data/b/build/slave/linux/build/src/out'
- '/b/build/slave/ios_rel_device/build/src/xcodebuild'
-
- Keep this function in sync with tools/build/scripts/slave/compile.py
- """
- ret = None
- if build_tool == 'xcode':
- ret = os.path.join(SRC_DIR, 'xcodebuild')
- elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
- if 'CHROMIUM_OUT_DIR' in os.environ:
- output_dir = os.environ.get('CHROMIUM_OUT_DIR').strip()
- if not output_dir:
- raise Error('CHROMIUM_OUT_DIR environment variable is set but blank!')
- else:
- output_dir = landmine_utils.gyp_generator_flags().get('output_dir', 'out')
- ret = os.path.join(SRC_DIR, output_dir)
- elif build_tool in ['msvs', 'vs', 'ib']:
- ret = os.path.join(SRC_DIR, 'build')
- else:
- raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
- return os.path.abspath(ret)
-
-
-def extract_gn_build_commands(build_ninja_file):
- """Extracts from a build.ninja the commands to run GN.
-
- The commands to run GN are the gn rule and build.ninja build step at the
- top of the build.ninja file. We want to keep these when deleting GN builds
- since we want to preserve the command-line flags to GN.
-
- On error, returns the empty string."""
- result = ""
- with open(build_ninja_file, 'r') as f:
- # Read until the second blank line. The first thing GN writes to the file
- # is the "rule gn" and the second is the section for "build build.ninja",
- # separated by blank lines.
- num_blank_lines = 0
- while num_blank_lines < 2:
- line = f.readline()
- if len(line) == 0:
- return '' # Unexpected EOF.
- result += line
- if line[0] == '\n':
- num_blank_lines = num_blank_lines + 1
- return result
-
-def delete_build_dir(build_dir):
- # GN writes a build.ninja.d file. Note that not all GN builds have args.gn.
- build_ninja_d_file = os.path.join(build_dir, 'build.ninja.d')
- if not os.path.exists(build_ninja_d_file):
- shutil.rmtree(build_dir)
- return
-
- # GN builds aren't automatically regenerated when you sync. To avoid
- # messing with the GN workflow, erase everything but the args file, and
- # write a dummy build.ninja file that will automatically rerun GN the next
- # time Ninja is run.
- build_ninja_file = os.path.join(build_dir, 'build.ninja')
- build_commands = extract_gn_build_commands(build_ninja_file)
-
- try:
- gn_args_file = os.path.join(build_dir, 'args.gn')
- with open(gn_args_file, 'r') as f:
- args_contents = f.read()
- except IOError:
- args_contents = ''
-
- shutil.rmtree(build_dir)
-
- # Put back the args file (if any).
- os.mkdir(build_dir)
- if args_contents != '':
- with open(gn_args_file, 'w') as f:
- f.write(args_contents)
-
- # Write the build.ninja file sufficiently to regenerate itself.
- with open(os.path.join(build_dir, 'build.ninja'), 'w') as f:
- if build_commands != '':
- f.write(build_commands)
- else:
- # Couldn't parse the build.ninja file, write a default thing.
- f.write('''rule gn
-command = gn -q gen //out/%s/
-description = Regenerating ninja files
-
-build build.ninja: gn
-generator = 1
-depfile = build.ninja.d
-''' % (os.path.split(build_dir)[1]))
-
- # Write a .d file for the build which references a nonexistant file. This
- # will make Ninja always mark the build as dirty.
- with open(build_ninja_d_file, 'w') as f:
- f.write('build.ninja: nonexistant_file.gn\n')
-
-
-def needs_clobber(landmines_path, new_landmines):
- if os.path.exists(landmines_path):
- with open(landmines_path, 'r') as f:
- old_landmines = f.readlines()
- if old_landmines != new_landmines:
- old_date = time.ctime(os.stat(landmines_path).st_ctime)
- diff = difflib.unified_diff(old_landmines, new_landmines,
- fromfile='old_landmines', tofile='new_landmines',
- fromfiledate=old_date, tofiledate=time.ctime(), n=0)
- sys.stdout.write('Clobbering due to:\n')
- sys.stdout.writelines(diff)
- return True
- else:
- sys.stdout.write('Clobbering due to missing landmines file.\n')
- return True
- return False
-
-
-def clobber_if_necessary(new_landmines):
- """Does the work of setting, planting, and triggering landmines."""
- out_dir = get_build_dir(landmine_utils.builder())
- landmines_path = os.path.normpath(os.path.join(out_dir, '..', '.landmines'))
- try:
- os.makedirs(out_dir)
- except OSError as e:
- if e.errno == errno.EEXIST:
- pass
-
- if needs_clobber(landmines_path, new_landmines):
- # Clobber contents of build directory but not directory itself: some
- # checkouts have the build directory mounted.
- for f in os.listdir(out_dir):
- path = os.path.join(out_dir, f)
- if os.path.basename(out_dir) == 'build':
- # Only delete build directories and files for MSVS builds as the folder
- # shares some checked out files and directories.
- if (os.path.isdir(path) and
- re.search(r'(?:[Rr]elease)|(?:[Dd]ebug)', f)):
- delete_build_dir(path)
- elif (os.path.isfile(path) and
- (path.endswith('.sln') or
- path.endswith('.vcxproj') or
- path.endswith('.vcxproj.user'))):
- os.unlink(path)
- else:
- if os.path.isfile(path):
- os.unlink(path)
- elif os.path.isdir(path):
- delete_build_dir(path)
- if os.path.basename(out_dir) == 'xcodebuild':
- # Xcodebuild puts an additional project file structure into build,
- # while the output folder is xcodebuild.
- project_dir = os.path.join(SRC_DIR, 'build', 'all.xcodeproj')
- if os.path.exists(project_dir) and os.path.isdir(project_dir):
- delete_build_dir(project_dir)
-
- # Save current set of landmines for next time.
- with open(landmines_path, 'w') as f:
- f.writelines(new_landmines)
-
-
-def process_options():
- """Returns a list of landmine emitting scripts."""
- parser = optparse.OptionParser()
- parser.add_option(
- '-s', '--landmine-scripts', action='append',
- default=[os.path.join(SRC_DIR, 'gypfiles', 'get_landmines.py')],
- help='Path to the script which emits landmines to stdout. The target '
- 'is passed to this script via option -t. Note that an extra '
- 'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
- parser.add_option('-v', '--verbose', action='store_true',
- default=('LANDMINES_VERBOSE' in os.environ),
- help=('Emit some extra debugging information (default off). This option '
- 'is also enabled by the presence of a LANDMINES_VERBOSE environment '
- 'variable.'))
-
- options, args = parser.parse_args()
-
- if args:
- parser.error('Unknown arguments %s' % args)
-
- logging.basicConfig(
- level=logging.DEBUG if options.verbose else logging.ERROR)
-
- extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
- if extra_script:
- return options.landmine_scripts + [extra_script]
- else:
- return options.landmine_scripts
-
-
-def main():
- landmine_scripts = process_options()
-
- if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
- return 0
-
- gyp_environment.set_environment()
-
- landmines = []
- for s in landmine_scripts:
- proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
- output, _ = proc.communicate()
- landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
- clobber_if_necessary(landmines)
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/deps/v8/gypfiles/mkgrokdump.gyp b/deps/v8/gypfiles/mkgrokdump.gyp
new file mode 100644
index 0000000000..75e8fc75cd
--- /dev/null
+++ b/deps/v8/gypfiles/mkgrokdump.gyp
@@ -0,0 +1,27 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['toolchain.gypi', 'features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'mkgrokdump',
+ 'type': 'executable',
+ 'dependencies': [
+ 'v8.gyp:v8',
+ 'v8.gyp:v8_libbase',
+ 'v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ '../test/mkgrokdump/mkgrokdump.cc',
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/tools/parser-shell.gyp b/deps/v8/gypfiles/parser-shell.gyp
index 9b94888edf..464e2676ac 100644
--- a/deps/v8/tools/parser-shell.gyp
+++ b/deps/v8/gypfiles/parser-shell.gyp
@@ -30,15 +30,15 @@
'v8_code': 1,
'v8_enable_i18n_support%': 1,
},
- 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
+ 'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'parser-shell',
'type': 'executable',
'dependencies': [
- '../src/v8.gyp:v8',
- '../src/v8.gyp:v8_libbase',
- '../src/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8',
+ 'v8.gyp:v8_libbase',
+ 'v8.gyp:v8_libplatform',
],
'conditions': [
['v8_enable_i18n_support==1', {
@@ -52,8 +52,8 @@
'..',
],
'sources': [
- 'parser-shell.cc',
- 'shell-utils.h',
+ '../tools/parser-shell.cc',
+ '../tools/shell-utils.h',
],
},
],
diff --git a/deps/v8/gypfiles/run-tests-legacy.py b/deps/v8/gypfiles/run-tests-legacy.py
index f1ea478c62..f1ea478c62 100755..100644
--- a/deps/v8/gypfiles/run-tests-legacy.py
+++ b/deps/v8/gypfiles/run-tests-legacy.py
diff --git a/deps/v8/gypfiles/samples.gyp b/deps/v8/gypfiles/samples.gyp
new file mode 100644
index 0000000000..9080c0d445
--- /dev/null
+++ b/deps/v8/gypfiles/samples.gyp
@@ -0,0 +1,61 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ 'v8_enable_i18n_support%': 1,
+ 'v8_toolset_for_shell%': 'target',
+ },
+ 'includes': ['toolchain.gypi', 'features.gypi'],
+ 'target_defaults': {
+ 'type': 'executable',
+ 'dependencies': [
+ 'v8.gyp:v8',
+ 'v8.gyp:v8_libbase',
+ 'v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ }],
+ ['OS=="win" and v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icudata',
+ ],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'v8_shell',
+ 'sources': [
+ '../samples/shell.cc',
+ ],
+ 'conditions': [
+ [ 'want_separate_host_toolset==1', {
+ 'toolsets': [ '<(v8_toolset_for_shell)', ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'hello-world',
+ 'sources': [
+ '../samples/hello-world.cc',
+ ],
+ },
+ {
+ 'target_name': 'process',
+ 'sources': [
+ '../samples/process.cc',
+ ],
+ },
+ ],
+}
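
Note: samples.gyp leans on 'target_defaults' — every target in the file inherits the executable type, the v8 dependencies, and the include dir, so each sample declares only its name and source. A rough model of the merge (simplified: real GYP concatenates list-valued keys rather than replacing them):

    # Rough model of GYP's target_defaults merge for samples.gyp.
    target_defaults = {
        'type': 'executable',
        'dependencies': ['v8.gyp:v8'],
        'include_dirs': ['..'],
    }
    target = {
        'target_name': 'hello-world',
        'sources': ['../samples/hello-world.cc'],
    }
    merged = {**target_defaults, **target}  # simplified merge
    assert merged['type'] == 'executable'
    assert merged['sources'] == ['../samples/hello-world.cc']
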
diff --git a/deps/v8/gypfiles/sysroot_ld_flags.sh b/deps/v8/gypfiles/sysroot_ld_flags.sh
index 5cc8011b32..5cc8011b32 100755..100644
--- a/deps/v8/gypfiles/sysroot_ld_flags.sh
+++ b/deps/v8/gypfiles/sysroot_ld_flags.sh
diff --git a/deps/v8/gypfiles/toolchain.gypi b/deps/v8/gypfiles/toolchain.gypi
index 80844cecc6..e67a5e22b9 100644
--- a/deps/v8/gypfiles/toolchain.gypi
+++ b/deps/v8/gypfiles/toolchain.gypi
@@ -58,6 +58,9 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
+ # MIPS MSA support
+ 'mips_use_msa%': 0,
+
# Print to stdout on Android.
'v8_android_log_stdout%': 0,
@@ -439,6 +442,9 @@
'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
@@ -507,6 +513,9 @@
'FPU_MODE_FP64',
],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
@@ -558,6 +567,9 @@
'FPU_MODE_FP64',
],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'conditions': [
['mips_fpu_mode=="fp64"', {
@@ -640,6 +652,9 @@
'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
@@ -721,6 +736,9 @@
'FPU_MODE_FP64',
],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
@@ -778,6 +796,9 @@
'FPU_MODE_FP64',
],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'conditions': [
['mips_fpu_mode=="fp64"', {
@@ -877,6 +898,9 @@
'cflags': ['-mips64r6', '-mabi=64'],
'ldflags': ['-mips64r6', '-mabi=64'],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
'conditions': [
@@ -895,6 +919,9 @@
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
@@ -907,6 +934,9 @@
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
+ ['mips_arch_variant=="r6" and mips_use_msa==1', {
+ 'defines': [ '_MIPS_MSA' ],
+ }],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
@@ -1225,7 +1255,9 @@
'OBJECT_PRINT',
'VERIFY_HEAP',
'DEBUG',
- 'V8_TRACE_MAPS'
+ 'V8_TRACE_MAPS',
+ 'V8_ENABLE_ALLOCATION_TIMEOUT',
+ 'V8_ENABLE_FORCE_SLOW_PATH',
],
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
diff --git a/deps/v8/gypfiles/unittests.gyp b/deps/v8/gypfiles/unittests.gyp
new file mode 100644
index 0000000000..c6d3713b0c
--- /dev/null
+++ b/deps/v8/gypfiles/unittests.gyp
@@ -0,0 +1,287 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# The sources are kept automatically in sync with BUILD.gn.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ 'unittests_sources': [
+ '../test/unittests/allocation-unittest.cc',
+ '../test/unittests/api/access-check-unittest.cc',
+ '../test/unittests/api/exception-unittest.cc',
+ '../test/unittests/api/interceptor-unittest.cc',
+ '../test/unittests/api/isolate-unittest.cc',
+ '../test/unittests/api/remote-object-unittest.cc',
+ '../test/unittests/api/v8-object-unittest.cc',
+ '../test/unittests/asmjs/asm-scanner-unittest.cc',
+ '../test/unittests/asmjs/asm-types-unittest.cc',
+ '../test/unittests/asmjs/switch-logic-unittest.cc',
+ '../test/unittests/base/atomic-utils-unittest.cc',
+ '../test/unittests/base/bits-unittest.cc',
+ '../test/unittests/base/cpu-unittest.cc',
+ '../test/unittests/base/division-by-constant-unittest.cc',
+ '../test/unittests/base/flags-unittest.cc',
+ '../test/unittests/base/functional-unittest.cc',
+ '../test/unittests/base/ieee754-unittest.cc',
+ '../test/unittests/base/logging-unittest.cc',
+ '../test/unittests/base/macros-unittest.cc',
+ '../test/unittests/base/iterator-unittest.cc',
+ '../test/unittests/base/ostreams-unittest.cc',
+ '../test/unittests/base/platform/condition-variable-unittest.cc',
+ '../test/unittests/base/platform/mutex-unittest.cc',
+ '../test/unittests/base/platform/platform-unittest.cc',
+ '../test/unittests/base/platform/semaphore-unittest.cc',
+ '../test/unittests/base/platform/time-unittest.cc',
+ '../test/unittests/base/sys-info-unittest.cc',
+ '../test/unittests/base/template-utils-unittest.cc',
+ '../test/unittests/base/utils/random-number-generator-unittest.cc',
+ '../test/unittests/bigint-unittest.cc',
+ '../test/unittests/cancelable-tasks-unittest.cc',
+ '../test/unittests/char-predicates-unittest.cc',
+ "../test/unittests/code-stub-assembler-unittest.cc",
+ "../test/unittests/code-stub-assembler-unittest.h",
+ '../test/unittests/compiler/branch-elimination-unittest.cc',
+ '../test/unittests/compiler/bytecode-analysis-unittest.cc',
+ '../test/unittests/compiler/checkpoint-elimination-unittest.cc',
+ "../test/unittests/compiler/code-assembler-unittest.cc",
+ "../test/unittests/compiler/code-assembler-unittest.h",
+ '../test/unittests/compiler/common-operator-reducer-unittest.cc',
+ '../test/unittests/compiler/common-operator-unittest.cc',
+ '../test/unittests/compiler/compiler-test-utils.h',
+ '../test/unittests/compiler/control-equivalence-unittest.cc',
+ '../test/unittests/compiler/control-flow-optimizer-unittest.cc',
+ '../test/unittests/compiler/dead-code-elimination-unittest.cc',
+ '../test/unittests/compiler/diamond-unittest.cc',
+ '../test/unittests/compiler/effect-control-linearizer-unittest.cc',
+ '../test/unittests/compiler/graph-reducer-unittest.cc',
+ '../test/unittests/compiler/graph-reducer-unittest.h',
+ '../test/unittests/compiler/graph-trimmer-unittest.cc',
+ '../test/unittests/compiler/graph-unittest.cc',
+ '../test/unittests/compiler/graph-unittest.h',
+ '../test/unittests/compiler/instruction-unittest.cc',
+ '../test/unittests/compiler/instruction-selector-unittest.cc',
+ '../test/unittests/compiler/instruction-selector-unittest.h',
+ '../test/unittests/compiler/instruction-sequence-unittest.cc',
+ '../test/unittests/compiler/instruction-sequence-unittest.h',
+ '../test/unittests/compiler/int64-lowering-unittest.cc',
+ '../test/unittests/compiler/js-builtin-reducer-unittest.cc',
+ '../test/unittests/compiler/js-create-lowering-unittest.cc',
+ '../test/unittests/compiler/js-intrinsic-lowering-unittest.cc',
+ '../test/unittests/compiler/js-operator-unittest.cc',
+ '../test/unittests/compiler/js-typed-lowering-unittest.cc',
+ '../test/unittests/compiler/linkage-tail-call-unittest.cc',
+ '../test/unittests/compiler/live-range-builder.h',
+ '../test/unittests/compiler/regalloc/live-range-unittest.cc',
+ '../test/unittests/compiler/load-elimination-unittest.cc',
+ '../test/unittests/compiler/loop-peeling-unittest.cc',
+ '../test/unittests/compiler/machine-operator-reducer-unittest.cc',
+ '../test/unittests/compiler/machine-operator-unittest.cc',
+ '../test/unittests/compiler/regalloc/move-optimizer-unittest.cc',
+ '../test/unittests/compiler/node-cache-unittest.cc',
+ '../test/unittests/compiler/node-matchers-unittest.cc',
+ '../test/unittests/compiler/node-properties-unittest.cc',
+ '../test/unittests/compiler/node-test-utils.cc',
+ '../test/unittests/compiler/node-test-utils.h',
+ '../test/unittests/compiler/node-unittest.cc',
+ '../test/unittests/compiler/opcodes-unittest.cc',
+ '../test/unittests/compiler/persistent-unittest.cc',
+ '../test/unittests/compiler/regalloc/register-allocator-unittest.cc',
+ '../test/unittests/compiler/schedule-unittest.cc',
+ '../test/unittests/compiler/scheduler-unittest.cc',
+ '../test/unittests/compiler/scheduler-rpo-unittest.cc',
+ '../test/unittests/compiler/simplified-lowering-unittest.cc',
+ '../test/unittests/compiler/simplified-operator-reducer-unittest.cc',
+ '../test/unittests/compiler/simplified-operator-unittest.cc',
+ '../test/unittests/compiler/state-values-utils-unittest.cc',
+ '../test/unittests/compiler/typed-optimization-unittest.cc',
+ '../test/unittests/compiler/typer-unittest.cc',
+ '../test/unittests/compiler/value-numbering-reducer-unittest.cc',
+ '../test/unittests/compiler/zone-stats-unittest.cc',
+ '../test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc',
+ '../test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc',
+ '../test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc',
+ '../test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc',
+ '../test/unittests/counters-unittest.cc',
+ '../test/unittests/detachable-vector-unittest.cc',
+ '../test/unittests/eh-frame-iterator-unittest.cc',
+ '../test/unittests/eh-frame-writer-unittest.cc',
+ '../test/unittests/heap/barrier-unittest.cc',
+ '../test/unittests/heap/bitmap-unittest.cc',
+ '../test/unittests/heap/embedder-tracing-unittest.cc',
+ '../test/unittests/heap/gc-idle-time-handler-unittest.cc',
+ '../test/unittests/heap/gc-tracer-unittest.cc',
+ '../test/unittests/heap/item-parallel-job-unittest.cc',
+ '../test/unittests/heap/marking-unittest.cc',
+ '../test/unittests/heap/memory-reducer-unittest.cc',
+ '../test/unittests/heap/object-stats-unittest.cc',
+ '../test/unittests/heap/heap-unittest.cc',
+ '../test/unittests/heap/scavenge-job-unittest.cc',
+ '../test/unittests/heap/slot-set-unittest.cc',
+ '../test/unittests/heap/spaces-unittest.cc',
+ '../test/unittests/heap/unmapper-unittest.cc',
+ '../test/unittests/heap/worklist-unittest.cc',
+ '../test/unittests/interpreter/bytecodes-unittest.cc',
+ '../test/unittests/interpreter/bytecode-array-builder-unittest.cc',
+ '../test/unittests/interpreter/bytecode-array-iterator-unittest.cc',
+ '../test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc',
+ '../test/unittests/interpreter/bytecode-array-writer-unittest.cc',
+ '../test/unittests/interpreter/bytecode-decoder-unittest.cc',
+ '../test/unittests/interpreter/bytecode-node-unittest.cc',
+ '../test/unittests/interpreter/bytecode-operands-unittest.cc',
+ '../test/unittests/interpreter/bytecode-register-allocator-unittest.cc',
+ '../test/unittests/interpreter/bytecode-register-optimizer-unittest.cc',
+ '../test/unittests/interpreter/bytecode-source-info-unittest.cc',
+ '../test/unittests/interpreter/bytecode-utils.h',
+ '../test/unittests/interpreter/constant-array-builder-unittest.cc',
+ '../test/unittests/interpreter/interpreter-assembler-unittest.cc',
+ '../test/unittests/interpreter/interpreter-assembler-unittest.h',
+ '../test/unittests/libplatform/default-platform-unittest.cc',
+ '../test/unittests/libplatform/task-queue-unittest.cc',
+ '../test/unittests/libplatform/worker-thread-unittest.cc',
+ '../test/unittests/locked-queue-unittest.cc',
+ '../test/unittests/object-unittest.cc',
+ '../test/unittests/parser/ast-value-unittest.cc',
+ '../test/unittests/parser/preparser-unittest.cc',
+ '../test/unittests/register-configuration-unittest.cc',
+ '../test/unittests/run-all-unittests.cc',
+ '../test/unittests/source-position-table-unittest.cc',
+ '../test/unittests/test-helpers.cc',
+ '../test/unittests/test-helpers.h',
+ '../test/unittests/test-utils.h',
+ '../test/unittests/test-utils.cc',
+ '../test/unittests/unicode-unittest.cc',
+ '../test/unittests/utils-unittest.cc',
+ '../test/unittests/value-serializer-unittest.cc',
+ '../test/unittests/zone/segmentpool-unittest.cc',
+ '../test/unittests/zone/zone-allocator-unittest.cc',
+ '../test/unittests/zone/zone-chunk-list-unittest.cc',
+ '../test/unittests/zone/zone-unittest.cc',
+ '../test/unittests/wasm/control-transfer-unittest.cc',
+ '../test/unittests/wasm/decoder-unittest.cc',
+ '../test/unittests/wasm/function-body-decoder-unittest.cc',
+ '../test/unittests/wasm/wasm-code-manager-unittest.cc',
+ '../test/unittests/wasm/leb-helper-unittest.cc',
+ '../test/unittests/wasm/loop-assignment-analysis-unittest.cc',
+ '../test/unittests/wasm/module-decoder-unittest.cc',
+ '../test/unittests/wasm/streaming-decoder-unittest.cc',
+ '../test/unittests/wasm/trap-handler-unittest.cc',
+ '../test/unittests/wasm/wasm-macro-gen-unittest.cc',
+ '../test/unittests/wasm/wasm-module-builder-unittest.cc',
+ '../test/unittests/wasm/wasm-opcodes-unittest.cc',
+ ],
+ 'unittests_sources_arm': [
+ '../test/unittests/compiler/arm/instruction-selector-arm-unittest.cc',
+ ],
+ 'unittests_sources_arm64': [
+ '../test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc',
+ ],
+ 'unittests_sources_ia32': [
+ '../test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc',
+ ],
+ 'unittests_sources_mips': [
+ '../test/unittests/compiler/mips/instruction-selector-mips-unittest.cc',
+ ],
+ 'unittests_sources_mips64': [
+ '../test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc',
+ ],
+ 'unittests_sources_x64': [
+ '../test/unittests/compiler/x64/instruction-selector-x64-unittest.cc',
+ ],
+ 'unittests_sources_ppc': [
+ '../test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc',
+ ],
+ 'unittests_sources_s390': [
+ '../test/unittests/compiler/s390/instruction-selector-s390-unittest.cc',
+ ],
+ },
+ 'includes': ['toolchain.gypi', 'features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'unittests',
+ 'type': 'executable',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'dependencies': [
+ 'gmock.gyp:gmock',
+ 'gtest.gyp:gtest',
+ 'v8.gyp:v8',
+ 'v8.gyp:v8_libbase',
+ 'v8.gyp:v8_libplatform',
+ 'v8.gyp:v8_maybe_snapshot',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ '<@(unittests_sources)',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="arm"', {
+ 'sources': [
+ '<@(unittests_sources_arm)',
+ ],
+ }],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [
+ '<@(unittests_sources_arm64)',
+ ],
+ }],
+ ['v8_target_arch=="ia32"', {
+ 'sources': [
+ '<@(unittests_sources_ia32)',
+ ],
+ }],
+ ['v8_target_arch=="mips"', {
+ 'sources': [
+ '<@(unittests_sources_mips)',
+ ],
+ }],
+ ['v8_target_arch=="mipsel"', {
+ 'sources': [
+ '<@(unittests_sources_mips)',
+ ],
+ }],
+ ['v8_target_arch=="mips64"', {
+ 'sources': [
+ '<@(unittests_sources_mips64)',
+ ],
+ }],
+ ['v8_target_arch=="mips64el"', {
+ 'sources': [
+ '<@(unittests_sources_mips64)',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'sources': [
+ '<@(unittests_sources_x64)',
+ ],
+ }],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [
+ '<@(unittests_sources_ppc)',
+ ],
+ }],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'sources': [
+ '<@(unittests_sources_s390)',
+ ],
+ }],
+ ['OS=="aix"', {
+ 'ldflags': [ '-Wl,-bbigtoc' ],
+ }],
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ }],
+ ['v8_use_snapshot=="true"', {
+ 'dependencies': ['v8.gyp:v8_initializers'],
+ }],
+ ],
+ },
+ ],
+}
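The new unittests.gyp follows one pattern throughout: a shared
'unittests_sources' list plus one small per-port list, spliced into the
'unittests' executable via v8_target_arch conditions (mipsel reuses the
mips list, mips64el the mips64 list, and ppc64/s390x share the ppc and
s390 lists). A condensed sketch of that shape, with each source list
trimmed to a single entry for illustration:

  {
    'variables': {
      'unittests_sources': [
        '../test/unittests/run-all-unittests.cc',
      ],
      'unittests_sources_x64': [
        '../test/unittests/compiler/x64/instruction-selector-x64-unittest.cc',
      ],
    },
    'targets': [
      {
        'target_name': 'unittests',
        'type': 'executable',
        # '<@(list)' splices a list variable: the base sources are always
        # built, and each condition appends only that port's tests.
        'sources': [ '<@(unittests_sources)' ],
        'conditions': [
          ['v8_target_arch=="x64"', {
            'sources': [ '<@(unittests_sources_x64)' ],
          }],
        ],
      },
    ],
  }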
diff --git a/deps/v8/gypfiles/v8.gyp b/deps/v8/gypfiles/v8.gyp
new file mode 100644
index 0000000000..433166290a
--- /dev/null
+++ b/deps/v8/gypfiles/v8.gyp
@@ -0,0 +1,2613 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ 'v8_random_seed%': 314159265,
+ 'v8_vector_stores%': 0,
+ 'embed_script%': "",
+ 'warmup_script%': "",
+ 'v8_extra_library_files%': [],
+ 'v8_experimental_extra_library_files%': [],
+ 'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
+ 'v8_os_page_size%': 0,
+ },
+ 'includes': ['toolchain.gypi', 'features.gypi', 'inspector.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'v8',
+ 'dependencies_traverse': 1,
+ 'dependencies': ['v8_maybe_snapshot', 'v8_dump_build_config#target'],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['component=="shared_library"', {
+ 'type': '<(component)',
+ 'sources': [
+ # Note: on non-Windows we still build this file so that gyp
+ # has some sources to link into the component.
+ '../src/v8dll-main.cc',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'USING_V8_SHARED',
+ ],
+ },
+ 'conditions': [
+ ['OS=="mac"', {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
+ },
+ }],
+ ['soname_version!=""', {
+ 'product_extension': 'so.<(soname_version)',
+ }],
+ ],
+ },
+ {
+ 'type': 'none',
+ }],
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include/',
+ ],
+ },
+ },
+ {
+ # This rule delegates to either v8_snapshot, v8_nosnapshot, or
+ # v8_external_snapshot, depending on the current variables.
+ # The intention is to make the 'calling' rules a bit simpler.
+ 'target_name': 'v8_maybe_snapshot',
+ 'type': 'none',
+ 'conditions': [
+ ['v8_use_snapshot!="true"', {
+ # The dependency on v8_base should come from a transitive
+ # dependency however the Android toolchain requires libv8_base.a
+ # to appear before libv8_snapshot.a so it's listed explicitly.
+ 'dependencies': ['v8_base', 'v8_init', 'v8_nosnapshot'],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
+ # The dependency on v8_base should come from a transitive
+ # dependency however the Android toolchain requires libv8_base.a
+ # to appear before libv8_snapshot.a so it's listed explicitly.
+ 'dependencies': ['v8_base', 'v8_snapshot'],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
+ 'dependencies': ['v8_base', 'v8_external_snapshot'],
+ 'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
+ 'dependencies': ['v8_base', 'v8_external_snapshot'],
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'inputs': [
+ '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ ],
+ }, {
+ 'inputs': [
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ }],
+ ],
+ }],
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ]
+ },
+ {
+ 'target_name': 'v8_init',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_initializers',
+ ],
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '..',
+ '../include/',
+ ],
+ 'sources': [
+ '../src/setup-isolate-full.cc',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_initializers',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_base',
+ ],
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '..',
+ '../include/',
+ ],
+ 'sources': [
+ '../src/builtins/builtins-arguments-gen.cc',
+ '../src/builtins/builtins-arguments-gen.h',
+ '../src/builtins/builtins-array-gen.cc',
+ '../src/builtins/builtins-array-gen.h',
+ '../src/builtins/builtins-async-function-gen.cc',
+ '../src/builtins/builtins-async-gen.cc',
+ '../src/builtins/builtins-async-gen.h',
+ '../src/builtins/builtins-async-generator-gen.cc',
+ '../src/builtins/builtins-async-iterator-gen.cc',
+ '../src/builtins/builtins-boolean-gen.cc',
+ '../src/builtins/builtins-call-gen.cc',
+ '../src/builtins/builtins-call-gen.h',
+ '../src/builtins/builtins-collections-gen.cc',
+ '../src/builtins/builtins-console-gen.cc',
+ '../src/builtins/builtins-constructor-gen.cc',
+ '../src/builtins/builtins-constructor-gen.h',
+ '../src/builtins/builtins-constructor.h',
+ '../src/builtins/builtins-conversion-gen.cc',
+ '../src/builtins/builtins-date-gen.cc',
+ '../src/builtins/builtins-debug-gen.cc',
+ '../src/builtins/builtins-function-gen.cc',
+ '../src/builtins/builtins-generator-gen.cc',
+ '../src/builtins/builtins-global-gen.cc',
+ '../src/builtins/builtins-handler-gen.cc',
+ '../src/builtins/builtins-ic-gen.cc',
+ '../src/builtins/builtins-internal-gen.cc',
+ '../src/builtins/builtins-interpreter-gen.cc',
+ '../src/builtins/builtins-intl-gen.cc',
+ '../src/builtins/builtins-iterator-gen.h',
+ '../src/builtins/builtins-iterator-gen.cc',
+ '../src/builtins/builtins-math-gen.cc',
+ '../src/builtins/builtins-math-gen.h',
+ '../src/builtins/builtins-number-gen.cc',
+ '../src/builtins/builtins-object-gen.cc',
+ '../src/builtins/builtins-promise-gen.cc',
+ '../src/builtins/builtins-promise-gen.h',
+ '../src/builtins/builtins-proxy-gen.cc',
+ '../src/builtins/builtins-proxy-gen.h',
+ '../src/builtins/builtins-reflect-gen.cc',
+ '../src/builtins/builtins-regexp-gen.cc',
+ '../src/builtins/builtins-regexp-gen.h',
+ '../src/builtins/builtins-sharedarraybuffer-gen.cc',
+ '../src/builtins/builtins-string-gen.cc',
+ '../src/builtins/builtins-string-gen.h',
+ '../src/builtins/builtins-symbol-gen.cc',
+ '../src/builtins/builtins-typedarray-gen.cc',
+ '../src/builtins/builtins-typedarray-gen.h',
+ '../src/builtins/builtins-utils-gen.h',
+ '../src/builtins/builtins-wasm-gen.cc',
+ '../src/builtins/growable-fixed-array-gen.cc',
+ '../src/builtins/growable-fixed-array-gen.h',
+ '../src/builtins/setup-builtins-internal.cc',
+ '../src/heap/setup-heap-internal.cc',
+ '../src/ic/accessor-assembler.cc',
+ '../src/ic/accessor-assembler.h',
+ '../src/ic/binary-op-assembler.cc',
+ '../src/ic/binary-op-assembler.h',
+ '../src/ic/keyed-store-generic.cc',
+ '../src/ic/keyed-store-generic.h',
+ '../src/interpreter/interpreter-assembler.cc',
+ '../src/interpreter/interpreter-assembler.h',
+ '../src/interpreter/interpreter-generator.cc',
+ '../src/interpreter/interpreter-generator.h',
+ '../src/interpreter/interpreter-intrinsics-generator.cc',
+ '../src/interpreter/interpreter-intrinsics-generator.h',
+ '../src/interpreter/setup-interpreter-internal.cc',
+ '../src/interpreter/setup-interpreter.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['v8_target_arch=="ia32"', {
+ 'sources': [
+ '../src/builtins/ia32/builtins-ia32.cc',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'sources': [
+ '../src/builtins/x64/builtins-x64.cc',
+ ],
+ }],
+ ['v8_target_arch=="arm"', {
+ 'sources': [
+ '../src/builtins/arm/builtins-arm.cc',
+ ],
+ }],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [
+ '../src/builtins/arm64/builtins-arm64.cc',
+ ],
+ }],
+ ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
+ 'sources': [
+ '../src/builtins/mips/builtins-mips.cc',
+ ],
+ }],
+ ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
+ 'sources': [
+ '../src/builtins/mips64/builtins-mips64.cc',
+ ],
+ }],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [
+ '../src/builtins/ppc/builtins-ppc.cc',
+ ],
+ }],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'sources': [
+ '../src/builtins/s390/builtins-s390.cc',
+ ],
+ }],
+ ['v8_enable_i18n_support==0', {
+ 'sources!': [
+ '../src/builtins/builtins-intl-gen.cc',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_snapshot',
+ 'type': 'static_library',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ 'dependencies': [
+ 'mksnapshot#host',
+ 'js2c#host',
+ ],
+ }, {
+ 'toolsets': ['target'],
+ 'dependencies': [
+ 'mksnapshot',
+ 'js2c',
+ ],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'USING_V8_SHARED',
+ ],
+ },
+ }],
+ ],
+ 'dependencies': [
+ 'v8_base',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ ],
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ '<(INTERMEDIATE_DIR)/snapshot.cc',
+ '../src/setup-isolate-deserialize.cc',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'run_mksnapshot',
+ 'inputs': [
+ '<(mksnapshot_exec)',
+ ],
+ 'conditions': [
+ ['embed_script!=""', {
+ 'inputs': [
+ '<(embed_script)',
+ ],
+ }],
+ ['warmup_script!=""', {
+ 'inputs': [
+ '<(warmup_script)',
+ ],
+ }],
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/snapshot.cc',
+ ],
+ 'variables': {
+ 'mksnapshot_flags': [],
+ 'conditions': [
+ ['v8_random_seed!=0', {
+ 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+ }],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags': ['--vector-stores'],
+ }],
+ ],
+ },
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags)',
+ '--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
+ '<(embed_script)',
+ '<(warmup_script)',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'v8_nosnapshot',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_base',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ ],
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ '../src/snapshot/snapshot-empty.cc',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ 'dependencies': ['js2c#host'],
+ }, {
+ 'toolsets': ['target'],
+ 'dependencies': ['js2c'],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ ],
+ }],
+ ]
+ },
+ {
+ 'target_name': 'v8_external_snapshot',
+ 'type': 'static_library',
+ 'conditions': [
+ [ 'v8_use_external_startup_data==1', {
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ 'dependencies': [
+ 'mksnapshot#host',
+ 'js2c#host',
+ 'natives_blob',
+ ]}, {
+ 'toolsets': ['target'],
+ 'dependencies': [
+ 'mksnapshot',
+ 'js2c',
+ 'natives_blob',
+ ],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'USING_V8_SHARED',
+ ],
+ },
+ }],
+ ],
+ 'dependencies': [
+ 'v8_base',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ ],
+ 'sources': [
+ '../src/setup-isolate-deserialize.cc',
+ '../src/snapshot/natives-external.cc',
+ '../src/snapshot/snapshot-external.cc',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'run_mksnapshot (external)',
+ 'inputs': [
+ '<(mksnapshot_exec)',
+ ],
+ 'variables': {
+ 'mksnapshot_flags': [],
+ 'conditions': [
+ ['v8_random_seed!=0', {
+ 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+ }],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags': ['--vector-stores'],
+ }],
+ ['v8_os_page_size!=0', {
+ 'mksnapshot_flags': ['--v8_os_page_size', '<(v8_os_page_size)'],
+ }],
+ ],
+ },
+ 'conditions': [
+ ['embed_script!=""', {
+ 'inputs': [
+ '<(embed_script)',
+ ],
+ }],
+ ['warmup_script!=""', {
+ 'inputs': [
+ '<(warmup_script)',
+ ],
+ }],
+ ['want_separate_host_toolset==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'outputs': [
+ '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ ],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ '<(embed_script)',
+ '<(warmup_script)',
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+ '<(embed_script)',
+ '<(warmup_script)',
+ ],
+ }],
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+ '<(embed_script)',
+ '<(warmup_script)',
+ ],
+ }],
+ ],
+ },
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_base',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_libbase',
+ 'v8_libsampler',
+ 'inspector.gyp:protocol_generated_sources#target',
+ 'inspector.gyp:inspector_injected_script#target',
+ ],
+ 'objs': ['foo.o'],
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ '<(SHARED_INTERMEDIATE_DIR)'
+ ],
+ 'sources': [
+ '<@(inspector_all_sources)',
+ '../include//v8-debug.h',
+ '../include//v8-platform.h',
+ '../include//v8-profiler.h',
+ '../include//v8-testing.h',
+ '../include//v8-util.h',
+ '../include//v8-value-serializer-version.h',
+ '../include//v8-version-string.h',
+ '../include//v8-version.h',
+ '../include//v8.h',
+ '../include//v8config.h',
+ '../src/accessors.cc',
+ '../src/accessors.h',
+ '../src/address-map.cc',
+ '../src/address-map.h',
+ '../src/allocation.cc',
+ '../src/allocation.h',
+ '../src/allocation-site-scopes.h',
+ '../src/api.cc',
+ '../src/api.h',
+ '../src/api-arguments-inl.h',
+ '../src/api-arguments.cc',
+ '../src/api-arguments.h',
+ '../src/api-natives.cc',
+ '../src/api-natives.h',
+ '../src/arguments.cc',
+ '../src/arguments.h',
+ '../src/asmjs/asm-js.cc',
+ '../src/asmjs/asm-js.h',
+ '../src/asmjs/asm-names.h',
+ '../src/asmjs/asm-parser.cc',
+ '../src/asmjs/asm-parser.h',
+ '../src/asmjs/asm-scanner.cc',
+ '../src/asmjs/asm-scanner.h',
+ '../src/asmjs/asm-types.cc',
+ '../src/asmjs/asm-types.h',
+ '../src/asmjs/switch-logic.h',
+ '../src/asmjs/switch-logic.cc',
+ '../src/assembler.cc',
+ '../src/assembler.h',
+ '../src/assembler-inl.h',
+ '../src/assert-scope.h',
+ '../src/assert-scope.cc',
+ '../src/ast/ast-function-literal-id-reindexer.cc',
+ '../src/ast/ast-function-literal-id-reindexer.h',
+ '../src/ast/ast-source-ranges.h',
+ '../src/ast/ast-traversal-visitor.h',
+ '../src/ast/ast-value-factory.cc',
+ '../src/ast/ast-value-factory.h',
+ '../src/ast/ast.cc',
+ '../src/ast/ast.h',
+ '../src/ast/compile-time-value.cc',
+ '../src/ast/compile-time-value.h',
+ '../src/ast/context-slot-cache.cc',
+ '../src/ast/context-slot-cache.h',
+ '../src/ast/modules.cc',
+ '../src/ast/modules.h',
+ '../src/ast/prettyprinter.cc',
+ '../src/ast/prettyprinter.h',
+ '../src/ast/scopes.cc',
+ '../src/ast/scopes.h',
+ '../src/ast/variables.cc',
+ '../src/ast/variables.h',
+ '../src/bailout-reason.cc',
+ '../src/bailout-reason.h',
+ '../src/basic-block-profiler.cc',
+ '../src/basic-block-profiler.h',
+ '../src/bignum-dtoa.cc',
+ '../src/bignum-dtoa.h',
+ '../src/bignum.cc',
+ '../src/bignum.h',
+ '../src/bit-vector.cc',
+ '../src/bit-vector.h',
+ '../src/bootstrapper.cc',
+ '../src/bootstrapper.h',
+ '../src/boxed-float.h',
+ '../src/builtins/builtins-api.cc',
+ '../src/builtins/builtins-arraybuffer.cc',
+ '../src/builtins/builtins-array.cc',
+ '../src/builtins/builtins-bigint.cc',
+ '../src/builtins/builtins-boolean.cc',
+ '../src/builtins/builtins-call.cc',
+ '../src/builtins/builtins-callsite.cc',
+ '../src/builtins/builtins-collections.cc',
+ '../src/builtins/builtins-console.cc',
+ '../src/builtins/builtins-constructor.h',
+ '../src/builtins/builtins-dataview.cc',
+ '../src/builtins/builtins-date.cc',
+ '../src/builtins/builtins-definitions.h',
+ '../src/builtins/builtins-descriptors.h',
+ '../src/builtins/builtins-error.cc',
+ '../src/builtins/builtins-function.cc',
+ '../src/builtins/builtins-global.cc',
+ '../src/builtins/builtins-internal.cc',
+ '../src/builtins/builtins-interpreter.cc',
+ '../src/builtins/builtins-json.cc',
+ '../src/builtins/builtins-math.cc',
+ '../src/builtins/builtins-number.cc',
+ '../src/builtins/builtins-object.cc',
+ '../src/builtins/builtins-promise.cc',
+ '../src/builtins/builtins-reflect.cc',
+ '../src/builtins/builtins-regexp.cc',
+ '../src/builtins/builtins-sharedarraybuffer.cc',
+ '../src/builtins/builtins-string.cc',
+ '../src/builtins/builtins-intl.cc',
+ '../src/builtins/builtins-intl.h',
+ '../src/builtins/builtins-symbol.cc',
+ '../src/builtins/builtins-typedarray.cc',
+ '../src/builtins/builtins-utils.h',
+ '../src/builtins/builtins.cc',
+ '../src/builtins/builtins.h',
+ '../src/cached-powers.cc',
+ '../src/cached-powers.h',
+ '../src/callable.h',
+ '../src/cancelable-task.cc',
+ '../src/cancelable-task.h',
+ '../src/char-predicates.cc',
+ '../src/char-predicates-inl.h',
+ '../src/char-predicates.h',
+ '../src/checks.h',
+ '../src/code-events.h',
+ '../src/code-factory.cc',
+ '../src/code-factory.h',
+ '../src/code-stub-assembler.cc',
+ '../src/code-stub-assembler.h',
+ '../src/code-stubs.cc',
+ '../src/code-stubs.h',
+ '../src/code-stubs-utils.h',
+ '../src/codegen.cc',
+ '../src/codegen.h',
+ '../src/collector.h',
+ '../src/compilation-cache.cc',
+ '../src/compilation-cache.h',
+ '../src/compilation-dependencies.cc',
+ '../src/compilation-dependencies.h',
+ '../src/compilation-info.cc',
+ '../src/compilation-info.h',
+ '../src/compilation-statistics.cc',
+ '../src/compilation-statistics.h',
+ '../src/compiler/access-builder.cc',
+ '../src/compiler/access-builder.h',
+ '../src/compiler/access-info.cc',
+ '../src/compiler/access-info.h',
+ '../src/compiler/all-nodes.cc',
+ '../src/compiler/all-nodes.h',
+ '../src/compiler/allocation-builder.h',
+ '../src/compiler/basic-block-instrumentor.cc',
+ '../src/compiler/basic-block-instrumentor.h',
+ '../src/compiler/branch-elimination.cc',
+ '../src/compiler/branch-elimination.h',
+ '../src/compiler/bytecode-analysis.cc',
+ '../src/compiler/bytecode-analysis.h',
+ '../src/compiler/bytecode-graph-builder.cc',
+ '../src/compiler/bytecode-graph-builder.h',
+ '../src/compiler/bytecode-liveness-map.cc',
+ '../src/compiler/bytecode-liveness-map.h',
+ '../src/compiler/c-linkage.cc',
+ '../src/compiler/checkpoint-elimination.cc',
+ '../src/compiler/checkpoint-elimination.h',
+ '../src/compiler/code-generator-impl.h',
+ '../src/compiler/code-generator.cc',
+ '../src/compiler/code-generator.h',
+ '../src/compiler/code-assembler.cc',
+ '../src/compiler/code-assembler.h',
+ '../src/compiler/common-node-cache.cc',
+ '../src/compiler/common-node-cache.h',
+ '../src/compiler/common-operator-reducer.cc',
+ '../src/compiler/common-operator-reducer.h',
+ '../src/compiler/common-operator.cc',
+ '../src/compiler/common-operator.h',
+ '../src/compiler/control-equivalence.cc',
+ '../src/compiler/control-equivalence.h',
+ '../src/compiler/control-flow-optimizer.cc',
+ '../src/compiler/control-flow-optimizer.h',
+ '../src/compiler/dead-code-elimination.cc',
+ '../src/compiler/dead-code-elimination.h',
+ '../src/compiler/diamond.h',
+ '../src/compiler/effect-control-linearizer.cc',
+ '../src/compiler/effect-control-linearizer.h',
+ '../src/compiler/escape-analysis.cc',
+ '../src/compiler/escape-analysis.h',
+ '../src/compiler/escape-analysis-reducer.cc',
+ '../src/compiler/escape-analysis-reducer.h',
+ '../src/compiler/frame.cc',
+ '../src/compiler/frame.h',
+ '../src/compiler/frame-elider.cc',
+ '../src/compiler/frame-elider.h',
+ '../src/compiler/frame-states.cc',
+ '../src/compiler/frame-states.h',
+ '../src/compiler/functional-list.h',
+ '../src/compiler/gap-resolver.cc',
+ '../src/compiler/gap-resolver.h',
+ '../src/compiler/graph-assembler.cc',
+ '../src/compiler/graph-assembler.h',
+ '../src/compiler/graph-reducer.cc',
+ '../src/compiler/graph-reducer.h',
+ '../src/compiler/graph-trimmer.cc',
+ '../src/compiler/graph-trimmer.h',
+ '../src/compiler/graph-visualizer.cc',
+ '../src/compiler/graph-visualizer.h',
+ '../src/compiler/graph.cc',
+ '../src/compiler/graph.h',
+ '../src/compiler/instruction-codes.h',
+ '../src/compiler/instruction-selector-impl.h',
+ '../src/compiler/instruction-selector.cc',
+ '../src/compiler/instruction-selector.h',
+ '../src/compiler/instruction-scheduler.cc',
+ '../src/compiler/instruction-scheduler.h',
+ '../src/compiler/instruction.cc',
+ '../src/compiler/instruction.h',
+ '../src/compiler/int64-lowering.cc',
+ '../src/compiler/int64-lowering.h',
+ '../src/compiler/js-builtin-reducer.cc',
+ '../src/compiler/js-builtin-reducer.h',
+ '../src/compiler/js-call-reducer.cc',
+ '../src/compiler/js-call-reducer.h',
+ '../src/compiler/js-context-specialization.cc',
+ '../src/compiler/js-context-specialization.h',
+ '../src/compiler/js-create-lowering.cc',
+ '../src/compiler/js-create-lowering.h',
+ '../src/compiler/js-generic-lowering.cc',
+ '../src/compiler/js-generic-lowering.h',
+ '../src/compiler/js-graph.cc',
+ '../src/compiler/js-graph.h',
+ '../src/compiler/js-inlining.cc',
+ '../src/compiler/js-inlining.h',
+ '../src/compiler/js-inlining-heuristic.cc',
+ '../src/compiler/js-inlining-heuristic.h',
+ '../src/compiler/js-intrinsic-lowering.cc',
+ '../src/compiler/js-intrinsic-lowering.h',
+ '../src/compiler/js-native-context-specialization.cc',
+ '../src/compiler/js-native-context-specialization.h',
+ '../src/compiler/js-operator.cc',
+ '../src/compiler/js-operator.h',
+ '../src/compiler/js-type-hint-lowering.cc',
+ '../src/compiler/js-type-hint-lowering.h',
+ '../src/compiler/js-typed-lowering.cc',
+ '../src/compiler/js-typed-lowering.h',
+ '../src/compiler/jump-threading.cc',
+ '../src/compiler/jump-threading.h',
+ '../src/compiler/linkage.cc',
+ '../src/compiler/linkage.h',
+ '../src/compiler/live-range-separator.cc',
+ '../src/compiler/live-range-separator.h',
+ '../src/compiler/load-elimination.cc',
+ '../src/compiler/load-elimination.h',
+ '../src/compiler/loop-analysis.cc',
+ '../src/compiler/loop-analysis.h',
+ '../src/compiler/loop-peeling.cc',
+ '../src/compiler/loop-peeling.h',
+ '../src/compiler/loop-variable-optimizer.cc',
+ '../src/compiler/loop-variable-optimizer.h',
+ '../src/compiler/machine-operator-reducer.cc',
+ '../src/compiler/machine-operator-reducer.h',
+ '../src/compiler/machine-operator.cc',
+ '../src/compiler/machine-operator.h',
+ '../src/compiler/machine-graph-verifier.cc',
+ '../src/compiler/machine-graph-verifier.h',
+ '../src/compiler/memory-optimizer.cc',
+ '../src/compiler/memory-optimizer.h',
+ '../src/compiler/move-optimizer.cc',
+ '../src/compiler/move-optimizer.h',
+ '../src/compiler/node-aux-data.h',
+ '../src/compiler/node-cache.cc',
+ '../src/compiler/node-cache.h',
+ '../src/compiler/node-marker.cc',
+ '../src/compiler/node-marker.h',
+ '../src/compiler/node-matchers.cc',
+ '../src/compiler/node-matchers.h',
+ '../src/compiler/node-properties.cc',
+ '../src/compiler/node-properties.h',
+ '../src/compiler/node.cc',
+ '../src/compiler/node.h',
+ '../src/compiler/opcodes.cc',
+ '../src/compiler/opcodes.h',
+ '../src/compiler/operation-typer.cc',
+ '../src/compiler/operation-typer.h',
+ '../src/compiler/operator-properties.cc',
+ '../src/compiler/operator-properties.h',
+ '../src/compiler/operator.cc',
+ '../src/compiler/operator.h',
+ '../src/compiler/osr.cc',
+ '../src/compiler/osr.h',
+ '../src/compiler/persistent-map.h',
+ '../src/compiler/pipeline.cc',
+ '../src/compiler/pipeline.h',
+ '../src/compiler/pipeline-statistics.cc',
+ '../src/compiler/pipeline-statistics.h',
+ '../src/compiler/property-access-builder.cc',
+ '../src/compiler/property-access-builder.h',
+ '../src/compiler/raw-machine-assembler.cc',
+ '../src/compiler/raw-machine-assembler.h',
+ '../src/compiler/redundancy-elimination.cc',
+ '../src/compiler/redundancy-elimination.h',
+ '../src/compiler/register-allocator.cc',
+ '../src/compiler/register-allocator.h',
+ '../src/compiler/register-allocator-verifier.cc',
+ '../src/compiler/register-allocator-verifier.h',
+ '../src/compiler/representation-change.cc',
+ '../src/compiler/representation-change.h',
+ '../src/compiler/schedule.cc',
+ '../src/compiler/schedule.h',
+ '../src/compiler/scheduler.cc',
+ '../src/compiler/scheduler.h',
+ '../src/compiler/select-lowering.cc',
+ '../src/compiler/select-lowering.h',
+ '../src/compiler/simd-scalar-lowering.cc',
+ '../src/compiler/simd-scalar-lowering.h',
+ '../src/compiler/simplified-lowering.cc',
+ '../src/compiler/simplified-lowering.h',
+ '../src/compiler/simplified-operator-reducer.cc',
+ '../src/compiler/simplified-operator-reducer.h',
+ '../src/compiler/simplified-operator.cc',
+ '../src/compiler/simplified-operator.h',
+ '../src/compiler/compiler-source-position-table.cc',
+ '../src/compiler/compiler-source-position-table.h',
+ '../src/compiler/state-values-utils.cc',
+ '../src/compiler/state-values-utils.h',
+ '../src/compiler/store-store-elimination.cc',
+ '../src/compiler/store-store-elimination.h',
+ '../src/compiler/types.cc',
+ '../src/compiler/types.h',
+ '../src/compiler/type-cache.cc',
+ '../src/compiler/type-cache.h',
+ '../src/compiler/typed-optimization.cc',
+ '../src/compiler/typed-optimization.h',
+ '../src/compiler/typer.cc',
+ '../src/compiler/typer.h',
+ '../src/compiler/unwinding-info-writer.h',
+ '../src/compiler/value-numbering-reducer.cc',
+ '../src/compiler/value-numbering-reducer.h',
+ '../src/compiler/verifier.cc',
+ '../src/compiler/verifier.h',
+ '../src/compiler/wasm-compiler.cc',
+ '../src/compiler/wasm-compiler.h',
+ '../src/compiler/wasm-linkage.cc',
+ '../src/compiler/zone-stats.cc',
+ '../src/compiler/zone-stats.h',
+ '../src/compiler-dispatcher/compiler-dispatcher.cc',
+ '../src/compiler-dispatcher/compiler-dispatcher.h',
+ '../src/compiler-dispatcher/compiler-dispatcher-job.cc',
+ '../src/compiler-dispatcher/compiler-dispatcher-job.h',
+ '../src/compiler-dispatcher/compiler-dispatcher-tracer.cc',
+ '../src/compiler-dispatcher/compiler-dispatcher-tracer.h',
+ '../src/compiler-dispatcher/optimizing-compile-dispatcher.cc',
+ '../src/compiler-dispatcher/optimizing-compile-dispatcher.h',
+ '../src/compiler-dispatcher/unoptimized-compile-job.cc',
+ '../src/compiler-dispatcher/unoptimized-compile-job.h',
+ '../src/compiler.cc',
+ '../src/compiler.h',
+ '../src/contexts-inl.h',
+ '../src/contexts.cc',
+ '../src/contexts.h',
+ '../src/conversions-inl.h',
+ '../src/conversions.cc',
+ '../src/conversions.h',
+ '../src/counters-inl.h',
+ '../src/counters.cc',
+ '../src/counters.h',
+ '../src/date.cc',
+ '../src/date.h',
+ '../src/dateparser-inl.h',
+ '../src/dateparser.cc',
+ '../src/dateparser.h',
+ '../src/debug/debug-coverage.cc',
+ '../src/debug/debug-coverage.h',
+ '../src/debug/debug-evaluate.cc',
+ '../src/debug/debug-evaluate.h',
+ '../src/debug/debug-interface.h',
+ '../src/debug/debug-frames.cc',
+ '../src/debug/debug-frames.h',
+ '../src/debug/debug-scope-iterator.cc',
+ '../src/debug/debug-scope-iterator.h',
+ '../src/debug/debug-scopes.cc',
+ '../src/debug/debug-scopes.h',
+ '../src/debug/debug-stack-trace-iterator.cc',
+ '../src/debug/debug-stack-trace-iterator.h',
+ '../src/debug/debug-type-profile.cc',
+ '../src/debug/debug-type-profile.h',
+ '../src/debug/debug.cc',
+ '../src/debug/debug.h',
+ '../src/debug/interface-types.h',
+ '../src/debug/liveedit.cc',
+ '../src/debug/liveedit.h',
+ '../src/deoptimize-reason.cc',
+ '../src/deoptimize-reason.h',
+ '../src/deoptimizer.cc',
+ '../src/deoptimizer.h',
+ '../src/detachable-vector.h',
+ '../src/disasm.h',
+ '../src/disassembler.cc',
+ '../src/disassembler.h',
+ '../src/diy-fp.cc',
+ '../src/diy-fp.h',
+ '../src/double.h',
+ '../src/dtoa.cc',
+ '../src/dtoa.h',
+ '../src/eh-frame.cc',
+ '../src/eh-frame.h',
+ '../src/elements-kind.cc',
+ '../src/elements-kind.h',
+ '../src/elements.cc',
+ '../src/elements.h',
+ '../src/execution.cc',
+ '../src/execution.h',
+ '../src/extensions/externalize-string-extension.cc',
+ '../src/extensions/externalize-string-extension.h',
+ '../src/extensions/free-buffer-extension.cc',
+ '../src/extensions/free-buffer-extension.h',
+ '../src/extensions/gc-extension.cc',
+ '../src/extensions/gc-extension.h',
+ '../src/extensions/ignition-statistics-extension.cc',
+ '../src/extensions/ignition-statistics-extension.h',
+ '../src/extensions/statistics-extension.cc',
+ '../src/extensions/statistics-extension.h',
+ '../src/extensions/trigger-failure-extension.cc',
+ '../src/extensions/trigger-failure-extension.h',
+ '../src/external-reference-table.cc',
+ '../src/external-reference-table.h',
+ '../src/factory-inl.h',
+ '../src/factory.cc',
+ '../src/factory.h',
+ '../src/fast-dtoa.cc',
+ '../src/fast-dtoa.h',
+ '../src/feedback-vector-inl.h',
+ '../src/feedback-vector.cc',
+ '../src/feedback-vector.h',
+ '../src/field-index.h',
+ '../src/field-index-inl.h',
+ '../src/field-type.cc',
+ '../src/field-type.h',
+ '../src/fixed-dtoa.cc',
+ '../src/fixed-dtoa.h',
+ '../src/flag-definitions.h',
+ '../src/flags.cc',
+ '../src/flags.h',
+ '../src/frame-constants.h',
+ '../src/frames-inl.h',
+ '../src/frames.cc',
+ '../src/frames.h',
+ '../src/futex-emulation.cc',
+ '../src/futex-emulation.h',
+ '../src/gdb-jit.cc',
+ '../src/gdb-jit.h',
+ '../src/global-handles.cc',
+ '../src/global-handles.h',
+ '../src/globals.h',
+ '../src/handles-inl.h',
+ '../src/handles.cc',
+ '../src/handles.h',
+ '../src/heap-symbols.h',
+ '../src/heap/array-buffer-collector.cc',
+ '../src/heap/array-buffer-collector.h',
+ '../src/heap/array-buffer-tracker-inl.h',
+ '../src/heap/array-buffer-tracker.cc',
+ '../src/heap/array-buffer-tracker.h',
+ '../src/heap/barrier.h',
+ '../src/heap/code-stats.cc',
+ '../src/heap/code-stats.h',
+ '../src/heap/concurrent-marking.cc',
+ '../src/heap/concurrent-marking.h',
+ '../src/heap/embedder-tracing.cc',
+ '../src/heap/embedder-tracing.h',
+ '../src/heap/memory-reducer.cc',
+ '../src/heap/memory-reducer.h',
+ '../src/heap/gc-idle-time-handler.cc',
+ '../src/heap/gc-idle-time-handler.h',
+ '../src/heap/gc-tracer.cc',
+ '../src/heap/gc-tracer.h',
+ '../src/heap/heap-inl.h',
+ '../src/heap/heap.cc',
+ '../src/heap/heap.h',
+ '../src/heap/incremental-marking-inl.h',
+ '../src/heap/incremental-marking-job.cc',
+ '../src/heap/incremental-marking-job.h',
+ '../src/heap/incremental-marking.cc',
+ '../src/heap/incremental-marking.h',
+ '../src/heap/invalidated-slots-inl.h',
+ '../src/heap/invalidated-slots.cc',
+ '../src/heap/invalidated-slots.h',
+ '../src/heap/item-parallel-job.cc',
+ '../src/heap/item-parallel-job.h',
+ '../src/heap/local-allocator.h',
+ '../src/heap/mark-compact-inl.h',
+ '../src/heap/mark-compact.cc',
+ '../src/heap/mark-compact.h',
+ '../src/heap/marking.cc',
+ '../src/heap/marking.h',
+ '../src/heap/object-stats.cc',
+ '../src/heap/object-stats.h',
+ '../src/heap/objects-visiting-inl.h',
+ '../src/heap/objects-visiting.cc',
+ '../src/heap/objects-visiting.h',
+ '../src/heap/remembered-set.h',
+ '../src/heap/scavenge-job.h',
+ '../src/heap/scavenge-job.cc',
+ '../src/heap/scavenger-inl.h',
+ '../src/heap/scavenger.cc',
+ '../src/heap/scavenger.h',
+ '../src/heap/slot-set.h',
+ '../src/heap/spaces-inl.h',
+ '../src/heap/spaces.cc',
+ '../src/heap/spaces.h',
+ '../src/heap/store-buffer.cc',
+ '../src/heap/store-buffer.h',
+ '../src/heap/stress-marking-observer.cc',
+ '../src/heap/stress-marking-observer.h',
+ '../src/heap/stress-scavenge-observer.cc',
+ '../src/heap/stress-scavenge-observer.h',
+ '../src/heap/sweeper.cc',
+ '../src/heap/sweeper.h',
+ '../src/heap/worklist.h',
+ '../src/intl.cc',
+ '../src/intl.h',
+ '../src/icu_util.cc',
+ '../src/icu_util.h',
+ '../src/ic/call-optimization.cc',
+ '../src/ic/call-optimization.h',
+ '../src/ic/handler-configuration.cc',
+ '../src/ic/handler-configuration-inl.h',
+ '../src/ic/handler-configuration.h',
+ '../src/ic/ic-inl.h',
+ '../src/ic/ic-stats.cc',
+ '../src/ic/ic-stats.h',
+ '../src/ic/ic.cc',
+ '../src/ic/ic.h',
+ '../src/identity-map.cc',
+ '../src/identity-map.h',
+ '../src/instruction-stream.cc',
+ '../src/instruction-stream.h',
+ '../src/interface-descriptors.cc',
+ '../src/interface-descriptors.h',
+ '../src/interpreter/block-coverage-builder.h',
+ '../src/interpreter/bytecodes.cc',
+ '../src/interpreter/bytecodes.h',
+ '../src/interpreter/bytecode-array-accessor.cc',
+ '../src/interpreter/bytecode-array-accessor.h',
+ '../src/interpreter/bytecode-array-builder.cc',
+ '../src/interpreter/bytecode-array-builder.h',
+ '../src/interpreter/bytecode-array-iterator.cc',
+ '../src/interpreter/bytecode-array-iterator.h',
+ '../src/interpreter/bytecode-array-random-iterator.cc',
+ '../src/interpreter/bytecode-array-random-iterator.h',
+ '../src/interpreter/bytecode-array-writer.cc',
+ '../src/interpreter/bytecode-array-writer.h',
+ '../src/interpreter/bytecode-decoder.cc',
+ '../src/interpreter/bytecode-decoder.h',
+ '../src/interpreter/bytecode-flags.cc',
+ '../src/interpreter/bytecode-flags.h',
+ '../src/interpreter/bytecode-generator.cc',
+ '../src/interpreter/bytecode-generator.h',
+ '../src/interpreter/bytecode-label.cc',
+ '../src/interpreter/bytecode-label.h',
+ '../src/interpreter/bytecode-node.cc',
+ '../src/interpreter/bytecode-node.h',
+ '../src/interpreter/bytecode-operands.cc',
+ '../src/interpreter/bytecode-operands.h',
+ '../src/interpreter/bytecode-register.cc',
+ '../src/interpreter/bytecode-register.h',
+ '../src/interpreter/bytecode-register-allocator.h',
+ '../src/interpreter/bytecode-register-optimizer.cc',
+ '../src/interpreter/bytecode-register-optimizer.h',
+ '../src/interpreter/bytecode-source-info.cc',
+ '../src/interpreter/bytecode-source-info.h',
+ '../src/interpreter/bytecode-jump-table.h',
+ '../src/interpreter/bytecode-traits.h',
+ '../src/interpreter/constant-array-builder.cc',
+ '../src/interpreter/constant-array-builder.h',
+ '../src/interpreter/control-flow-builders.cc',
+ '../src/interpreter/control-flow-builders.h',
+ '../src/interpreter/handler-table-builder.cc',
+ '../src/interpreter/handler-table-builder.h',
+ '../src/interpreter/interpreter.cc',
+ '../src/interpreter/interpreter.h',
+ '../src/interpreter/interpreter-generator.h',
+ '../src/interpreter/interpreter-intrinsics.cc',
+ '../src/interpreter/interpreter-intrinsics.h',
+ '../src/isolate-inl.h',
+ '../src/isolate.cc',
+ '../src/isolate.h',
+ '../src/json-parser.cc',
+ '../src/json-parser.h',
+ '../src/json-stringifier.cc',
+ '../src/json-stringifier.h',
+ '../src/keys.h',
+ '../src/keys.cc',
+ '../src/label.h',
+ '../src/layout-descriptor-inl.h',
+ '../src/layout-descriptor.cc',
+ '../src/layout-descriptor.h',
+ '../src/locked-queue-inl.h',
+ '../src/locked-queue.h',
+ '../src/log-inl.h',
+ '../src/log-utils.cc',
+ '../src/log-utils.h',
+ '../src/log.cc',
+ '../src/log.h',
+ '../src/lookup-cache-inl.h',
+ '../src/lookup-cache.cc',
+ '../src/lookup-cache.h',
+ '../src/lookup.cc',
+ '../src/lookup.h',
+ '../src/map-updater.cc',
+ '../src/map-updater.h',
+ '../src/macro-assembler-inl.h',
+ '../src/macro-assembler.h',
+ '../src/machine-type.cc',
+ '../src/machine-type.h',
+ '../src/managed.h',
+ '../src/messages.cc',
+ '../src/messages.h',
+ '../src/msan.h',
+ '../src/objects-body-descriptors-inl.h',
+ '../src/objects-body-descriptors.h',
+ '../src/objects-debug.cc',
+ '../src/objects-inl.h',
+ '../src/objects-printer.cc',
+ '../src/objects.cc',
+ '../src/objects.h',
+ '../src/objects/arguments-inl.h',
+ '../src/objects/arguments.h',
+ '../src/objects/bigint.cc',
+ '../src/objects/bigint.h',
+ '../src/objects/code-inl.h',
+ '../src/objects/code.h',
+ '../src/objects/compilation-cache.h',
+ '../src/objects/compilation-cache-inl.h',
+ '../src/objects/data-handler.h',
+ '../src/objects/data-handler-inl.h',
+ '../src/objects/debug-objects-inl.h',
+ '../src/objects/debug-objects.cc',
+ '../src/objects/debug-objects.h',
+ '../src/objects/descriptor-array.h',
+ '../src/objects/dictionary.h',
+ '../src/objects/fixed-array.h',
+ '../src/objects/fixed-array-inl.h',
+ '../src/objects/frame-array.h',
+ '../src/objects/frame-array-inl.h',
+ '../src/objects/hash-table-inl.h',
+ '../src/objects/hash-table.h',
+ '../src/objects/intl-objects.cc',
+ '../src/objects/intl-objects.h',
+ '../src/objects/js-array.h',
+ '../src/objects/js-array-inl.h',
+ '../src/objects/js-collection.h',
+ '../src/objects/js-collection-inl.h',
+ '../src/objects/js-regexp.h',
+ '../src/objects/js-regexp-inl.h',
+ '../src/objects/literal-objects.cc',
+ '../src/objects/literal-objects-inl.h',
+ '../src/objects/literal-objects.h',
+ '../src/objects/map-inl.h',
+ '../src/objects/map.h',
+ '../src/objects/name-inl.h',
+ '../src/objects/name.h',
+ '../src/objects/module-inl.h',
+ '../src/objects/module.cc',
+ '../src/objects/module.h',
+ '../src/objects/object-macros.h',
+ '../src/objects/object-macros-undef.h',
+ '../src/objects/property-descriptor-object.h',
+ '../src/objects/property-descriptor-object-inl.h',
+ '../src/objects/regexp-match-info.h',
+ '../src/objects/scope-info.cc',
+ '../src/objects/scope-info.h',
+ '../src/objects/script.h',
+ '../src/objects/script-inl.h',
+ '../src/objects/shared-function-info-inl.h',
+ '../src/objects/shared-function-info.h',
+ '../src/objects/string-inl.h',
+ '../src/objects/string.h',
+ '../src/objects/string-table.h',
+ '../src/objects/template-objects.cc',
+ '../src/objects/template-objects.h',
+ '../src/ostreams.cc',
+ '../src/ostreams.h',
+ '../src/parsing/background-parsing-task.cc',
+ '../src/parsing/background-parsing-task.h',
+ '../src/parsing/duplicate-finder.h',
+ '../src/parsing/expression-classifier.h',
+ '../src/parsing/expression-scope-reparenter.cc',
+ '../src/parsing/expression-scope-reparenter.h',
+ '../src/parsing/func-name-inferrer.cc',
+ '../src/parsing/func-name-inferrer.h',
+ '../src/parsing/parse-info.cc',
+ '../src/parsing/parse-info.h',
+ '../src/parsing/parser-base.h',
+ '../src/parsing/parser.cc',
+ '../src/parsing/parser.h',
+ '../src/parsing/parsing.cc',
+ '../src/parsing/parsing.h',
+ '../src/parsing/pattern-rewriter.cc',
+ '../src/parsing/preparse-data-format.h',
+ '../src/parsing/preparse-data.cc',
+ '../src/parsing/preparse-data.h',
+ '../src/parsing/preparsed-scope-data.cc',
+ '../src/parsing/preparsed-scope-data.h',
+ '../src/parsing/preparser.cc',
+ '../src/parsing/preparser.h',
+ '../src/parsing/rewriter.cc',
+ '../src/parsing/rewriter.h',
+ '../src/parsing/scanner-character-streams.cc',
+ '../src/parsing/scanner-character-streams.h',
+ '../src/parsing/scanner.cc',
+ '../src/parsing/scanner.h',
+ '../src/parsing/token.cc',
+ '../src/parsing/token.h',
+ '../src/pending-compilation-error-handler.cc',
+ '../src/pending-compilation-error-handler.h',
+ '../src/perf-jit.cc',
+ '../src/perf-jit.h',
+ '../src/profiler/allocation-tracker.cc',
+ '../src/profiler/allocation-tracker.h',
+ '../src/profiler/circular-queue-inl.h',
+ '../src/profiler/circular-queue.h',
+ '../src/profiler/cpu-profiler-inl.h',
+ '../src/profiler/cpu-profiler.cc',
+ '../src/profiler/cpu-profiler.h',
+ '../src/profiler/heap-profiler.cc',
+ '../src/profiler/heap-profiler.h',
+ '../src/profiler/heap-snapshot-generator-inl.h',
+ '../src/profiler/heap-snapshot-generator.cc',
+ '../src/profiler/heap-snapshot-generator.h',
+ '../src/profiler/profiler-listener.cc',
+ '../src/profiler/profiler-listener.h',
+ '../src/profiler/profile-generator-inl.h',
+ '../src/profiler/profile-generator.cc',
+ '../src/profiler/profile-generator.h',
+ '../src/profiler/sampling-heap-profiler.cc',
+ '../src/profiler/sampling-heap-profiler.h',
+ '../src/profiler/strings-storage.cc',
+ '../src/profiler/strings-storage.h',
+ '../src/profiler/tick-sample.cc',
+ '../src/profiler/tick-sample.h',
+ '../src/profiler/tracing-cpu-profiler.cc',
+ '../src/profiler/tracing-cpu-profiler.h',
+ '../src/profiler/unbound-queue-inl.h',
+ '../src/profiler/unbound-queue.h',
+ '../src/property-descriptor.cc',
+ '../src/property-descriptor.h',
+ '../src/property-details.h',
+ '../src/property.cc',
+ '../src/property.h',
+ '../src/prototype.h',
+ '../src/regexp/bytecodes-irregexp.h',
+ '../src/regexp/interpreter-irregexp.cc',
+ '../src/regexp/interpreter-irregexp.h',
+ '../src/regexp/jsregexp-inl.h',
+ '../src/regexp/jsregexp.cc',
+ '../src/regexp/jsregexp.h',
+ '../src/regexp/regexp-ast.cc',
+ '../src/regexp/regexp-ast.h',
+ '../src/regexp/regexp-macro-assembler-irregexp-inl.h',
+ '../src/regexp/regexp-macro-assembler-irregexp.cc',
+ '../src/regexp/regexp-macro-assembler-irregexp.h',
+ '../src/regexp/regexp-macro-assembler-tracer.cc',
+ '../src/regexp/regexp-macro-assembler-tracer.h',
+ '../src/regexp/regexp-macro-assembler.cc',
+ '../src/regexp/regexp-macro-assembler.h',
+ '../src/regexp/regexp-parser.cc',
+ '../src/regexp/regexp-parser.h',
+ '../src/regexp/regexp-stack.cc',
+ '../src/regexp/regexp-stack.h',
+ '../src/regexp/regexp-utils.cc',
+ '../src/regexp/regexp-utils.h',
+ '../src/register-configuration.cc',
+ '../src/register-configuration.h',
+ '../src/reglist.h',
+ '../src/runtime-profiler.cc',
+ '../src/runtime-profiler.h',
+ '../src/runtime/runtime-array.cc',
+ '../src/runtime/runtime-atomics.cc',
+ '../src/runtime/runtime-bigint.cc',
+ '../src/runtime/runtime-classes.cc',
+ '../src/runtime/runtime-collections.cc',
+ '../src/runtime/runtime-compiler.cc',
+ '../src/runtime/runtime-date.cc',
+ '../src/runtime/runtime-debug.cc',
+ '../src/runtime/runtime-forin.cc',
+ '../src/runtime/runtime-function.cc',
+ '../src/runtime/runtime-error.cc',
+ '../src/runtime/runtime-futex.cc',
+ '../src/runtime/runtime-generator.cc',
+ '../src/runtime/runtime-intl.cc',
+ '../src/runtime/runtime-internal.cc',
+ '../src/runtime/runtime-interpreter.cc',
+ '../src/runtime/runtime-literals.cc',
+ '../src/runtime/runtime-liveedit.cc',
+ '../src/runtime/runtime-maths.cc',
+ '../src/runtime/runtime-module.cc',
+ '../src/runtime/runtime-numbers.cc',
+ '../src/runtime/runtime-object.cc',
+ '../src/runtime/runtime-operators.cc',
+ '../src/runtime/runtime-promise.cc',
+ '../src/runtime/runtime-proxy.cc',
+ '../src/runtime/runtime-regexp.cc',
+ '../src/runtime/runtime-scopes.cc',
+ '../src/runtime/runtime-strings.cc',
+ '../src/runtime/runtime-symbol.cc',
+ '../src/runtime/runtime-test.cc',
+ '../src/runtime/runtime-typedarray.cc',
+ '../src/runtime/runtime-utils.h',
+ '../src/runtime/runtime-wasm.cc',
+ '../src/runtime/runtime.cc',
+ '../src/runtime/runtime.h',
+ '../src/safepoint-table.cc',
+ '../src/safepoint-table.h',
+ '../src/setup-isolate.h',
+ '../src/signature.h',
+ '../src/simulator-base.cc',
+ '../src/simulator-base.h',
+ '../src/simulator.h',
+ '../src/snapshot/builtin-deserializer-allocator.cc',
+ '../src/snapshot/builtin-deserializer-allocator.h',
+ '../src/snapshot/builtin-deserializer.cc',
+ '../src/snapshot/builtin-deserializer.h',
+ '../src/snapshot/builtin-serializer-allocator.cc',
+ '../src/snapshot/builtin-serializer-allocator.h',
+ '../src/snapshot/builtin-serializer.cc',
+ '../src/snapshot/builtin-serializer.h',
+ '../src/snapshot/builtin-snapshot-utils.cc',
+ '../src/snapshot/builtin-snapshot-utils.h',
+ '../src/snapshot/code-serializer.cc',
+ '../src/snapshot/code-serializer.h',
+ '../src/snapshot/default-deserializer-allocator.cc',
+ '../src/snapshot/default-deserializer-allocator.h',
+ '../src/snapshot/default-serializer-allocator.cc',
+ '../src/snapshot/default-serializer-allocator.h',
+ '../src/snapshot/deserializer.cc',
+ '../src/snapshot/deserializer.h',
+ '../src/snapshot/natives-common.cc',
+ '../src/snapshot/natives.h',
+ '../src/snapshot/object-deserializer.cc',
+ '../src/snapshot/object-deserializer.h',
+ '../src/snapshot/partial-deserializer.cc',
+ '../src/snapshot/partial-deserializer.h',
+ '../src/snapshot/partial-serializer.cc',
+ '../src/snapshot/partial-serializer.h',
+ '../src/snapshot/serializer.cc',
+ '../src/snapshot/serializer-common.cc',
+ '../src/snapshot/serializer-common.h',
+ '../src/snapshot/serializer.h',
+ '../src/snapshot/snapshot-common.cc',
+ '../src/snapshot/snapshot.h',
+ '../src/snapshot/snapshot-source-sink.cc',
+ '../src/snapshot/snapshot-source-sink.h',
+ '../src/snapshot/startup-deserializer.cc',
+ '../src/snapshot/startup-deserializer.h',
+ '../src/snapshot/startup-serializer.cc',
+ '../src/snapshot/startup-serializer.h',
+ '../src/source-position-table.cc',
+ '../src/source-position-table.h',
+ '../src/source-position.cc',
+ '../src/source-position.h',
+ '../src/splay-tree.h',
+ '../src/splay-tree-inl.h',
+ '../src/startup-data-util.cc',
+ '../src/startup-data-util.h',
+ '../src/string-builder.cc',
+ '../src/string-builder.h',
+ '../src/string-case.cc',
+ '../src/string-case.h',
+ '../src/string-hasher-inl.h',
+ '../src/string-hasher.h',
+ '../src/string-search.h',
+ '../src/string-stream.cc',
+ '../src/string-stream.h',
+ '../src/strtod.cc',
+ '../src/strtod.h',
+ '../src/ic/stub-cache.cc',
+ '../src/ic/stub-cache.h',
+ '../src/third_party/utf8-decoder/utf8-decoder.h',
+ '../src/tracing/trace-event.cc',
+ '../src/tracing/trace-event.h',
+ '../src/tracing/traced-value.cc',
+ '../src/tracing/traced-value.h',
+ '../src/tracing/tracing-category-observer.cc',
+ '../src/tracing/tracing-category-observer.h',
+ '../src/transitions-inl.h',
+ '../src/transitions.cc',
+ '../src/transitions.h',
+ '../src/trap-handler/handler-outside.cc',
+ '../src/trap-handler/handler-shared.cc',
+ '../src/trap-handler/trap-handler.h',
+ '../src/trap-handler/trap-handler-internal.h',
+ '../src/type-hints.cc',
+ '../src/type-hints.h',
+ '../src/unicode-inl.h',
+ '../src/unicode.cc',
+ '../src/unicode.h',
+ '../src/unicode-cache-inl.h',
+ '../src/unicode-cache.h',
+ '../src/unicode-decoder.cc',
+ '../src/unicode-decoder.h',
+ '../src/uri.cc',
+ '../src/uri.h',
+ '../src/utils-inl.h',
+ '../src/utils.cc',
+ '../src/utils.h',
+ '../src/v8.cc',
+ '../src/v8.h',
+ '../src/v8memory.h',
+ '../src/v8threads.cc',
+ '../src/v8threads.h',
+ '../src/value-serializer.cc',
+ '../src/value-serializer.h',
+ '../src/vector-slot-pair.cc',
+ '../src/vector-slot-pair.h',
+ '../src/vector.h',
+ '../src/version.cc',
+ '../src/version.h',
+ '../src/visitors.cc',
+ '../src/visitors.h',
+ '../src/vm-state-inl.h',
+ '../src/vm-state.h',
+ '../src/wasm/baseline/liftoff-assembler-defs.h',
+ '../src/wasm/baseline/liftoff-assembler.cc',
+ '../src/wasm/baseline/liftoff-assembler.h',
+ '../src/wasm/baseline/liftoff-compiler.cc',
+ '../src/wasm/baseline/liftoff-register.h',
+ '../src/wasm/compilation-manager.cc',
+ '../src/wasm/compilation-manager.h',
+ '../src/wasm/decoder.h',
+ '../src/wasm/function-body-decoder.cc',
+ '../src/wasm/function-body-decoder.h',
+ '../src/wasm/function-body-decoder-impl.h',
+ '../src/wasm/leb-helper.h',
+ '../src/wasm/local-decl-encoder.cc',
+ '../src/wasm/local-decl-encoder.h',
+ '../src/wasm/memory-tracing.cc',
+ '../src/wasm/memory-tracing.h',
+ '../src/wasm/module-compiler.cc',
+ '../src/wasm/module-compiler.h',
+ '../src/wasm/module-decoder.cc',
+ '../src/wasm/module-decoder.h',
+ '../src/wasm/signature-map.cc',
+ '../src/wasm/signature-map.h',
+ '../src/wasm/streaming-decoder.cc',
+ '../src/wasm/streaming-decoder.h',
+ '../src/wasm/wasm-code-manager.cc',
+ '../src/wasm/wasm-code-manager.h',
+ '../src/wasm/wasm-code-specialization.cc',
+ '../src/wasm/wasm-code-specialization.h',
+ '../src/wasm/wasm-code-wrapper.cc',
+ '../src/wasm/wasm-code-wrapper.h',
+ '../src/wasm/wasm-constants.h',
+ '../src/wasm/wasm-debug.cc',
+ '../src/wasm/wasm-engine.cc',
+ '../src/wasm/wasm-engine.h',
+ '../src/wasm/wasm-external-refs.cc',
+ '../src/wasm/wasm-external-refs.h',
+ '../src/wasm/wasm-js.cc',
+ '../src/wasm/wasm-js.h',
+ '../src/wasm/wasm-limits.h',
+ '../src/wasm/wasm-memory.cc',
+ '../src/wasm/wasm-memory.h',
+ '../src/wasm/wasm-module.cc',
+ '../src/wasm/wasm-module.h',
+ '../src/wasm/wasm-module-builder.cc',
+ '../src/wasm/wasm-module-builder.h',
+ '../src/wasm/wasm-interpreter.cc',
+ '../src/wasm/wasm-interpreter.h',
+ '../src/wasm/wasm-objects-inl.h',
+ '../src/wasm/wasm-objects.cc',
+ '../src/wasm/wasm-objects.h',
+ '../src/wasm/wasm-opcodes.cc',
+ '../src/wasm/wasm-opcodes.h',
+ '../src/wasm/wasm-result.cc',
+ '../src/wasm/wasm-result.h',
+ '../src/wasm/wasm-serialization.cc',
+ '../src/wasm/wasm-serialization.h',
+ '../src/wasm/wasm-text.cc',
+ '../src/wasm/wasm-text.h',
+ '../src/wasm/wasm-value.h',
+ '../src/zone/accounting-allocator.cc',
+ '../src/zone/accounting-allocator.h',
+ '../src/zone/zone-segment.cc',
+ '../src/zone/zone-segment.h',
+ '../src/zone/zone.cc',
+ '../src/zone/zone.h',
+ '../src/zone/zone-chunk-list.h',
+ '../src/zone/zone-allocator.h',
+ '../src/zone/zone-containers.h',
+ '../src/zone/zone-handle-set.h',
+ '../src/zone/zone-list-inl.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['v8_target_arch=="arm"', {
+ 'sources': [
+ '../src/arm/assembler-arm-inl.h',
+ '../src/arm/assembler-arm.cc',
+ '../src/arm/assembler-arm.h',
+ '../src/arm/code-stubs-arm.cc',
+ '../src/arm/code-stubs-arm.h',
+ '../src/arm/codegen-arm.cc',
+ '../src/arm/constants-arm.h',
+ '../src/arm/constants-arm.cc',
+ '../src/arm/cpu-arm.cc',
+ '../src/arm/deoptimizer-arm.cc',
+ '../src/arm/disasm-arm.cc',
+ '../src/arm/frame-constants-arm.cc',
+ '../src/arm/frame-constants-arm.h',
+ '../src/arm/interface-descriptors-arm.cc',
+ '../src/arm/interface-descriptors-arm.h',
+ '../src/arm/macro-assembler-arm.cc',
+ '../src/arm/macro-assembler-arm.h',
+ '../src/arm/simulator-arm.cc',
+ '../src/arm/simulator-arm.h',
+ '../src/arm/eh-frame-arm.cc',
+ '../src/compiler/arm/code-generator-arm.cc',
+ '../src/compiler/arm/instruction-codes-arm.h',
+ '../src/compiler/arm/instruction-scheduler-arm.cc',
+ '../src/compiler/arm/instruction-selector-arm.cc',
+ '../src/compiler/arm/unwinding-info-writer-arm.cc',
+ '../src/compiler/arm/unwinding-info-writer-arm.h',
+ '../src/debug/arm/debug-arm.cc',
+ '../src/regexp/arm/regexp-macro-assembler-arm.cc',
+ '../src/regexp/arm/regexp-macro-assembler-arm.h',
+ '../src/wasm/baseline/arm/liftoff-assembler-arm.h',
+ ],
+ }],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [
+ '../src/arm64/assembler-arm64.cc',
+ '../src/arm64/assembler-arm64.h',
+ '../src/arm64/assembler-arm64-inl.h',
+ '../src/arm64/codegen-arm64.cc',
+ '../src/arm64/code-stubs-arm64.cc',
+ '../src/arm64/code-stubs-arm64.h',
+ '../src/arm64/constants-arm64.h',
+ '../src/arm64/cpu-arm64.cc',
+ '../src/arm64/decoder-arm64.cc',
+ '../src/arm64/decoder-arm64.h',
+ '../src/arm64/decoder-arm64-inl.h',
+ '../src/arm64/deoptimizer-arm64.cc',
+ '../src/arm64/disasm-arm64.cc',
+ '../src/arm64/disasm-arm64.h',
+ '../src/arm64/frame-constants-arm64.cc',
+ '../src/arm64/frame-constants-arm64.h',
+ '../src/arm64/instructions-arm64-constants.cc',
+ '../src/arm64/instructions-arm64.cc',
+ '../src/arm64/instructions-arm64.h',
+ '../src/arm64/instrument-arm64.cc',
+ '../src/arm64/instrument-arm64.h',
+ '../src/arm64/interface-descriptors-arm64.cc',
+ '../src/arm64/interface-descriptors-arm64.h',
+ '../src/arm64/macro-assembler-arm64.cc',
+ '../src/arm64/macro-assembler-arm64.h',
+ '../src/arm64/macro-assembler-arm64-inl.h',
+ '../src/arm64/simulator-arm64.cc',
+ '../src/arm64/simulator-arm64.h',
+ '../src/arm64/simulator-logic-arm64.cc',
+ '../src/arm64/utils-arm64.cc',
+ '../src/arm64/utils-arm64.h',
+ '../src/arm64/eh-frame-arm64.cc',
+ '../src/compiler/arm64/code-generator-arm64.cc',
+ '../src/compiler/arm64/instruction-codes-arm64.h',
+ '../src/compiler/arm64/instruction-scheduler-arm64.cc',
+ '../src/compiler/arm64/instruction-selector-arm64.cc',
+ '../src/compiler/arm64/unwinding-info-writer-arm64.cc',
+ '../src/compiler/arm64/unwinding-info-writer-arm64.h',
+ '../src/debug/arm64/debug-arm64.cc',
+ '../src/regexp/arm64/regexp-macro-assembler-arm64.cc',
+ '../src/regexp/arm64/regexp-macro-assembler-arm64.h',
+ '../src/wasm/baseline/arm64/liftoff-assembler-arm64.h',
+ ],
+ }],
+ ['v8_target_arch=="ia32"', {
+ 'sources': [
+ '../src/ia32/assembler-ia32-inl.h',
+ '../src/ia32/assembler-ia32.cc',
+ '../src/ia32/assembler-ia32.h',
+ '../src/ia32/code-stubs-ia32.cc',
+ '../src/ia32/codegen-ia32.cc',
+ '../src/ia32/cpu-ia32.cc',
+ '../src/ia32/deoptimizer-ia32.cc',
+ '../src/ia32/disasm-ia32.cc',
+ '../src/ia32/frame-constants-ia32.cc',
+ '../src/ia32/frame-constants-ia32.h',
+ '../src/ia32/interface-descriptors-ia32.cc',
+ '../src/ia32/macro-assembler-ia32.cc',
+ '../src/ia32/macro-assembler-ia32.h',
+ '../src/ia32/simulator-ia32.cc',
+ '../src/ia32/simulator-ia32.h',
+ '../src/ia32/sse-instr.h',
+ '../src/compiler/ia32/code-generator-ia32.cc',
+ '../src/compiler/ia32/instruction-codes-ia32.h',
+ '../src/compiler/ia32/instruction-scheduler-ia32.cc',
+ '../src/compiler/ia32/instruction-selector-ia32.cc',
+ '../src/debug/ia32/debug-ia32.cc',
+ '../src/regexp/ia32/regexp-macro-assembler-ia32.cc',
+ '../src/regexp/ia32/regexp-macro-assembler-ia32.h',
+ '../src/wasm/baseline/ia32/liftoff-assembler-ia32.h',
+ ],
+ }],
+ ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
+ 'sources': [
+ '../src/mips/assembler-mips.cc',
+ '../src/mips/assembler-mips.h',
+ '../src/mips/assembler-mips-inl.h',
+ '../src/mips/codegen-mips.cc',
+ '../src/mips/code-stubs-mips.cc',
+ '../src/mips/code-stubs-mips.h',
+ '../src/mips/constants-mips.cc',
+ '../src/mips/constants-mips.h',
+ '../src/mips/cpu-mips.cc',
+ '../src/mips/deoptimizer-mips.cc',
+ '../src/mips/disasm-mips.cc',
+ '../src/mips/frame-constants-mips.cc',
+ '../src/mips/frame-constants-mips.h',
+ '../src/mips/interface-descriptors-mips.cc',
+ '../src/mips/macro-assembler-mips.cc',
+ '../src/mips/macro-assembler-mips.h',
+ '../src/mips/simulator-mips.cc',
+ '../src/mips/simulator-mips.h',
+ '../src/compiler/mips/code-generator-mips.cc',
+ '../src/compiler/mips/instruction-codes-mips.h',
+ '../src/compiler/mips/instruction-scheduler-mips.cc',
+ '../src/compiler/mips/instruction-selector-mips.cc',
+ '../src/debug/mips/debug-mips.cc',
+ '../src/regexp/mips/regexp-macro-assembler-mips.cc',
+ '../src/regexp/mips/regexp-macro-assembler-mips.h',
+ '../src/wasm/baseline/mips/liftoff-assembler-mips.h',
+ ],
+ }],
+ ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
+ 'sources': [
+ '../src/mips64/assembler-mips64.cc',
+ '../src/mips64/assembler-mips64.h',
+ '../src/mips64/assembler-mips64-inl.h',
+ '../src/mips64/codegen-mips64.cc',
+ '../src/mips64/code-stubs-mips64.cc',
+ '../src/mips64/code-stubs-mips64.h',
+ '../src/mips64/constants-mips64.cc',
+ '../src/mips64/constants-mips64.h',
+ '../src/mips64/cpu-mips64.cc',
+ '../src/mips64/deoptimizer-mips64.cc',
+ '../src/mips64/disasm-mips64.cc',
+ '../src/mips64/frame-constants-mips64.cc',
+ '../src/mips64/frame-constants-mips64.h',
+ '../src/mips64/interface-descriptors-mips64.cc',
+ '../src/mips64/macro-assembler-mips64.cc',
+ '../src/mips64/macro-assembler-mips64.h',
+ '../src/mips64/simulator-mips64.cc',
+ '../src/mips64/simulator-mips64.h',
+ '../src/compiler/mips64/code-generator-mips64.cc',
+ '../src/compiler/mips64/instruction-codes-mips64.h',
+ '../src/compiler/mips64/instruction-scheduler-mips64.cc',
+ '../src/compiler/mips64/instruction-selector-mips64.cc',
+ '../src/debug/mips64/debug-mips64.cc',
+ '../src/regexp/mips64/regexp-macro-assembler-mips64.cc',
+ '../src/regexp/mips64/regexp-macro-assembler-mips64.h',
+ '../src/wasm/baseline/mips64/liftoff-assembler-mips64.h',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'sources': [
+ '../src/compiler/x64/code-generator-x64.cc',
+ '../src/compiler/x64/instruction-codes-x64.h',
+ '../src/compiler/x64/instruction-scheduler-x64.cc',
+ '../src/compiler/x64/instruction-selector-x64.cc',
+ '../src/compiler/x64/unwinding-info-writer-x64.cc',
+ '../src/compiler/x64/unwinding-info-writer-x64.h',
+ '../src/x64/assembler-x64-inl.h',
+ '../src/x64/assembler-x64.cc',
+ '../src/x64/assembler-x64.h',
+ '../src/x64/code-stubs-x64.cc',
+ '../src/x64/codegen-x64.cc',
+ '../src/x64/cpu-x64.cc',
+ '../src/x64/deoptimizer-x64.cc',
+ '../src/x64/disasm-x64.cc',
+ '../src/x64/eh-frame-x64.cc',
+ '../src/x64/frame-constants-x64.cc',
+ '../src/x64/frame-constants-x64.h',
+ '../src/x64/interface-descriptors-x64.cc',
+ '../src/x64/macro-assembler-x64.cc',
+ '../src/x64/macro-assembler-x64.h',
+ '../src/x64/simulator-x64.cc',
+ '../src/x64/simulator-x64.h',
+ '../src/x64/sse-instr.h',
+ '../src/debug/x64/debug-x64.cc',
+ '../src/regexp/x64/regexp-macro-assembler-x64.cc',
+ '../src/regexp/x64/regexp-macro-assembler-x64.h',
+ '../src/third_party/valgrind/valgrind.h',
+ '../src/wasm/baseline/x64/liftoff-assembler-x64.h',
+ ],
+ }],
+ ['v8_target_arch=="x64" and OS=="linux"', {
+ 'sources': ['../src/trap-handler/handler-inside.cc']
+ }],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [
+ '../src/compiler/ppc/code-generator-ppc.cc',
+ '../src/compiler/ppc/instruction-codes-ppc.h',
+ '../src/compiler/ppc/instruction-scheduler-ppc.cc',
+ '../src/compiler/ppc/instruction-selector-ppc.cc',
+ '../src/debug/ppc/debug-ppc.cc',
+ '../src/ppc/assembler-ppc-inl.h',
+ '../src/ppc/assembler-ppc.cc',
+ '../src/ppc/assembler-ppc.h',
+ '../src/ppc/code-stubs-ppc.cc',
+ '../src/ppc/code-stubs-ppc.h',
+ '../src/ppc/codegen-ppc.cc',
+ '../src/ppc/constants-ppc.h',
+ '../src/ppc/constants-ppc.cc',
+ '../src/ppc/cpu-ppc.cc',
+ '../src/ppc/deoptimizer-ppc.cc',
+ '../src/ppc/disasm-ppc.cc',
+ '../src/ppc/frame-constants-ppc.cc',
+ '../src/ppc/frame-constants-ppc.h',
+ '../src/ppc/interface-descriptors-ppc.cc',
+ '../src/ppc/macro-assembler-ppc.cc',
+ '../src/ppc/macro-assembler-ppc.h',
+ '../src/ppc/simulator-ppc.cc',
+ '../src/ppc/simulator-ppc.h',
+ '../src/regexp/ppc/regexp-macro-assembler-ppc.cc',
+ '../src/regexp/ppc/regexp-macro-assembler-ppc.h',
+ '../src/wasm/baseline/ppc/liftoff-assembler-ppc.h',
+ ],
+ }],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'sources': [
+ '../src/compiler/s390/code-generator-s390.cc',
+ '../src/compiler/s390/instruction-codes-s390.h',
+ '../src/compiler/s390/instruction-scheduler-s390.cc',
+ '../src/compiler/s390/instruction-selector-s390.cc',
+ '../src/debug/s390/debug-s390.cc',
+ '../src/regexp/s390/regexp-macro-assembler-s390.cc',
+ '../src/regexp/s390/regexp-macro-assembler-s390.h',
+ '../src/s390/assembler-s390.cc',
+ '../src/s390/assembler-s390.h',
+ '../src/s390/assembler-s390-inl.h',
+ '../src/s390/codegen-s390.cc',
+ '../src/s390/code-stubs-s390.cc',
+ '../src/s390/code-stubs-s390.h',
+ '../src/s390/constants-s390.cc',
+ '../src/s390/constants-s390.h',
+ '../src/s390/cpu-s390.cc',
+ '../src/s390/deoptimizer-s390.cc',
+ '../src/s390/disasm-s390.cc',
+ '../src/s390/frame-constants-s390.cc',
+ '../src/s390/frame-constants-s390.h',
+ '../src/s390/interface-descriptors-s390.cc',
+ '../src/s390/macro-assembler-s390.cc',
+ '../src/s390/macro-assembler-s390.h',
+ '../src/s390/simulator-s390.cc',
+ '../src/s390/simulator-s390.h',
+ '../src/wasm/baseline/s390/liftoff-assembler-s390.h',
+ ],
+ }],
+ ['OS=="win"', {
+ 'variables': {
+ 'gyp_generators': '<!(echo $GYP_GENERATORS)',
+ },
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
+ # When building Official, the .lib is too large and exceeds the 2G
+ # limit. This breaks it into multiple pieces to avoid the limit.
+ # See http://crbug.com/485155.
+ 'msvs_shard': 4,
+ # This will prevent V8's .cc files from conflicting with the inspector's
+ # .cpp files in the same shard.
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'ObjectFile':'$(IntDir)%(Extension)\\',
+ },
+ },
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ ],
+ }],
+ ['v8_postmortem_support=="true"', {
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+ ]
+ }],
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ 'conditions': [
+ ['icu_use_data_file_flag==1', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
+ }, { # else icu_use_data_file_flag !=1
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
+ }, {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
+ }],
+ ],
+ }],
+ ],
+ }, { # v8_enable_i18n_support==0
+ 'sources!': [
+ '../src/builtins/builtins-intl.cc',
+ '../src/builtins/builtins-intl.h',
+ '../src/char-predicates.cc',
+ '../src/intl.cc',
+ '../src/intl.h',
+ '../src/objects/intl-objects.cc',
+ '../src/objects/intl-objects.h',
+ '../src/runtime/runtime-intl.cc',
+ ],
+ }],
+ ['OS=="win" and v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icudata',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_libbase',
+ 'type': '<(component)',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '..',
+ ],
+ 'sources': [
+ '../src/base/adapters.h',
+ '../src/base/atomic-utils.h',
+ '../src/base/atomicops.h',
+ '../src/base/atomicops_internals_atomicword_compat.h',
+ '../src/base/atomicops_internals_portable.h',
+ '../src/base/atomicops_internals_std.h',
+ '../src/base/base-export.h',
+ '../src/base/bits.cc',
+ '../src/base/bits.h',
+ '../src/base/build_config.h',
+ '../src/base/compiler-specific.h',
+ '../src/base/cpu.cc',
+ '../src/base/cpu.h',
+ '../src/base/division-by-constant.cc',
+ '../src/base/division-by-constant.h',
+ '../src/base/debug/stack_trace.cc',
+ '../src/base/debug/stack_trace.h',
+ '../src/base/export-template.h',
+ '../src/base/file-utils.cc',
+ '../src/base/file-utils.h',
+ '../src/base/flags.h',
+ '../src/base/format-macros.h',
+ '../src/base/free_deleter.h',
+ '../src/base/functional.cc',
+ '../src/base/functional.h',
+ '../src/base/hashmap.h',
+ '../src/base/hashmap-entry.h',
+ '../src/base/ieee754.cc',
+ '../src/base/ieee754.h',
+ '../src/base/iterator.h',
+ '../src/base/lazy-instance.h',
+ '../src/base/logging.cc',
+ '../src/base/logging.h',
+ '../src/base/macros.h',
+ '../src/base/once.cc',
+ '../src/base/once.h',
+ '../src/base/optional.h',
+ '../src/base/page-allocator.cc',
+ '../src/base/page-allocator.h',
+ '../src/base/platform/elapsed-timer.h',
+ '../src/base/platform/time.cc',
+ '../src/base/platform/time.h',
+ '../src/base/platform/condition-variable.cc',
+ '../src/base/platform/condition-variable.h',
+ '../src/base/platform/mutex.cc',
+ '../src/base/platform/mutex.h',
+ '../src/base/platform/platform.h',
+ '../src/base/platform/semaphore.cc',
+ '../src/base/platform/semaphore.h',
+ '../src/base/ring-buffer.h',
+ '../src/base/safe_conversions.h',
+ '../src/base/safe_conversions_impl.h',
+ '../src/base/safe_math.h',
+ '../src/base/safe_math_impl.h',
+ '../src/base/sys-info.cc',
+ '../src/base/sys-info.h',
+ '../src/base/template-utils.h',
+ '../src/base/timezone-cache.h',
+ '../src/base/tsan.h',
+ '../src/base/utils/random-number-generator.cc',
+ '../src/base/utils/random-number-generator.h',
+ ],
+ 'target_conditions': [
+ ['OS=="android" and _toolset=="target"', {
+ 'libraries': [
+ '-llog',
+ ],
+ 'include_dirs': [
+ 'src/common/android/include',
+ ],
+ }],
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_BASE_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'USING_V8_BASE_SHARED',
+ ],
+ },
+ }],
+ ['OS=="linux"', {
+ 'link_settings': {
+ 'libraries': [
+ '-ldl',
+ '-lrt'
+ ],
+ },
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-linux.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ '../src/base/platform/platform-posix-time.h',
+ '../src/base/platform/platform-posix-time.cc',
+ ],
+ }
+ ],
+ ['OS=="android"', {
+ 'sources': [
+ '../src/base/debug/stack_trace_android.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ '../src/base/platform/platform-posix-time.h',
+ '../src/base/platform/platform-posix-time.cc',
+ ],
+ 'link_settings': {
+ 'target_conditions': [
+ ['_toolset=="host" and host_os!="mac"', {
+ # Only include libdl and librt on host builds because they
+ # are included by default on Android target builds, and we
+ # don't want to re-include them here since this will change
+ # library order and break (see crbug.com/469973).
+ # These libraries do not exist on Mac hosted builds.
+ 'libraries': [
+ '-ldl',
+ '-lrt'
+ ]
+ }]
+ ]
+ },
+ 'conditions': [
+ ['host_os=="mac"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'sources': [
+ '../src/base/platform/platform-macos.cc'
+ ]
+ }, {
+ 'sources': [
+ '../src/base/platform/platform-linux.cc'
+ ]
+ }],
+ ],
+ }, {
+ 'sources': [
+ '../src/base/platform/platform-linux.cc'
+ ]
+ }],
+ ],
+ },
+ ],
+ ['OS=="qnx"', {
+ 'link_settings': {
+ 'target_conditions': [
+ ['_toolset=="host" and host_os=="linux"', {
+ 'libraries': [
+ '-lrt'
+ ],
+ }],
+ ['_toolset=="target"', {
+ 'libraries': [
+ '-lbacktrace'
+ ],
+ }],
+ ],
+ },
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ '../src/base/platform/platform-posix-time.h',
+ '../src/base/platform/platform-posix-time.cc',
+ '../src/base/qnx-math.h'
+ ],
+ 'target_conditions': [
+ ['_toolset=="host" and host_os=="linux"', {
+ 'sources': [
+ '../src/base/platform/platform-linux.cc'
+ ],
+ }],
+ ['_toolset=="host" and host_os=="mac"', {
+ 'sources': [
+ '../src/base/platform/platform-macos.cc'
+ ],
+ }],
+ ['_toolset=="target"', {
+ 'sources': [
+ '../src/base/platform/platform-qnx.cc'
+ ],
+ }],
+ ],
+ },
+ ],
+ ['OS=="freebsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ]},
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-freebsd.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ '../src/base/platform/platform-posix-time.h',
+ '../src/base/platform/platform-posix-time.cc',
+ ],
+ }
+ ],
+ ['OS=="openbsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ]},
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-openbsd.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ '../src/base/platform/platform-posix-time.h',
+ '../src/base/platform/platform-posix-time.cc',
+ ],
+ }
+ ],
+ ['OS=="netbsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
+ ]},
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-openbsd.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ '../src/base/platform/platform-posix-time.h',
+ '../src/base/platform/platform-posix-time.cc',
+ ],
+ }
+ ],
+ ['OS=="aix"', {
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-aix.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc'
+ ]},
+ ],
+ ['OS=="fuchsia"', {
+ 'sources': [
+ '../src/base/debug/stack_trace_fuchsia.cc',
+ '../src/base/platform/platform-fuchsia.cc',
+ ]},
+ ],
+ ['OS=="solaris"', {
+ 'link_settings': {
+ 'libraries': [
+ '-lnsl -lrt',
+ ]},
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-solaris.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ ],
+ }
+ ],
+ ['OS=="mac"', {
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-macos.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ '../src/base/platform/platform-posix-time.h',
+ '../src/base/platform/platform-posix-time.cc',
+ ]},
+ ],
+ ['OS=="win"', {
+ 'defines': [
+ '_CRT_RAND_S' # for rand_s()
+ ],
+ 'variables': {
+ 'gyp_generators': '<!(echo $GYP_GENERATORS)',
+ },
+ 'conditions': [
+ ['gyp_generators=="make"', {
+ 'variables': {
+ 'build_env': '<!(uname -o)',
+ },
+ 'conditions': [
+ ['build_env=="Cygwin"', {
+ 'sources': [
+ '../src/base/debug/stack_trace_posix.cc',
+ '../src/base/platform/platform-cygwin.cc',
+ '../src/base/platform/platform-posix.h',
+ '../src/base/platform/platform-posix.cc',
+ ],
+ }, {
+ 'sources': [
+ '../src/base/debug/stack_trace_win.cc',
+ '../src/base/platform/platform-win32.cc',
+ '../src/base/win32-headers.h',
+ ],
+ }],
+ ],
+ 'link_settings': {
+ 'libraries': [ '-lwinmm', '-lws2_32' ],
+ },
+ }, {
+ 'sources': [
+ '../src/base/debug/stack_trace_win.cc',
+ '../src/base/platform/platform-win32.cc',
+ '../src/base/win32-headers.h',
+ ],
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
+ 'link_settings': {
+ 'libraries': [
+ '-ldbghelp.lib',
+ '-lshlwapi.lib',
+ '-lwinmm.lib',
+ '-lws2_32.lib'
+ ],
+ },
+ }],
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_libplatform',
+ 'type': '<(component)',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'dependencies': [
+ 'v8_libbase',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ '../include/',
+ ],
+ 'sources': [
+ '../include/libplatform/libplatform.h',
+ '../include/libplatform/libplatform-export.h',
+ '../include/libplatform/v8-tracing.h',
+ '../src/libplatform/default-background-task-runner.cc',
+ '../src/libplatform/default-background-task-runner.h',
+ '../src/libplatform/default-foreground-task-runner.cc',
+ '../src/libplatform/default-foreground-task-runner.h',
+ '../src/libplatform/default-platform.cc',
+ '../src/libplatform/default-platform.h',
+ '../src/libplatform/task-queue.cc',
+ '../src/libplatform/task-queue.h',
+ '../src/libplatform/tracing/trace-buffer.cc',
+ '../src/libplatform/tracing/trace-buffer.h',
+ '../src/libplatform/tracing/trace-config.cc',
+ '../src/libplatform/tracing/trace-object.cc',
+ '../src/libplatform/tracing/trace-writer.cc',
+ '../src/libplatform/tracing/trace-writer.h',
+ '../src/libplatform/tracing/tracing-controller.cc',
+ '../src/libplatform/worker-thread.cc',
+ '../src/libplatform/worker-thread.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['component=="shared_library"', {
+ 'direct_dependent_settings': {
+ 'defines': [ 'USING_V8_PLATFORM_SHARED' ],
+ },
+ 'defines': [ 'BUILDING_V8_PLATFORM_SHARED' ],
+ }]
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include/',
+ ],
+ },
+ },
+ {
+ 'target_name': 'v8_libsampler',
+ 'type': 'static_library',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'dependencies': [
+ 'v8_libbase',
+ ],
+ 'include_dirs+': [
+ '..',
+ '../include/',
+ ],
+ 'sources': [
+ '../src/libsampler/sampler.cc',
+ '../src/libsampler/sampler.h'
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include/',
+ ],
+ },
+ },
+ {
+ 'target_name': 'natives_blob',
+ 'type': 'none',
+ 'conditions': [
+ [ 'v8_use_external_startup_data==1', {
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'dependencies': ['js2c#host'],
+ }, {
+ 'dependencies': ['js2c'],
+ }],
+ ],
+ 'actions': [{
+ 'action_name': 'concatenate_natives_blob',
+ 'inputs': [
+ '../tools/concatenate-files.py',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob_host.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+ ],
+ }],
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+ ],
+ }],
+ ],
+ }],
+ }],
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ]
+ },
+ {
+ 'target_name': 'js2c',
+ 'type': 'none',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ 'variables': {
+ 'library_files': [
+ '../src/js/macros.py',
+ '../src/messages.h',
+ '../src/js/prologue.js',
+ '../src/js/array.js',
+ '../src/js/typedarray.js',
+ '../src/js/messages.js',
+ '../src/js/spread.js',
+ '../src/debug/mirrors.js',
+ '../src/debug/debug.js',
+ '../src/debug/liveedit.js',
+ ],
+ 'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+ 'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+ 'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'library_files': ['../src/js/intl.js'],
+ }],
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'js2c',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(library_files)',
+ ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ 'CORE',
+ '<@(library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_bin',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(library_files)',
+ ],
+ 'outputs': ['<@(libraries_bin_file)'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ 'CORE',
+ '<@(library_files)',
+ '--startup_blob', '<@(libraries_bin_file)',
+ '--nojs',
+ ],
+ },
+ {
+ 'action_name': 'js2c_extras',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(v8_extra_library_files)',
+ ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ 'EXTRAS',
+ '<@(v8_extra_library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_extras_bin',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(v8_extra_library_files)',
+ ],
+ 'outputs': ['<@(libraries_extras_bin_file)'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ 'EXTRAS',
+ '<@(v8_extra_library_files)',
+ '--startup_blob', '<@(libraries_extras_bin_file)',
+ '--nojs',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental_extras',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ ],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'EXPERIMENTAL_EXTRAS',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental_extras_bin',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ 'outputs': ['<@(libraries_experimental_extras_bin_file)'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'EXPERIMENTAL_EXTRAS',
+ '<@(v8_experimental_extra_library_files)',
+ '--startup_blob', '<@(libraries_experimental_extras_bin_file)',
+ '--nojs',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'postmortem-metadata',
+ 'type': 'none',
+ 'variables': {
+ 'heapobject_files': [
+ '../src/objects.h',
+ '../src/objects-inl.h',
+ '../src/objects/code.h',
+ '../src/objects/code-inl.h',
+ '../src/objects/fixed-array.h',
+ '../src/objects/fixed-array-inl.h',
+ '../src/objects/js-array.h',
+ '../src/objects/js-array-inl.h',
+ '../src/objects/js-regexp.h',
+ '../src/objects/js-regexp-inl.h',
+ '../src/objects/map.h',
+ '../src/objects/map-inl.h',
+ '../src/objects/script.h',
+ '../src/objects/script-inl.h',
+ '../src/objects/shared-function-info.h',
+ '../src/objects/shared-function-info-inl.h',
+ '../src/objects/string.h',
+ '../src/objects/string-inl.h',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'gen-postmortem-metadata',
+ 'inputs': [
+ '../tools/gen-postmortem-metadata.py',
+ '<@(heapobject_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+ ],
+ 'action': [
+ 'python',
+ '../tools/gen-postmortem-metadata.py',
+ '<@(_outputs)',
+ '<@(heapobject_files)'
+ ]
+ }
+ ]
+ },
+ {
+ 'target_name': 'mksnapshot',
+ 'type': 'executable',
+ 'dependencies': [
+ 'v8_base',
+ 'v8_init',
+ 'v8_libbase',
+ 'v8_libplatform',
+ 'v8_nosnapshot',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ ],
+ 'sources': [
+ '../src/snapshot/mksnapshot.cc',
+ ],
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ]
+ }],
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_dump_build_config',
+ 'type': 'none',
+ 'variables': {
+ },
+ 'actions': [
+ {
+ 'action_name': 'v8_dump_build_config',
+ 'inputs': [
+ '../tools/testrunner/utils/dump_build_config_gyp.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/v8_build_config.json',
+ ],
+ 'action': [
+ 'python',
+ '../tools/testrunner/utils/dump_build_config_gyp.py',
+ '<(PRODUCT_DIR)/v8_build_config.json',
+ 'dcheck_always_on=<(dcheck_always_on)',
+ 'is_asan=<(asan)',
+ 'is_cfi=<(cfi_vptr)',
+ 'is_component_build=<(component)',
+ 'is_debug=<(CONFIGURATION_NAME)',
+ # Not available in gyp.
+ 'is_gcov_coverage=0',
+ 'is_msan=<(msan)',
+ 'is_tsan=<(tsan)',
+ # Not available in gyp.
+ 'is_ubsan_vptr=0',
+ 'target_cpu=<(target_arch)',
+ 'v8_enable_i18n_support=<(v8_enable_i18n_support)',
+ 'v8_enable_verify_predictable=<(v8_enable_verify_predictable)',
+ 'v8_target_cpu=<(v8_target_arch)',
+ 'v8_use_snapshot=<(v8_use_snapshot)',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="mips" or v8_target_arch=="mipsel" \
+ or v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
+ 'action':[
+ 'mips_arch_variant=<(mips_arch_variant)',
+ 'mips_use_msa=<(mips_use_msa)',
+ ],
+ }],
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'v8_monolith',
+ 'type': 'none',
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include/',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'build_with_gn',
+ 'inputs': [
+ '../tools/node/build_gn.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/gn/obj/libv8_monolith.a',
+ '<(INTERMEDIATE_DIR)/gn/args.gn',
+ ],
+ 'action': [
+ '../tools/node/build_gn.py',
+ '--mode', '<(CONFIGURATION_NAME)',
+ '--v8_path', '../',
+ '--build_path', '<(INTERMEDIATE_DIR)/gn',
+ '--host_os', '<(host_os)',
+ '--flag', 'v8_promise_internal_field_count=<(v8_promise_internal_field_count)',
+ '--flag', 'target_cpu="<(target_arch)"',
+ '--flag', 'target_os="<(OS)"',
+ '--flag', 'v8_target_cpu="<(v8_target_arch)"',
+ '--flag', 'v8_embedder_string="<(v8_embedder_string)"',
+ '--flag', 'v8_use_snapshot=<(v8_use_snapshot)',
+ '--flag', 'v8_optimized_debug=<(v8_optimized_debug)',
+ '--flag', 'v8_enable_disassembler=<(v8_enable_disassembler)',
+ '--flag', 'v8_postmortem_support=<(v8_postmortem_support)',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/gypfiles/v8vtune.gyp b/deps/v8/gypfiles/v8vtune.gyp
new file mode 100644
index 0000000000..2a756d4b80
--- /dev/null
+++ b/deps/v8/gypfiles/v8vtune.gyp
@@ -0,0 +1,36 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['toolchain.gypi', 'features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'v8_vtune',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8.gyp:v8',
+ ],
+ 'sources': [
+ '../src/third_party/vtune/ittnotify_config.h',
+ '../src/third_party/vtune/ittnotify_types.h',
+ '../src/third_party/vtune/jitprofiling.cc',
+ '../src/third_party/vtune/jitprofiling.h',
+ '../src/third_party/vtune/v8-vtune.h',
+ '../src/third_party/vtune/vtune-jit.cc',
+ '../src/third_party/vtune/vtune-jit.h',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': ['ENABLE_VTUNE_JIT_INTERFACE',],
+ 'conditions': [
+ ['OS != "win"', {
+ 'libraries': ['-ldl',],
+ }],
+ ],
+ },
+ },
+ ],
+}
diff --git a/deps/v8/tools/verify_source_deps.py b/deps/v8/gypfiles/verify_source_deps.py
index c49d51ab5d..714c6fe84b 100755..100644
--- a/deps/v8/tools/verify_source_deps.py
+++ b/deps/v8/gypfiles/verify_source_deps.py
@@ -22,19 +22,19 @@ import sys
V8_BASE = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
GYP_FILES = [
- os.path.join(V8_BASE, 'src', 'd8.gyp'),
- os.path.join(V8_BASE, 'src', 'v8.gyp'),
- os.path.join(V8_BASE, 'src', 'inspector', 'inspector.gypi'),
- os.path.join(V8_BASE, 'src', 'third_party', 'vtune', 'v8vtune.gyp'),
- os.path.join(V8_BASE, 'samples', 'samples.gyp'),
- os.path.join(V8_BASE, 'test', 'cctest', 'cctest.gyp'),
- os.path.join(V8_BASE, 'test', 'fuzzer', 'fuzzer.gyp'),
- os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'),
- os.path.join(V8_BASE, 'test', 'inspector', 'inspector.gyp'),
- os.path.join(V8_BASE, 'test', 'mkgrokdump', 'mkgrokdump.gyp'),
- os.path.join(V8_BASE, 'testing', 'gmock.gyp'),
- os.path.join(V8_BASE, 'testing', 'gtest.gyp'),
- os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'd8.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'v8.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'inspector.gypi'),
+ os.path.join(V8_BASE, 'gypfiles', 'v8vtune.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'samples.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'cctest.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'fuzzer.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'unittests.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'inspector-test.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'mkgrokdump.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'gmock.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'gtest.gyp'),
+ os.path.join(V8_BASE, 'gypfiles', 'parser-shell.gyp'),
]
ALL_GYP_PREFIXES = [
diff --git a/deps/v8/include/PRESUBMIT.py b/deps/v8/include/PRESUBMIT.py
index 386f2e5006..8002e4dcac 100644
--- a/deps/v8/include/PRESUBMIT.py
+++ b/deps/v8/include/PRESUBMIT.py
@@ -24,6 +24,6 @@ def PostUploadHook(cl, change, output_api):
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
- 'master.tryserver.chromium.linux:linux_chromium_rel_ng'
+ 'luci.chromium.try:linux_chromium_rel_ng'
],
'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
deleted file mode 100644
index 54c0f216aa..0000000000
--- a/deps/v8/include/v8-debug.h
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_V8_DEBUG_H_
-#define V8_V8_DEBUG_H_
-
-#include "v8.h" // NOLINT(build/include)
-
-/**
- * ATTENTION: The debugger API exposed by this file is deprecated and will be
- * removed by the end of 2017. Please use the V8 inspector declared
- * in include/v8-inspector.h instead.
- */
-namespace v8 {
-
-// Debug events which can occur in the V8 JavaScript engine.
-enum DebugEvent {
- Break = 1,
- Exception = 2,
- AfterCompile = 3,
- CompileError = 4,
- AsyncTaskEvent = 5,
-};
-
-class V8_EXPORT Debug {
- public:
- /**
- * A client object passed to the v8 debugger whose ownership will be taken by
- * it. v8 is always responsible for deleting the object.
- */
- class ClientData {
- public:
- virtual ~ClientData() {}
- };
-
-
- /**
- * A message object passed to the debug message handler.
- */
- class Message {
- public:
- /**
- * Check type of message.
- */
- virtual bool IsEvent() const = 0;
- virtual bool IsResponse() const = 0;
- virtual DebugEvent GetEvent() const = 0;
-
- /**
- * Indicate whether this is a response to a continue command which will
- * start the VM running after this is processed.
- */
- virtual bool WillStartRunning() const = 0;
-
- /**
- * Access to execution state and event data. Don't store these cross
- * callbacks as their content becomes invalid. These objects are from the
- * debugger event that started the debug message loop.
- */
- virtual Local<Object> GetExecutionState() const = 0;
- virtual Local<Object> GetEventData() const = 0;
-
- /**
- * Get the debugger protocol JSON.
- */
- virtual Local<String> GetJSON() const = 0;
-
- /**
- * Get the context active when the debug event happened. Note this is not
- * the current active context as the JavaScript part of the debugger is
- * running in its own context which is entered at this point.
- */
- virtual Local<Context> GetEventContext() const = 0;
-
- /**
- * Client data passed with the corresponding request if any. This is the
- * client_data data value passed into Debug::SendCommand along with the
- * request that led to the message or NULL if the message is an event. The
- * debugger takes ownership of the data and will delete it even if there is
- * no message handler.
- */
- virtual ClientData* GetClientData() const = 0;
-
- virtual Isolate* GetIsolate() const = 0;
-
- virtual ~Message() {}
- };
-
- /**
- * An event details object passed to the debug event listener.
- */
- class EventDetails {
- public:
- /**
- * Event type.
- */
- virtual DebugEvent GetEvent() const = 0;
-
- /**
- * Access to execution state and event data of the debug event. Don't store
- * these cross callbacks as their content becomes invalid.
- */
- virtual Local<Object> GetExecutionState() const = 0;
- virtual Local<Object> GetEventData() const = 0;
-
- /**
- * Get the context active when the debug event happened. Note this is not
- * the current active context as the JavaScript part of the debugger is
- * running in its own context which is entered at this point.
- */
- virtual Local<Context> GetEventContext() const = 0;
-
- /**
- * Client data passed with the corresponding callback when it was
- * registered.
- */
- virtual Local<Value> GetCallbackData() const = 0;
-
- /**
- * This is now a dummy that returns nullptr.
- */
- virtual ClientData* GetClientData() const = 0;
-
- virtual Isolate* GetIsolate() const = 0;
-
- virtual ~EventDetails() {}
- };
-
- /**
- * Debug event callback function.
- *
- * \param event_details object providing information about the debug event
- *
- * An EventCallback does not take possession of the event data,
- * and must not rely on the data persisting after the handler returns.
- */
- typedef void (*EventCallback)(const EventDetails& event_details);
-
- /**
- * This is now a no-op.
- */
- typedef void (*MessageHandler)(const Message& message);
-
- V8_DEPRECATED("No longer supported", static bool SetDebugEventListener(
- Isolate* isolate, EventCallback that,
- Local<Value> data = Local<Value>()));
-
- // Schedule a debugger break to happen when JavaScript code is run
- // in the given isolate.
- V8_DEPRECATED("No longer supported",
- static void DebugBreak(Isolate* isolate));
-
- // Remove scheduled debugger break in given isolate if it has not
- // happened yet.
- V8_DEPRECATED("No longer supported",
- static void CancelDebugBreak(Isolate* isolate));
-
- // Check if a debugger break is scheduled in the given isolate.
- V8_DEPRECATED("No longer supported",
- static bool CheckDebugBreak(Isolate* isolate));
-
- // This is now a no-op.
- V8_DEPRECATED("No longer supported",
- static void SetMessageHandler(Isolate* isolate,
- MessageHandler handler));
-
- // This is now a no-op.
- V8_DEPRECATED("No longer supported",
- static void SendCommand(Isolate* isolate,
- const uint16_t* command, int length,
- ClientData* client_data = NULL));
-
- /**
- * Run a JavaScript function in the debugger.
- * \param fun the function to call
- * \param data passed as second argument to the function
- * With this call the debugger is entered and the function specified is called
- * with the execution state as the first argument. This makes it possible to
- * get access to information otherwise not available during normal JavaScript
- * execution e.g. details on stack frames. Receiver of the function call will
- * be the debugger context global object; however, this is subject to change.
- * The following example shows a JavaScript function which when passed to
- * v8::Debug::Call will return the current line of JavaScript execution.
- *
- * \code
- * function frame_source_line(exec_state) {
- * return exec_state.frame(0).sourceLine();
- * }
- * \endcode
- */
- V8_DEPRECATED("No longer supported",
- static MaybeLocal<Value> Call(
- Local<Context> context, v8::Local<v8::Function> fun,
- Local<Value> data = Local<Value>()));
-
- // This is now a no-op.
- V8_DEPRECATED("No longer supported",
- static void ProcessDebugMessages(Isolate* isolate));
-
- /**
- * Debugger is running in its own context which is entered while debugger
- * messages are being dispatched. This is an explicit getter for this
- * debugger context. Note that the content of the debugger context is subject
- * to change. The Context exists only when the debugger is active, i.e. at
- * least one DebugEventListener or MessageHandler is set.
- */
- V8_DEPRECATED("Use v8-inspector",
- static Local<Context> GetDebugContext(Isolate* isolate));
-
- /**
- * While in the debug context, this method returns the top-most non-debug
- * context, if it exists.
- */
- V8_DEPRECATED(
- "No longer supported",
- static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate));
-
- /**
- * Enable/disable LiveEdit functionality for the given Isolate
- * (default Isolate if not provided). V8 will abort if LiveEdit is
- * unexpectedly used. LiveEdit is enabled by default.
- */
- V8_DEPRECATED("No longer supported",
- static void SetLiveEditEnabled(Isolate* isolate, bool enable));
-
- /**
- * Returns an array of internal properties specific to the value type. Result has
- * the following format: [<name>, <value>,...,<name>, <value>]. Result array
- * will be allocated in the current context.
- */
- V8_DEPRECATED("No longer supported",
- static MaybeLocal<Array> GetInternalProperties(
- Isolate* isolate, Local<Value> value));
-
- /**
- * Defines if the ES2015 tail call elimination feature is enabled or not.
- * The change of this flag triggers deoptimization of all functions that
- * contain calls at tail position.
- */
- V8_DEPRECATED("No longer supported",
- static bool IsTailCallEliminationEnabled(Isolate* isolate));
- V8_DEPRECATED("No longer supported",
- static void SetTailCallEliminationEnabled(Isolate* isolate,
- bool enabled));
-};
-
-
-} // namespace v8
-
-
-#undef EXPORT
-
-
-#endif // V8_V8_DEBUG_H_
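The deleted header's deprecation notice points embedders at include/v8-inspector.h. A minimal, hedged sketch of the replacement wiring follows; it assumes only the public v8_inspector API, NullChannel/NullClient/Attach are illustrative names, and a real embedder would forward protocol messages to its debugger frontend rather than drop them.

#include <memory>

#include "v8.h"
#include "v8-inspector.h"

namespace vi = v8_inspector;

// Drops all protocol traffic; a real embedder forwards these messages
// to its frontend over whatever transport it uses.
class NullChannel final : public vi::V8Inspector::Channel {
  void sendResponse(int, std::unique_ptr<vi::StringBuffer>) override {}
  void sendNotification(std::unique_ptr<vi::StringBuffer>) override {}
  void flushProtocolNotifications() override {}
};

// V8InspectorClient's virtual methods have default implementations,
// so an empty subclass suffices for a bare-bones session.
class NullClient final : public vi::V8InspectorClient {};

std::unique_ptr<vi::V8InspectorSession> Attach(
    v8::Isolate* isolate, v8::Local<v8::Context> context,
    vi::V8InspectorClient* client, vi::V8Inspector::Channel* channel,
    std::unique_ptr<vi::V8Inspector>* inspector) {
  *inspector = vi::V8Inspector::create(isolate, client);
  const int kContextGroupId = 1;  // any stable id chosen by the embedder
  (*inspector)->contextCreated(
      vi::V8ContextInfo(context, kContextGroupId, vi::StringView()));
  return (*inspector)->connect(kContextGroupId, channel, vi::StringView());
}

The session returned by connect() is the object through which protocol-level debugging (dispatchProtocolMessage and friends) then flows.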
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index a86402be92..50c531559e 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -626,6 +626,68 @@ class V8_EXPORT AllocationProfile {
static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
+/**
+ * An object graph consisting of embedder objects and V8 objects.
+ * Edges of the graph are strong references between the objects.
+ * The embedder can build this graph during heap snapshot generation
+ * to include the embedder objects in the heap snapshot.
+ * Usage:
+ * 1) Define derived class of EmbedderGraph::Node for embedder objects.
+ * 2) Set the build embedder graph callback on the heap profiler using
+ * HeapProfiler::SetBuildEmbedderGraphCallback.
+ * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
+ * node1 to node2.
+ * 4) To represent references from/to a V8 object, construct V8 nodes using
+ * graph->V8Node(value).
+ */
+class V8_EXPORT EmbedderGraph {
+ public:
+ class Node {
+ public:
+ Node() = default;
+ virtual ~Node() = default;
+ virtual const char* Name() = 0;
+ virtual size_t SizeInBytes() = 0;
+ /**
+ * The corresponding V8 wrapper node if not null.
+ * During heap snapshot generation the embedder node and the V8 wrapper
+ * node will be merged into one node to simplify retaining paths.
+ */
+ virtual Node* WrapperNode() { return nullptr; }
+ virtual bool IsRootNode() { return false; }
+ /** Must return true for non-V8 nodes. */
+ virtual bool IsEmbedderNode() { return true; }
+ /**
+ * Optional name prefix. It is used in Chrome for tagging detached nodes.
+ */
+ virtual const char* NamePrefix() { return nullptr; }
+
+ private:
+ Node(const Node&) = delete;
+ Node& operator=(const Node&) = delete;
+ };
+
+ /**
+ * Returns a node corresponding to the given V8 value. Ownership is not
+ * transferred. The result pointer is valid while the graph is alive.
+ */
+ virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;
+
+ /**
+ * Adds the given node to the graph and takes ownership of the node.
+ * Returns a raw pointer to the node that is valid while the graph is alive.
+ */
+ virtual Node* AddNode(std::unique_ptr<Node> node) = 0;
+
+ /**
+ * Adds an edge that represents a strong reference from the given node
+ * |from| to the given node |to|. The nodes must be added to the graph
+ * before calling this function.
+ */
+ virtual void AddEdge(Node* from, Node* to) = 0;
+
+ virtual ~EmbedderGraph() = default;
+};
/**
* Interface for controlling heap profiling. Instance of the
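The four usage steps in the EmbedderGraph comment map onto very little code. A hedged sketch, where EmbedderNode, "MyThing", and LookUpWrapper are illustrative stand-ins for the embedder's own bookkeeping, not part of the API:

#include <memory>

#include "v8.h"
#include "v8-profiler.h"

// Step 1: an embedder-side node. Name() and SizeInBytes() are the only
// pure-virtual members that must be provided.
class EmbedderNode final : public v8::EmbedderGraph::Node {
 public:
  EmbedderNode(const char* name, size_t size) : name_(name), size_(size) {}
  const char* Name() override { return name_; }
  size_t SizeInBytes() override { return size_; }

 private:
  const char* name_;
  size_t size_;
};

// Hypothetical lookup into the embedder's wrapper table.
v8::Local<v8::Value> LookUpWrapper(v8::Isolate* isolate);

// Steps 3 and 4: add nodes and edges; V8Node() bridges to V8 objects.
void BuildEmbedderGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph) {
  v8::EmbedderGraph::Node* native =
      graph->AddNode(std::unique_ptr<v8::EmbedderGraph::Node>(
          new EmbedderNode("MyThing", 128)));
  graph->AddEdge(native, graph->V8Node(LookUpWrapper(isolate)));
}

// Step 2: register the callback on the heap profiler, e.g.
//   isolate->GetHeapProfiler()->SetBuildEmbedderGraphCallback(
//       BuildEmbedderGraph);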
@@ -665,6 +727,15 @@ class V8_EXPORT HeapProfiler {
typedef RetainedObjectInfo* (*WrapperInfoCallback)(uint16_t class_id,
Local<Value> wrapper);
+ /**
+ * Callback function invoked during heap snapshot generation to retrieve
+ * the embedder object graph. The callback should use graph->AddEdge(..) to
+ * add references between the objects.
+ * The callback must not trigger garbage collection in V8.
+ */
+ typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
+ v8::EmbedderGraph* graph);
+
/** Returns the number of snapshots taken. */
int GetSnapshotCount();
@@ -809,6 +880,7 @@ class V8_EXPORT HeapProfiler {
WrapperInfoCallback callback);
void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback);
+ void SetBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback);
/**
* Default value of persistent handle class ID. Must not be used to
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 15ea225dc1..a306965122 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -197,16 +197,6 @@ class PersistentValueMapBase {
}
/**
- * Call Isolate::SetReference with the given parent and the map value.
- */
- void SetReference(const K& key,
- const Persistent<Object>& parent) {
- GetIsolate()->SetReference(
- reinterpret_cast<internal::Object**>(parent.val_),
- reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))));
- }
-
- /**
* Call V8::RegisterExternallyReferencedObject with the map value for given
* key.
*/
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 71f3b774a3..da78020d07 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
-#define V8_MINOR_VERSION 5
-#define V8_BUILD_NUMBER 254
-#define V8_PATCH_LEVEL 43
+#define V8_MINOR_VERSION 6
+#define V8_BUILD_NUMBER 346
+#define V8_PATCH_LEVEL 23
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index d5b554dc96..277cbd442a 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -145,7 +145,7 @@ class Heap;
class HeapObject;
class Isolate;
class Object;
-struct StreamedSource;
+struct ScriptStreamingData;
template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
@@ -554,6 +554,14 @@ template <class T> class PersistentBase {
V8_INLINE void ClearWeak() { ClearWeak<void>(); }
/**
+ * Annotates the strong handle with the given label, which is then used by the
+ * heap snapshot generator as the name of the edge from the root to the handle.
+ * The function does not take ownership of the label and assumes that the
+ * label is valid as long as the handle is valid.
+ */
+ V8_INLINE void AnnotateStrongRetainer(const char* label);
+
+ /**
* Allows the embedder to tell the v8 garbage collector that a certain object
* is alive. Only allowed when the embedder is asked to trace its heap by
* EmbedderHeapTracer.
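A short sketch of the new annotation, assuming an embedder that caches a template in a Persistent; the label string and function names are illustrative:

#include "v8.h"

// The label is not copied, so it must outlive the handle; a string
// literal (static storage duration) is the simplest guarantee.
static const char* kCacheLabel = "embedder / template cache";

void CacheTemplate(v8::Isolate* isolate, v8::Local<v8::ObjectTemplate> tmpl,
                   v8::Persistent<v8::ObjectTemplate>* slot) {
  slot->Reset(isolate, tmpl);
  // Names the root edge to this handle in heap snapshots.
  slot->AnnotateStrongRetainer(kCacheLabel);
}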
@@ -945,7 +953,7 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
*/
class V8_EXPORT SealHandleScope {
public:
- SealHandleScope(Isolate* isolate);
+ explicit SealHandleScope(Isolate* isolate);
~SealHandleScope();
SealHandleScope(const SealHandleScope&) = delete;
@@ -1136,6 +1144,7 @@ class V8_EXPORT Module {
public:
/**
* The different states a module can be in.
+ *
* This corresponds to the states used in ECMAScript except that "evaluated"
* is split into kEvaluated and kErrored, indicating success and failure,
* respectively.
@@ -1186,7 +1195,7 @@ class V8_EXPORT Module {
Local<Module> referrer);
/**
- * ModuleDeclarationInstantiation
+ * Instantiates the module and its dependencies.
*
* Returns an empty Maybe<bool> if an exception occurred during
* instantiation. (In the case where the callback throws an exception, that
@@ -1196,16 +1205,19 @@ class V8_EXPORT Module {
ResolveCallback callback);
/**
- * ModuleEvaluation
+ * Evaluates the module and its dependencies.
*
- * Returns the completion value.
- * TODO(neis): Be more precise or say nothing.
+ * If status is kInstantiated, run the module's code. On success, set status
+ * to kEvaluated and return the completion value; on failure, set status to
+ * kErrored and propagate the thrown exception (which is then also available
+ * via |GetException|).
*/
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
/**
* Returns the namespace object of this module.
- * The module's status must be kEvaluated.
+ *
+ * The module's status must be at least kInstantiated.
*/
Local<Value> GetModuleNamespace();
};
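Taken together, the instantiate/evaluate contract suggests a driver like the following sketch; InstantiateAndRun is an illustrative name and resolver is the embedder's ResolveCallback, but the calls themselves are the ones documented above.

#include "v8.h"

v8::MaybeLocal<v8::Value> InstantiateAndRun(
    v8::Local<v8::Context> context, v8::Local<v8::Module> module,
    v8::Module::ResolveCallback resolver) {
  // Moves status from kUninstantiated to kInstantiated (or kErrored).
  if (module->InstantiateModule(context, resolver).IsNothing()) {
    return v8::MaybeLocal<v8::Value>();  // exception already pending
  }
  // Runs the code; status becomes kEvaluated on success, kErrored on
  // failure, in which case module->GetException() holds the thrown value.
  return module->Evaluate(context);
}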
@@ -1219,24 +1231,23 @@ class V8_EXPORT Script {
/**
* A shorthand for ScriptCompiler::Compile().
*/
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<Script> Compile(Local<String> source,
- ScriptOrigin* origin = nullptr));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Script> Compile(Local<String> source,
+ ScriptOrigin* origin = nullptr));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, Local<String> source,
ScriptOrigin* origin = nullptr);
- static Local<Script> V8_DEPRECATE_SOON("Use maybe version",
- Compile(Local<String> source,
- Local<String> file_name));
+ static Local<Script> V8_DEPRECATED("Use maybe version",
+ Compile(Local<String> source,
+ Local<String> file_name));
/**
* Runs the script returning the resulting value. It will be run in the
* context in which it was created (ScriptCompiler::CompileBound or
* UnboundScript::BindToCurrentContext()).
*/
- V8_DEPRECATE_SOON("Use maybe version", Local<Value> Run());
+ V8_DEPRECATED("Use maybe version", Local<Value> Run());
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Run(Local<Context> context);
/**
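The maybe-version pattern these deprecations push toward looks like this sketch (CompileAndRun is an illustrative name):

#include "v8.h"

v8::MaybeLocal<v8::Value> CompileAndRun(v8::Local<v8::Context> context,
                                        v8::Local<v8::String> source) {
  v8::Local<v8::Script> script;
  if (!v8::Script::Compile(context, source).ToLocal(&script)) {
    return v8::MaybeLocal<v8::Value>();  // compile error is pending
  }
  return script->Run(context);  // empty on uncaught exception
}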
@@ -1395,14 +1406,14 @@ class V8_EXPORT ScriptCompiler {
// object is alive.
const CachedData* GetCachedData() const;
- internal::StreamedSource* impl() const { return impl_; }
+ internal::ScriptStreamingData* impl() const { return impl_; }
// Prevent copying.
StreamedSource(const StreamedSource&) = delete;
StreamedSource& operator=(const StreamedSource&) = delete;
private:
- internal::StreamedSource* impl_;
+ internal::ScriptStreamingData* impl_;
};
/**
@@ -1546,13 +1557,13 @@ class V8_EXPORT ScriptCompiler {
* It is possible to specify multiple context extensions (obj in the above
* example).
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<Function> CompileFunctionInContext(
- Isolate* isolate, Source* source,
- Local<Context> context, size_t arguments_count,
- Local<String> arguments[],
- size_t context_extension_count,
- Local<Object> context_extensions[]));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Function> CompileFunctionInContext(
+ Isolate* isolate, Source* source,
+ Local<Context> context, size_t arguments_count,
+ Local<String> arguments[],
+ size_t context_extension_count,
+ Local<Object> context_extensions[]));
static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
Local<Context> context, Source* source, size_t arguments_count,
Local<String> arguments[], size_t context_extension_count,
@@ -1580,7 +1591,7 @@ class V8_EXPORT Message {
public:
Local<String> Get() const;
- V8_DEPRECATE_SOON("Use maybe version", Local<String> GetSourceLine() const);
+ V8_DEPRECATED("Use maybe version", Local<String> GetSourceLine() const);
V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
Local<Context> context) const;
@@ -1606,7 +1617,7 @@ class V8_EXPORT Message {
/**
* Returns the number, 1-based, of the line where the error occurred.
*/
- V8_DEPRECATE_SOON("Use maybe version", int GetLineNumber() const);
+ V8_DEPRECATED("Use maybe version", int GetLineNumber() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetLineNumber(Local<Context> context) const;
/**
@@ -1630,7 +1641,7 @@ class V8_EXPORT Message {
* Returns the index within the line of the first character where
* the error occurred.
*/
- V8_DEPRECATE_SOON("Use maybe version", int GetStartColumn() const);
+ V8_DEPRECATED("Use maybe version", int GetStartColumn() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetStartColumn(Local<Context> context) const;
/**
@@ -1864,7 +1875,7 @@ class V8_EXPORT ValueSerializer {
* SharedArrayBuffer object. The embedder must return an ID for the
* object, using the same ID if this SharedArrayBuffer has already been
* serialized in this buffer. When deserializing, this ID will be passed to
- * ValueDeserializer::TransferSharedArrayBuffer as |transfer_id|.
+ * ValueDeserializer::GetSharedArrayBufferFromId as |clone_id|.
*
* If the object cannot be serialized, an
* exception should be thrown and Nothing<uint32_t>() returned.
@@ -1991,6 +2002,13 @@ class V8_EXPORT ValueDeserializer {
*/
virtual MaybeLocal<WasmCompiledModule> GetWasmModuleFromId(
Isolate* isolate, uint32_t transfer_id);
+
+ /**
+ * Get a SharedArrayBuffer given a clone_id previously provided
+ * by ValueSerializer::GetSharedArrayBufferId.
+ */
+ virtual MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
+ Isolate* isolate, uint32_t clone_id);
};
ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
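A sketch of a deserializer delegate using the new hook. SabDelegate and its id table are illustrative; the table is assumed to mirror the clone ids handed out by GetSharedArrayBufferId on the serializing side.

#include <cstdint>
#include <vector>

#include "v8.h"

class SabDelegate final : public v8::ValueDeserializer::Delegate {
 public:
  explicit SabDelegate(std::vector<v8::Global<v8::SharedArrayBuffer>>* table)
      : table_(table) {}

  v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
      v8::Isolate* isolate, uint32_t clone_id) override {
    if (clone_id >= table_->size()) {
      return v8::MaybeLocal<v8::SharedArrayBuffer>();  // unknown id
    }
    return (*table_)[clone_id].Get(isolate);
  }

 private:
  // Index == clone_id, in serialization order.
  std::vector<v8::Global<v8::SharedArrayBuffer>>* table_;
};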
@@ -2309,6 +2327,16 @@ class V8_EXPORT Value : public Data {
bool IsFloat64Array() const;
/**
+ * Returns true if this value is a BigInt64Array.
+ */
+ bool IsBigInt64Array() const;
+
+ /**
+ * Returns true if this value is a BigUint64Array.
+ */
+ bool IsBigUint64Array() const;
+
+ /**
* Returns true if this value is a DataView.
*/
bool IsDataView() const;
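A small sketch of how the new predicates slot into existing type dispatch (TypedArrayKind is an illustrative helper):

#include "v8.h"

const char* TypedArrayKind(v8::Local<v8::Value> value) {
  if (value->IsBigInt64Array()) return "BigInt64Array";
  if (value->IsBigUint64Array()) return "BigUint64Array";
  if (value->IsFloat64Array()) return "Float64Array";
  return value->IsTypedArray() ? "other typed array" : "not a typed array";
}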
@@ -2783,8 +2811,8 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Utf8Value {
public:
- V8_DEPRECATE_SOON("Use Isolate version",
- explicit Utf8Value(Local<v8::Value> obj));
+ V8_DEPRECATED("Use Isolate version",
+ explicit Utf8Value(Local<v8::Value> obj));
Utf8Value(Isolate* isolate, Local<v8::Value> obj);
~Utf8Value();
char* operator*() { return str_; }
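The isolate-taking constructor the deprecation points to is a drop-in replacement; a sketch (PrintValue is an illustrative name):

#include <cstdio>

#include "v8.h"

void PrintValue(v8::Isolate* isolate, v8::Local<v8::Value> value) {
  v8::String::Utf8Value utf8(isolate, value);  // isolate-taking constructor
  // operator* returns nullptr if the conversion failed.
  printf("%s\n", *utf8 ? *utf8 : "<string conversion failed>");
}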
@@ -2808,8 +2836,7 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Value {
public:
- V8_DEPRECATE_SOON("Use Isolate version",
- explicit Value(Local<v8::Value> obj));
+ V8_DEPRECATED("Use Isolate version", explicit Value(Local<v8::Value> obj));
Value(Isolate* isolate, Local<v8::Value> obj);
~Value();
uint16_t* operator*() { return str_; }
@@ -3059,6 +3086,12 @@ enum class KeyCollectionMode { kOwnOnly, kIncludePrototypes };
enum class IndexFilter { kIncludeIndices, kSkipIndices };
/**
+ * kConvertToString will convert integer indices to strings.
+ * kKeepNumbers will return numbers for integer indices.
+ */
+enum class KeyConversionMode { kConvertToString, kKeepNumbers };
+
+/**
* Integrity level for objects.
*/
enum class IntegrityLevel { kFrozen, kSealed };
@@ -3193,6 +3226,19 @@ class V8_EXPORT Object : public Value {
Local<Value> data = Local<Value>(), PropertyAttribute attributes = None);
/**
+   * Attempts to create a property with the given name that behaves like a data
+   * property, except that the given getter is invoked (and passed the data
+   * value) to produce its value the first time it is read. After the
+ * property is accessed once, it is replaced with an ordinary data property.
+ *
+ * Analogous to Template::SetLazyDataProperty.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetLazyDataProperty(
+ Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter, Local<Value> data = Local<Value>(),
+ PropertyAttribute attributes = None);
+
+ /**
* Functionality for private properties.
* This is an experimental feature, use at your own risk.
* Note: Private properties are not inherited. Do not rely on this, since it
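A minimal sketch of the lazy property above; `StatsGetter` is a hypothetical callback that runs on the first read only, after which its result is stored as an ordinary data property:

  void StatsGetter(v8::Local<v8::Name> name,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
    info.GetReturnValue().Set(v8::Number::New(info.GetIsolate(), 42));
  }

  // At the use site, with `obj`, `context`, and `isolate` in scope:
  v8::Local<v8::Name> key =
      v8::String::NewFromUtf8(isolate, "stats", v8::NewStringType::kNormal)
          .ToLocalChecked();
  bool ok =
      obj->SetLazyDataProperty(context, key, StatsGetter).FromMaybe(false);
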
@@ -3215,7 +3261,8 @@ class V8_EXPORT Object : public Value {
Local<Context> context);
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
Local<Context> context, KeyCollectionMode mode,
- PropertyFilter property_filter, IndexFilter index_filter);
+ PropertyFilter property_filter, IndexFilter index_filter,
+ KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
/**
* This function has the same functionality as GetPropertyNames but
@@ -3233,7 +3280,8 @@ class V8_EXPORT Object : public Value {
* be enumerated by a for-in statement over this object.
*/
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
- Local<Context> context, PropertyFilter filter);
+ Local<Context> context, PropertyFilter filter,
+ KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
/**
* Get the prototype object. This does not skip objects marked to
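A sketch of the new parameter on both enumeration methods: kKeepNumbers (the default) returns integer indices as numbers, while kConvertToString restores string keys throughout. Assuming `obj` and `context` are in scope:

  v8::Local<v8::Array> names;
  if (obj->GetOwnPropertyNames(context, v8::ONLY_ENUMERABLE,
                               v8::KeyConversionMode::kConvertToString)
          .ToLocal(&names)) {
    // Every element of `names` is a string, including array indices.
  }
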
@@ -4046,11 +4094,15 @@ class V8_EXPORT Proxy : public Object {
class V8_EXPORT WasmCompiledModule : public Object {
public:
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
- // A buffer that is owned by the caller.
+ /**
+ * A buffer that is owned by the caller.
+ */
typedef std::pair<const uint8_t*, size_t> CallerOwnedBuffer;
- // An opaque, native heap object for transferring wasm modules. It
- // supports move semantics, and does not support copy semantics.
+ /**
+ * An opaque, native heap object for transferring wasm modules. It
+ * supports move semantics, and does not support copy semantics.
+ */
class TransferrableModule final {
public:
TransferrableModule(TransferrableModule&& src) = default;
@@ -4069,35 +4121,41 @@ class V8_EXPORT WasmCompiledModule : public Object {
OwnedBuffer wire_bytes = {nullptr, 0};
};
- // Get an in-memory, non-persistable, and context-independent (meaning,
- // suitable for transfer to another Isolate and Context) representation
- // of this wasm compiled module.
+ /**
+ * Get an in-memory, non-persistable, and context-independent (meaning,
+ * suitable for transfer to another Isolate and Context) representation
+ * of this wasm compiled module.
+ */
TransferrableModule GetTransferrableModule();
- // Efficiently re-create a WasmCompiledModule, without recompiling, from
- // a TransferrableModule.
+ /**
+ * Efficiently re-create a WasmCompiledModule, without recompiling, from
+ * a TransferrableModule.
+ */
static MaybeLocal<WasmCompiledModule> FromTransferrableModule(
Isolate* isolate, const TransferrableModule&);
- // Get the wasm-encoded bytes that were used to compile this module.
+ /**
+ * Get the wasm-encoded bytes that were used to compile this module.
+ */
Local<String> GetWasmWireBytes();
- // Serialize the compiled module. The serialized data does not include the
- // uncompiled bytes.
+ /**
+ * Serialize the compiled module. The serialized data does not include the
+ * uncompiled bytes.
+ */
SerializedModule Serialize();
- // If possible, deserialize the module, otherwise compile it from the provided
- // uncompiled bytes.
+ /**
+   * If possible, deserialize the module; otherwise, compile it from the provided
+ * uncompiled bytes.
+ */
static MaybeLocal<WasmCompiledModule> DeserializeOrCompile(
Isolate* isolate, const CallerOwnedBuffer& serialized_module,
const CallerOwnedBuffer& wire_bytes);
V8_INLINE static WasmCompiledModule* Cast(Value* obj);
private:
- // TODO(ahaas): please remove the friend once streamed compilation is
- // implemented
- friend class WasmModuleObjectBuilder;
-
static MaybeLocal<WasmCompiledModule> Deserialize(
Isolate* isolate, const CallerOwnedBuffer& serialized_module,
const CallerOwnedBuffer& wire_bytes);
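A sketch of the transfer round trip described above, assuming `compiled_module` was obtained in one isolate and `other_isolate` is the receiving isolate:

  v8::WasmCompiledModule::TransferrableModule transferrable =
      compiled_module->GetTransferrableModule();
  // TransferrableModule is move-only; hand it to the receiving thread.
  v8::Local<v8::WasmCompiledModule> restored;
  if (v8::WasmCompiledModule::FromTransferrableModule(other_isolate,
                                                      transferrable)
          .ToLocal(&restored)) {
    // Restored without recompiling.
  }
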
@@ -4117,11 +4175,18 @@ class V8_EXPORT WasmCompiledModule : public Object {
// to simply WasmModuleObjectBuilder
class V8_EXPORT WasmModuleObjectBuilderStreaming final {
public:
- WasmModuleObjectBuilderStreaming(Isolate* isolate);
- // The buffer passed into OnBytesReceived is owned by the caller.
+ explicit WasmModuleObjectBuilderStreaming(Isolate* isolate);
+ /**
+ * The buffer passed into OnBytesReceived is owned by the caller.
+ */
void OnBytesReceived(const uint8_t*, size_t size);
void Finish();
- void Abort(Local<Value> exception);
+ /**
+   * Abort streaming compilation. If {exception} has a value, the promise
+   * associated with streaming compilation is rejected with that value. If
+   * {exception} is empty, the promise is not rejected.
+ */
+ void Abort(MaybeLocal<Value> exception);
Local<Promise> GetPromise();
~WasmModuleObjectBuilderStreaming();
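A sketch of the streaming flow with the updated Abort signature; `chunk`, `size`, and `download_failed` are assumed to come from a hypothetical network source. Passing an empty MaybeLocal aborts without rejecting the promise:

  v8::WasmModuleObjectBuilderStreaming builder(isolate);
  v8::Local<v8::Promise> promise = builder.GetPromise();
  builder.OnBytesReceived(chunk, size);  // buffer stays owned by the caller
  if (download_failed) {
    builder.Abort(v8::MaybeLocal<v8::Value>());  // silent abort
  } else {
    builder.Finish();  // resolves `promise` when compilation completes
  }
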
@@ -4140,11 +4205,13 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final {
Isolate* isolate_ = nullptr;
#if V8_CC_MSVC
- // We don't need the static Copy API, so the default
- // NonCopyablePersistentTraits would be sufficient, however,
- // MSVC eagerly instantiates the Copy.
- // We ensure we don't use Copy, however, by compiling with the
- // defaults everywhere else.
+ /**
+ * We don't need the static Copy API, so the default
+ * NonCopyablePersistentTraits would be sufficient, however,
+ * MSVC eagerly instantiates the Copy.
+ * We ensure we don't use Copy, however, by compiling with the
+ * defaults everywhere else.
+ */
Persistent<Promise, CopyablePersistentTraits<Promise>> promise_;
#else
Persistent<Promise> promise_;
@@ -4154,30 +4221,6 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final {
std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
};
-class V8_EXPORT WasmModuleObjectBuilder final {
- public:
- WasmModuleObjectBuilder(Isolate* isolate) : isolate_(isolate) {}
- // The buffer passed into OnBytesReceived is owned by the caller.
- void OnBytesReceived(const uint8_t*, size_t size);
- MaybeLocal<WasmCompiledModule> Finish();
-
- private:
- Isolate* isolate_ = nullptr;
- // TODO(ahaas): We probably need none of this below here once streamed
- // compilation is implemented.
- typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> Buffer;
-
- // Disable copy semantics *in this implementation*. We can choose to
- // relax this, albeit it's not clear why.
- WasmModuleObjectBuilder(const WasmModuleObjectBuilder&) = delete;
- WasmModuleObjectBuilder(WasmModuleObjectBuilder&&) = default;
- WasmModuleObjectBuilder& operator=(const WasmModuleObjectBuilder&) = delete;
- WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&) = default;
-
- std::vector<Buffer> received_buffers_;
- size_t total_size_ = 0;
-};
-
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
@@ -4617,6 +4660,37 @@ class V8_EXPORT Float64Array : public TypedArray {
static void CheckCast(Value* obj);
};
+/**
+ * An instance of BigInt64Array constructor.
+ */
+class V8_EXPORT BigInt64Array : public TypedArray {
+ public:
+ static Local<BigInt64Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<BigInt64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static BigInt64Array* Cast(Value* obj);
+
+ private:
+ BigInt64Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of BigUint64Array constructor.
+ */
+class V8_EXPORT BigUint64Array : public TypedArray {
+ public:
+ static Local<BigUint64Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<BigUint64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static BigUint64Array* Cast(Value* obj);
+
+ private:
+ BigUint64Array();
+ static void CheckCast(Value* obj);
+};
/**
* An instance of DataView constructor (ES6 draft 15.13.7).
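A sketch of the two typed-array classes added above, assuming `backing` is a Local<ArrayBuffer> holding at least 8 * 4 bytes:

  v8::Local<v8::BigInt64Array> big_ints =
      v8::BigInt64Array::New(backing, /*byte_offset=*/0, /*length=*/4);
  // The matching predicates added to Value distinguish the two flavors:
  // big_ints->IsBigInt64Array() is true; IsBigUint64Array() is false.
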
@@ -4878,9 +4952,8 @@ class V8_EXPORT RegExp : public Object {
* static_cast<RegExp::Flags>(kGlobal | kMultiline))
* is equivalent to evaluating "/foo/gm".
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<RegExp> New(Local<String> pattern,
- Flags flags));
+ static V8_DEPRECATED("Use maybe version",
+ Local<RegExp> New(Local<String> pattern, Flags flags));
static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> New(Local<Context> context,
Local<String> pattern,
Flags flags);
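A migration sketch for the deprecation above; the maybe version takes a context and surfaces compilation failure as an empty handle. `pattern` is assumed to be a Local<String> in scope:

  v8::Local<v8::RegExp> re;
  if (v8::RegExp::New(context, pattern,
                      static_cast<v8::RegExp::Flags>(v8::RegExp::kGlobal |
                                                     v8::RegExp::kMultiline))
          .ToLocal(&re)) {
    // Equivalent to evaluating "/foo/gm" when `pattern` is "foo".
  }
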
@@ -7437,7 +7510,7 @@ class V8_EXPORT Isolate {
/**
* Enqueues the callback to the Microtask Work Queue
*/
- void EnqueueMicrotask(MicrotaskCallback microtask, void* data = NULL);
+ void EnqueueMicrotask(MicrotaskCallback callback, void* data = nullptr);
/**
* Controls how Microtasks are invoked. See MicrotasksPolicy for details.
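A sketch of the plain-function overload with the renamed parameter; `MyState` and `state` are hypothetical, and the `data` pointer must outlive the callback:

  void OnMicrotask(void* data) {
    static_cast<MyState*>(data)->ran = true;
  }
  // ...
  isolate->EnqueueMicrotask(OnMicrotask, &state);
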
@@ -7967,6 +8040,8 @@ class V8_EXPORT V8 {
WeakCallbackInfo<void>::Callback weak_callback);
static void MakeWeak(internal::Object*** location_addr);
static void* ClearWeak(internal::Object** location);
+ static void AnnotateStrongRetainer(internal::Object** location,
+ const char* label);
static Value* Eternalize(Isolate* isolate, Value* handle);
static void RegisterExternallyReferencedObject(internal::Object** object,
@@ -8203,7 +8278,7 @@ class V8_EXPORT TryCatch {
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
- TryCatch(Isolate* isolate);
+ explicit TryCatch(Isolate* isolate);
/**
* Unregisters and deletes this try/catch block.
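A sketch of the stack-allocated usage the comment above calls for; the now-explicit constructor rules out implicit conversions from Isolate*. `script` and `context` are assumed in scope:

  {
    v8::TryCatch try_catch(isolate);
    if (script->Run(context).IsEmpty() && try_catch.HasCaught()) {
      // Inspect try_catch.Exception() or try_catch.Message().
    }
  }
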
@@ -9175,6 +9250,12 @@ P* PersistentBase<T>::ClearWeak() {
}
template <class T>
+void PersistentBase<T>::AnnotateStrongRetainer(const char* label) {
+ V8::AnnotateStrongRetainer(reinterpret_cast<internal::Object**>(this->val_),
+ label);
+}
+
+template <class T>
void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
if (IsEmpty()) return;
V8::RegisterExternallyReferencedObject(
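A sketch of the annotation wired through above: the label names the edge retaining the object in heap snapshots, which helps attribute leaks. Assuming the label is not copied, a string literal is the safe choice:

  v8::Persistent<v8::Object> cache_root(isolate, some_object);
  cache_root.AnnotateStrongRetainer("MyEmbedder::cache_root");
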
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index d57b117f10..cda76faf51 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -31,11 +31,6 @@ verifiers {
triggered_by: "v8_linux64_asan_rel_ng"
}
builders { name: "v8_linux64_gcc_compile_dbg" }
- builders { name: "v8_linux64_gyp_rel_ng" }
- builders {
- name: "v8_linux64_gyp_rel_ng_triggered"
- triggered_by: "v8_linux64_gyp_rel_ng"
- }
builders { name: "v8_linux64_rel_ng" }
builders {
name: "v8_linux64_rel_ng_triggered"
@@ -96,10 +91,7 @@ verifiers {
}
builders { name: "v8_node_linux64_rel" }
builders { name: "v8_presubmit" }
- builders {
- name: "v8_win64_msvc_compile_rel"
- experiment_percentage: 20
- }
+ builders { name: "v8_win64_msvc_compile_rel" }
builders { name: "v8_win64_rel_ng" }
builders {
name: "v8_win64_rel_ng_triggered"
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index e025c98f9e..15ce744999 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -45,565 +45,532 @@
},
'client.dart.fyi': {
- 'v8-linux-release': 'gn_release_x86_disassembler',
- 'v8-win-release': 'gn_release_x86_disassembler',
- 'v8-mac-release': 'gn_release_x86_disassembler',
+ 'v8-linux-release': 'release_x86_disassembler',
+ 'v8-win-release': 'release_x86_disassembler',
+ 'v8-mac-release': 'release_x86_disassembler',
},
'client.dynamorio': {
- 'linux-v8-dr': 'gn_release_x64',
+ 'linux-v8-dr': 'release_x64',
},
'client.v8': {
# Linux.
- 'V8 Linux - builder': 'gn_release_x86_gcmole',
- 'V8 Linux - debug builder': 'gn_debug_x86',
- 'V8 Linux - nosnap builder': 'gn_release_x86_no_snap',
- 'V8 Linux - nosnap debug builder': 'gn_debug_x86_no_snap',
- 'V8 Linux - shared': 'gn_release_x86_shared_verify_heap',
- 'V8 Linux - noi18n - debug': 'gn_debug_x86_no_i18n',
- 'V8 Linux - verify csa': 'gn_release_x86_verify_csa',
+ 'V8 Linux - builder': 'release_x86_gcmole',
+ 'V8 Linux - debug builder': 'debug_x86',
+ 'V8 Linux - nosnap builder': 'release_x86_no_snap',
+ 'V8 Linux - nosnap debug builder': 'debug_x86_no_snap',
+ 'V8 Linux - shared': 'release_x86_shared_verify_heap',
+ 'V8 Linux - noi18n - debug': 'debug_x86_no_i18n',
+ 'V8 Linux - verify csa': 'release_x86_verify_csa',
# Linux64.
- 'V8 Linux64 - builder': 'gn_release_x64',
- 'V8 Linux64 - concurrent marking - builder': 'gn_release_x64_concurrent_marking',
- 'V8 Linux64 - debug builder': 'gn_debug_x64',
- 'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
- 'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
- 'V8 Linux64 - gyp': 'gyp_release_x64',
- 'V8 Linux64 - verify csa': 'gn_release_x64_verify_csa',
+ 'V8 Linux64 - builder': 'release_x64',
+ 'V8 Linux64 - concurrent marking - builder': 'release_x64_concurrent_marking',
+ 'V8 Linux64 - debug builder': 'debug_x64',
+ 'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom',
+ 'V8 Linux64 - internal snapshot': 'release_x64_internal',
+ 'V8 Linux64 - verify csa': 'release_x64_verify_csa',
# Windows.
- 'V8 Win32 - builder': 'gn_release_x86_minimal_symbols',
- 'V8 Win32 - debug builder': 'gn_debug_x86_minimal_symbols',
+ 'V8 Win32 - builder': 'release_x86_minimal_symbols',
+ 'V8 Win32 - debug builder': 'debug_x86_minimal_symbols',
'V8 Win32 - nosnap - shared':
- 'gn_release_x86_no_snap_shared_minimal_symbols',
- 'V8 Win32 ASAN': 'gn_release_x86_asan_no_lsan',
- 'V8 Win64': 'gn_release_x64_minimal_symbols',
- 'V8 Win64 - debug': 'gn_debug_x64_minimal_symbols',
- 'V8 Win64 - msvc': 'gn_release_x64_msvc',
+ 'release_x86_no_snap_shared_minimal_symbols',
+ # TODO(machenbach): Remove after switching to x64 on infra side.
+ 'V8 Win64 ASAN': 'release_x64_asan_no_lsan',
+ 'V8 Win64': 'release_x64_minimal_symbols',
+ 'V8 Win64 - debug': 'debug_x64_minimal_symbols',
+ 'V8 Win64 - msvc': 'release_x64_msvc',
# Mac.
- 'V8 Mac': 'gn_release_x86',
- 'V8 Mac - debug': 'gn_debug_x86',
- 'V8 Mac64': 'gn_release_x64',
- 'V8 Mac64 - debug': 'gn_debug_x64',
- 'V8 Mac GC Stress': 'gn_debug_x86',
- 'V8 Mac64 ASAN': 'gn_release_x64_asan_no_lsan',
+ 'V8 Mac': 'release_x86',
+ 'V8 Mac - debug': 'debug_x86',
+ 'V8 Mac64': 'release_x64',
+ 'V8 Mac64 - debug': 'debug_x64',
+ 'V8 Mac GC Stress': 'debug_x86',
+ 'V8 Mac64 ASAN': 'release_x64_asan_no_lsan',
# Sanitizers.
- 'V8 Linux64 ASAN': 'gn_release_x64_asan',
- 'V8 Linux64 TSAN': 'gn_release_x64_tsan',
+ 'V8 Linux64 ASAN': 'release_x64_asan',
+ 'V8 Linux64 TSAN': 'release_x64_tsan',
'V8 Linux64 TSAN - concurrent marking':
- 'gn_release_x64_tsan_concurrent_marking',
- 'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
+ 'release_x64_tsan_concurrent_marking',
+ 'V8 Linux - arm64 - sim - MSAN': 'release_simulate_arm64_msan',
# Misc.
- 'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
- 'V8 Linux64 gcc 4.8 - debug': 'gn_debug_x64_gcc',
+ 'V8 Linux gcc 4.8': 'release_x86_gcc',
+ 'V8 Linux64 gcc 4.8 - debug': 'debug_x64_gcc',
# FYI.
- 'V8 Fuchsia': 'gn_release_x64_fuchsia',
- 'V8 Fuchsia - debug': 'gn_debug_x64_fuchsia',
- 'V8 Linux - swarming staging': 'gn_release_x64',
- 'V8 Linux64 - cfi': 'gn_release_x64_cfi',
- 'V8 Linux64 UBSanVptr': 'gn_release_x64_ubsan_vptr',
- 'V8 Linux - vtunejit': 'gn_debug_x86_vtunejit',
- 'V8 Linux64 - gcov coverage': 'gn_release_x64_gcc_coverage',
- 'V8 Linux - predictable': 'gn_release_x86_predictable',
- 'V8 Linux - full debug': 'gn_full_debug_x86',
- 'V8 Linux - interpreted regexp': 'gn_release_x86_interpreted_regexp',
- 'V8 Random Deopt Fuzzer - debug': 'gn_debug_x64',
+ 'V8 Fuchsia': 'release_x64_fuchsia',
+ 'V8 Fuchsia - debug': 'debug_x64_fuchsia',
+ 'V8 Linux - swarming staging': 'release_x64',
+ 'V8 Linux64 - cfi': 'release_x64_cfi',
+ 'V8 Linux64 UBSanVptr': 'release_x64_ubsan_vptr',
+ 'V8 Linux - vtunejit': 'debug_x86_vtunejit',
+ 'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage',
+ 'V8 Linux - predictable': 'release_x86_predictable',
+ 'V8 Linux - full debug': 'full_debug_x86',
+ 'V8 Linux - interpreted regexp': 'release_x86_interpreted_regexp',
+ 'V8 Random Deopt Fuzzer - debug': 'debug_x64',
},
'client.v8.clusterfuzz': {
- 'V8 Win32 ASAN - release builder':
- 'gn_release_x86_asan_no_lsan_verify_heap',
+ 'V8 Win64 ASAN - release builder':
+ 'release_x64_asan_no_lsan_verify_heap',
# Note this is called a debug builder, but it uses a release build
# configuration with dchecks (which enables DEBUG in V8), since win-asan
# debug is not supported.
- 'V8 Win32 ASAN - debug builder':
- 'gn_release_x86_asan_no_lsan_verify_heap_dchecks',
+ 'V8 Win64 ASAN - debug builder':
+ 'release_x64_asan_no_lsan_verify_heap_dchecks',
'V8 Mac64 ASAN - release builder':
- 'gn_release_x64_asan_no_lsan_edge_verify_heap',
+ 'release_x64_asan_no_lsan_edge_verify_heap',
'V8 Mac64 ASAN - debug builder':
- 'gn_debug_x64_asan_no_lsan_static_edge',
- 'V8 Linux64 - release builder': 'gn_release_x64_correctness_fuzzer',
- 'V8 Linux64 - debug builder': 'gn_debug_x64',
+ 'debug_x64_asan_no_lsan_static_edge',
+ 'V8 Linux64 - release builder': 'release_x64_correctness_fuzzer',
+ 'V8 Linux64 - debug builder': 'debug_x64',
+ 'V8 Linux64 - nosnap release builder': 'release_x64_no_snap',
+ 'V8 Linux64 - nosnap debug builder': 'debug_x64_no_snap',
'V8 Linux64 ASAN no inline - release builder':
- 'gn_release_x64_asan_symbolized_edge_verify_heap',
- 'V8 Linux64 ASAN - debug builder': 'gn_debug_x64_asan_edge',
+ 'release_x64_asan_symbolized_edge_verify_heap',
+ 'V8 Linux64 ASAN - debug builder': 'debug_x64_asan_edge',
'V8 Linux64 ASAN arm64 - debug builder':
- 'gn_debug_simulate_arm64_asan_edge',
+ 'debug_simulate_arm64_asan_edge',
'V8 Linux ASAN arm - debug builder':
- 'gn_debug_simulate_arm_asan_edge',
+ 'debug_simulate_arm_asan_edge',
'V8 Linux ASAN mipsel - debug builder':
- 'gn_debug_simulate_mipsel_asan_edge',
- 'V8 Linux64 CFI - release builder': 'gn_release_x64_cfi_clusterfuzz',
+ 'debug_simulate_mipsel_asan_edge',
+ 'V8 Linux64 CFI - release builder': 'release_x64_cfi_clusterfuzz',
'V8 Linux MSAN no origins':
- 'gn_release_simulate_arm64_msan_no_origins_edge',
+ 'release_simulate_arm64_msan_no_origins_edge',
'V8 Linux MSAN chained origins':
- 'gn_release_simulate_arm64_msan_edge',
- 'V8 Linux64 TSAN - release builder': 'gn_release_x64_tsan',
+ 'release_simulate_arm64_msan_edge',
+ 'V8 Linux64 TSAN - release builder': 'release_x64_tsan',
'V8 Linux64 UBSanVptr - release builder':
- 'gn_release_x64_ubsan_vptr_recover_edge',
+ 'release_x64_ubsan_vptr_recover_edge',
},
'client.v8.ports': {
# Arm.
- 'V8 Arm - builder': 'gn_release_arm',
- 'V8 Arm - debug builder': 'gn_debug_arm',
- 'V8 Android Arm - builder': 'gn_release_android_arm',
- 'V8 Linux - arm - sim': 'gn_release_simulate_arm',
- 'V8 Linux - arm - sim - debug': 'gn_debug_simulate_arm',
+ 'V8 Arm - builder': 'release_arm',
+ 'V8 Arm - debug builder': 'debug_arm',
+ 'V8 Android Arm - builder': 'release_android_arm',
+ 'V8 Linux - arm - sim': 'release_simulate_arm',
+ 'V8 Linux - arm - sim - debug': 'debug_simulate_arm',
# Arm64.
- 'V8 Android Arm64 - builder': 'gn_release_android_arm64',
- 'V8 Linux - arm64 - sim': 'gn_release_simulate_arm64',
- 'V8 Linux - arm64 - sim - debug': 'gn_debug_simulate_arm64',
+ 'V8 Android Arm64 - builder': 'release_android_arm64',
+ 'V8 Linux - arm64 - sim': 'release_simulate_arm64',
+ 'V8 Linux - arm64 - sim - debug': 'debug_simulate_arm64',
'V8 Linux - arm64 - sim - nosnap - debug':
- 'gn_debug_simulate_arm64_no_snap',
- 'V8 Linux - arm64 - sim - gc stress': 'gn_debug_simulate_arm64',
+ 'debug_simulate_arm64_no_snap',
+ 'V8 Linux - arm64 - sim - gc stress': 'debug_simulate_arm64',
# Mips.
- 'V8 Mips - builder': 'gyp_release_mips_no_snap_no_i18n',
- 'V8 Linux - mipsel - sim - builder': 'gn_release_simulate_mipsel',
- 'V8 Linux - mips64el - sim - builder': 'gn_release_simulate_mips64el',
+ 'V8 Mips - builder': 'release_mips_no_snap_no_i18n',
+ 'V8 Linux - mipsel - sim - builder': 'release_simulate_mipsel',
+ 'V8 Linux - mips64el - sim - builder': 'release_simulate_mips64el',
# PPC.
- 'V8 Linux - ppc - sim': 'gn_release_simulate_ppc',
- 'V8 Linux - ppc64 - sim': 'gn_release_simulate_ppc64',
+ 'V8 Linux - ppc - sim': 'release_simulate_ppc',
+ 'V8 Linux - ppc64 - sim': 'release_simulate_ppc64',
# S390.
- 'V8 Linux - s390 - sim': 'gn_release_simulate_s390',
- 'V8 Linux - s390x - sim': 'gn_release_simulate_s390x',
+ 'V8 Linux - s390 - sim': 'release_simulate_s390',
+ 'V8 Linux - s390x - sim': 'release_simulate_s390x',
},
'client.v8.branches': {
- 'V8 Linux - beta branch': 'gn_release_x86',
- 'V8 Linux - beta branch - debug': 'gn_debug_x86',
- 'V8 Linux - stable branch': 'gn_release_x86',
- 'V8 Linux - stable branch - debug': 'gn_debug_x86',
- 'V8 Linux64 - beta branch': 'gn_release_x64',
- 'V8 Linux64 - beta branch - debug': 'gn_debug_x64',
- 'V8 Linux64 - stable branch': 'gn_release_x64',
- 'V8 Linux64 - stable branch - debug': 'gn_debug_x64',
- 'V8 arm - sim - beta branch': 'gn_release_simulate_arm',
- 'V8 arm - sim - beta branch - debug': 'gn_debug_simulate_arm',
- 'V8 arm - sim - stable branch': 'gn_release_simulate_arm',
- 'V8 arm - sim - stable branch - debug': 'gn_debug_simulate_arm',
- 'V8 mips64el - sim - beta branch': 'gn_release_simulate_mips64el',
- 'V8 mips64el - sim - stable branch': 'gn_release_simulate_mips64el',
- 'V8 mipsel - sim - beta branch': 'gn_release_simulate_mipsel',
- 'V8 mipsel - sim - stable branch': 'gn_release_simulate_mipsel',
- 'V8 ppc - sim - beta branch': 'gn_release_simulate_ppc',
- 'V8 ppc - sim - stable branch': 'gn_release_simulate_ppc',
- 'V8 ppc64 - sim - beta branch': 'gn_release_simulate_ppc64',
- 'V8 ppc64 - sim - stable branch': 'gn_release_simulate_ppc64',
- 'V8 s390 - sim - beta branch': 'gn_release_simulate_s390',
- 'V8 s390 - sim - stable branch': 'gn_release_simulate_s390',
- 'V8 s390x - sim - beta branch': 'gn_release_simulate_s390x',
- 'V8 s390x - sim - stable branch': 'gn_release_simulate_s390x',
+ 'V8 Linux - beta branch': 'release_x86',
+ 'V8 Linux - beta branch - debug': 'debug_x86',
+ 'V8 Linux - stable branch': 'release_x86',
+ 'V8 Linux - stable branch - debug': 'debug_x86',
+ 'V8 Linux64 - beta branch': 'release_x64',
+ 'V8 Linux64 - beta branch - debug': 'debug_x64',
+ 'V8 Linux64 - stable branch': 'release_x64',
+ 'V8 Linux64 - stable branch - debug': 'debug_x64',
+ 'V8 arm - sim - beta branch': 'release_simulate_arm',
+ 'V8 arm - sim - beta branch - debug': 'debug_simulate_arm',
+ 'V8 arm - sim - stable branch': 'release_simulate_arm',
+ 'V8 arm - sim - stable branch - debug': 'debug_simulate_arm',
+ 'V8 mips64el - sim - beta branch': 'release_simulate_mips64el',
+ 'V8 mips64el - sim - stable branch': 'release_simulate_mips64el',
+ 'V8 mipsel - sim - beta branch': 'release_simulate_mipsel',
+ 'V8 mipsel - sim - stable branch': 'release_simulate_mipsel',
+ 'V8 ppc - sim - beta branch': 'release_simulate_ppc',
+ 'V8 ppc - sim - stable branch': 'release_simulate_ppc',
+ 'V8 ppc64 - sim - beta branch': 'release_simulate_ppc64',
+ 'V8 ppc64 - sim - stable branch': 'release_simulate_ppc64',
+ 'V8 s390 - sim - beta branch': 'release_simulate_s390',
+ 'V8 s390 - sim - stable branch': 'release_simulate_s390',
+ 'V8 s390x - sim - beta branch': 'release_simulate_s390x',
+ 'V8 s390x - sim - stable branch': 'release_simulate_s390x',
},
'tryserver.v8': {
- 'v8_fuchsia_rel_ng': 'gn_release_x64_fuchsia_trybot',
- 'v8_linux_rel_ng': 'gn_release_x86_gcmole_trybot',
- 'v8_linux_verify_csa_rel_ng': 'gn_release_x86_verify_csa',
- 'v8_linux_nodcheck_rel_ng': 'gn_release_x86_minimal_symbols',
- 'v8_linux_dbg_ng': 'gn_debug_x86_trybot',
- 'v8_linux_noi18n_rel_ng': 'gn_release_x86_no_i18n_trybot',
- 'v8_linux_gc_stress_dbg': 'gn_debug_x86_trybot',
- 'v8_linux_nosnap_rel': 'gn_release_x86_no_snap_trybot',
- 'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
- 'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
- 'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
- 'v8_linux_shared_compile_rel': 'gn_release_x86_shared_verify_heap',
- 'v8_linux64_gcc_compile_dbg': 'gn_debug_x64_gcc',
- 'v8_linux64_fyi_rel_ng': 'gn_release_x64_test_features_trybot',
- 'v8_linux64_rel_ng': 'gn_release_x64_test_features_trybot',
- 'v8_linux64_verify_csa_rel_ng': 'gn_release_x64_verify_csa',
- 'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
- 'v8_linux64_asan_rel_ng': 'gn_release_x64_asan_minimal_symbols',
- 'v8_linux64_msan_rel': 'gn_release_simulate_arm64_msan_minimal_symbols',
+ 'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot',
+ 'v8_linux_rel_ng': 'release_x86_gcmole_trybot',
+ 'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa',
+ 'v8_linux_nodcheck_rel_ng': 'release_x86_minimal_symbols',
+ 'v8_linux_dbg_ng': 'debug_x86_trybot',
+ 'v8_linux_noi18n_rel_ng': 'release_x86_no_i18n_trybot',
+ 'v8_linux_gc_stress_dbg': 'debug_x86_trybot',
+ 'v8_linux_nosnap_rel': 'release_x86_no_snap_trybot',
+ 'v8_linux_nosnap_dbg': 'debug_x86_no_snap_trybot',
+ 'v8_linux_gcc_compile_rel': 'release_x86_gcc_minimal_symbols',
+ 'v8_linux_gcc_rel': 'release_x86_gcc_minimal_symbols',
+ 'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap',
+ 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
+ 'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot',
+ 'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
+ 'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa',
+ 'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols',
+ 'v8_linux64_msan_rel': 'release_simulate_arm64_msan_minimal_symbols',
'v8_linux64_sanitizer_coverage_rel':
- 'gn_release_x64_asan_minimal_symbols_coverage',
- 'v8_linux64_tsan_rel': 'gn_release_x64_tsan_minimal_symbols',
+ 'release_x64_asan_minimal_symbols_coverage',
+ 'v8_linux64_tsan_rel': 'release_x64_tsan_minimal_symbols',
'v8_linux64_tsan_concurrent_marking_rel_ng':
- 'gn_release_x64_tsan_concurrent_marking_minimal_symbols',
- 'v8_linux64_ubsan_rel_ng': 'gn_release_x64_ubsan_vptr_minimal_symbols',
- 'v8_win_asan_rel_ng': 'gn_release_x86_asan_no_lsan',
- 'v8_win_dbg': 'gn_debug_x86_trybot',
- 'v8_win_compile_dbg': 'gn_debug_x86_trybot',
- 'v8_win_rel_ng': 'gn_release_x86_trybot',
+ 'release_x64_tsan_concurrent_marking_minimal_symbols',
+ 'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_vptr_minimal_symbols',
+ # TODO(machenbach): Remove after switching to x64 on infra side.
+ 'v8_win_dbg': 'debug_x86_trybot',
+ 'v8_win_compile_dbg': 'debug_x86_trybot',
+ 'v8_win_rel_ng': 'release_x86_trybot',
'v8_win_nosnap_shared_rel_ng':
- 'gn_release_x86_no_snap_shared_minimal_symbols',
- # TODO(machenbach): Rename bot to msvc.
- 'v8_win64_msvc_compile_rel': 'gn_release_x64_msvc',
- 'v8_win64_dbg': 'gn_debug_x64_minimal_symbols',
- 'v8_win64_rel_ng': 'gn_release_x64_trybot',
- 'v8_mac_rel_ng': 'gn_release_x86_trybot',
- 'v8_mac_dbg': 'gn_debug_x86_trybot',
- 'v8_mac_gc_stress_dbg': 'gn_debug_x86_trybot',
- 'v8_mac64_rel': 'gn_release_x64_trybot',
- 'v8_mac64_dbg': 'gn_debug_x64_minimal_symbols',
- 'v8_mac64_asan_rel': 'gn_release_x64_asan_no_lsan',
- 'v8_linux_arm_rel_ng': 'gn_release_simulate_arm_trybot',
- 'v8_linux_arm_dbg': 'gn_debug_simulate_arm',
- 'v8_linux_arm_armv8a_rel': 'gn_release_simulate_arm_trybot',
- 'v8_linux_arm_armv8a_dbg': 'gn_debug_simulate_arm',
- 'v8_linux_arm64_rel_ng': 'gn_release_simulate_arm64_trybot',
- 'v8_linux_arm64_dbg': 'gn_debug_simulate_arm64',
- 'v8_linux_arm64_gc_stress_dbg': 'gn_debug_simulate_arm64',
- 'v8_linux_mipsel_compile_rel': 'gn_release_simulate_mipsel',
- 'v8_linux_mips64el_compile_rel': 'gn_release_simulate_mips64el',
- 'v8_android_arm_compile_rel': 'gn_release_android_arm',
+ 'release_x86_no_snap_shared_minimal_symbols',
+ 'v8_win64_asan_rel_ng': 'release_x64_asan_no_lsan',
+ 'v8_win64_msvc_compile_rel': 'release_x64_msvc',
+ 'v8_win64_dbg': 'debug_x64_minimal_symbols',
+ 'v8_win64_rel_ng': 'release_x64_trybot',
+ 'v8_mac_rel_ng': 'release_x86_trybot',
+ 'v8_mac_dbg': 'debug_x86_trybot',
+ 'v8_mac_gc_stress_dbg': 'debug_x86_trybot',
+ 'v8_mac64_rel': 'release_x64_trybot',
+ 'v8_mac64_dbg': 'debug_x64_minimal_symbols',
+ 'v8_mac64_asan_rel': 'release_x64_asan_no_lsan',
+ 'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot',
+ 'v8_linux_arm_dbg': 'debug_simulate_arm',
+ 'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot',
+ 'v8_linux_arm_armv8a_dbg': 'debug_simulate_arm',
+ 'v8_linux_arm64_rel_ng': 'release_simulate_arm64_trybot',
+ 'v8_linux_arm64_dbg': 'debug_simulate_arm64',
+ 'v8_linux_arm64_gc_stress_dbg': 'debug_simulate_arm64',
+ 'v8_linux_mipsel_compile_rel': 'release_simulate_mipsel',
+ 'v8_linux_mips64el_compile_rel': 'release_simulate_mips64el',
+ 'v8_android_arm_compile_rel': 'release_android_arm',
},
},
# To ease readability, config values are ordered by:
- # gyp/gn, release/debug, arch type, other values alphabetically.
+ # release/debug, arch type, other values alphabetically.
'configs': {
# Developer default configs.
'default_debug_arm': [
- 'gn', 'debug', 'simulate_arm', 'v8_enable_slow_dchecks',
- 'v8_full_debug'],
+ 'debug', 'simulate_arm', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_arm': [
- 'gn', 'debug', 'simulate_arm', 'v8_enable_slow_dchecks'],
+ 'debug', 'simulate_arm', 'v8_enable_slow_dchecks'],
'default_release_arm': [
- 'gn', 'release', 'simulate_arm'],
+ 'release', 'simulate_arm'],
'default_debug_android_arm': [
- 'gn', 'debug', 'arm', 'android', 'crosscompile',
- 'v8_enable_slow_dchecks', 'v8_full_debug'],
+ 'debug', 'arm', 'android', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_android_arm': [
- 'gn', 'debug', 'arm', 'android', 'crosscompile',
- 'v8_enable_slow_dchecks' ],
+ 'debug', 'arm', 'android', 'v8_enable_slow_dchecks' ],
'default_release_android_arm': [
- 'gn', 'release', 'arm', 'android', 'crosscompile'],
+ 'release', 'arm', 'android'],
'default_debug_arm64': [
- 'gn', 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks',
- 'v8_full_debug'],
+ 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_arm64': [
- 'gn', 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks'],
+ 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks'],
'default_release_arm64': [
- 'gn', 'release', 'simulate_arm64'],
+ 'release', 'simulate_arm64'],
'default_debug_mipsel': [
- 'gn', 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks',
- 'v8_full_debug'],
+ 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_mipsel': [
- 'gn', 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks'],
+ 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks'],
'default_release_mipsel': [
- 'gn', 'release', 'simulate_mipsel'],
+ 'release', 'simulate_mipsel'],
'default_debug_mips64el': [
- 'gn', 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks',
- 'v8_full_debug'],
+ 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_mips64el': [
- 'gn', 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
+ 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
'default_release_mips64el': [
- 'gn', 'release', 'simulate_mips64el'],
+ 'release', 'simulate_mips64el'],
'default_debug_ppc': [
- 'gn', 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks',
- 'v8_full_debug'],
+ 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc': [
- 'gn', 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks'],
+ 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks'],
'default_release_ppc': [
- 'gn', 'release', 'simulate_ppc'],
+ 'release', 'simulate_ppc'],
'default_debug_ppc64': [
- 'gn', 'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks',
- 'v8_full_debug'],
+ 'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc64': [
- 'gn', 'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks'],
+ 'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks'],
'default_release_ppc64': [
- 'gn', 'release', 'simulate_ppc64'],
+ 'release', 'simulate_ppc64'],
'default_debug_s390': [
- 'gn', 'debug', 'simulate_s390', 'v8_enable_slow_dchecks',
- 'v8_full_debug'],
+ 'debug', 'simulate_s390', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390': [
- 'gn', 'debug', 'simulate_s390', 'v8_enable_slow_dchecks'],
+ 'debug', 'simulate_s390', 'v8_enable_slow_dchecks'],
'default_release_s390': [
- 'gn', 'release', 'simulate_s390'],
+ 'release', 'simulate_s390'],
'default_debug_s390x': [
- 'gn', 'debug', 'simulate_s390x', 'v8_enable_slow_dchecks',
- 'v8_full_debug'],
+ 'debug', 'simulate_s390x', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390x': [
- 'gn', 'debug', 'simulate_s390x', 'v8_enable_slow_dchecks'],
+ 'debug', 'simulate_s390x', 'v8_enable_slow_dchecks'],
'default_release_s390x': [
- 'gn', 'release', 'simulate_s390x'],
+ 'release', 'simulate_s390x'],
'default_debug_x64': [
- 'gn', 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
+ 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x64': [
- 'gn', 'debug', 'x64', 'v8_enable_slow_dchecks'],
+ 'debug', 'x64', 'v8_enable_slow_dchecks'],
'default_release_x64': [
- 'gn', 'release', 'x64'],
+ 'release', 'x64'],
'default_debug_x86': [
- 'gn', 'debug', 'x86', 'v8_enable_slow_dchecks', 'v8_full_debug'],
+ 'debug', 'x86', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x86': [
- 'gn', 'debug', 'x86', 'v8_enable_slow_dchecks'],
+ 'debug', 'x86', 'v8_enable_slow_dchecks'],
'default_release_x86': [
- 'gn', 'release', 'x86'],
-
-
- # GN debug configs for simulators.
- 'gn_debug_simulate_arm': [
- 'gn', 'debug_bot', 'simulate_arm', 'swarming'],
- 'gn_debug_simulate_arm_asan_edge': [
- 'gn', 'debug_bot', 'simulate_arm', 'asan', 'edge'],
- 'gn_debug_simulate_arm64': [
- 'gn', 'debug_bot', 'simulate_arm64', 'swarming'],
- 'gn_debug_simulate_arm64_asan_edge': [
- 'gn', 'debug_bot', 'simulate_arm64', 'asan', 'lsan', 'edge'],
- 'gn_debug_simulate_arm64_no_snap': [
- 'gn', 'debug', 'simulate_arm64', 'shared', 'goma',
- 'v8_optimized_debug', 'swarming', 'v8_snapshot_none'],
- 'gn_debug_simulate_mipsel_asan_edge': [
- 'gn', 'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
-
- # GN release configs for simulators.
- 'gn_release_simulate_arm': [
- 'gn', 'release_bot', 'simulate_arm', 'swarming'],
- 'gn_release_simulate_arm_trybot': [
- 'gn', 'release_trybot', 'simulate_arm', 'swarming'],
- 'gn_release_simulate_arm64': [
- 'gn', 'release_bot', 'simulate_arm64', 'swarming'],
- 'gn_release_simulate_arm64_msan': [
- 'gn', 'release_bot', 'simulate_arm64', 'msan', 'swarming'],
- 'gn_release_simulate_arm64_msan_minimal_symbols': [
- 'gn', 'release_bot', 'simulate_arm64', 'msan', 'minimal_symbols',
- 'swarming'],
- 'gn_release_simulate_arm64_msan_edge': [
- 'gn', 'release_bot', 'simulate_arm64', 'edge', 'msan'],
- 'gn_release_simulate_arm64_msan_no_origins_edge': [
- 'gn', 'release_bot', 'simulate_arm64', 'edge', 'msan_no_origins'],
- 'gn_release_simulate_arm64_trybot': [
- 'gn', 'release_trybot', 'simulate_arm64', 'swarming'],
- 'gn_release_simulate_mipsel': [
- 'gn', 'release_bot', 'simulate_mipsel', 'swarming'],
- 'gn_release_simulate_mips64el': [
- 'gn', 'release_bot', 'simulate_mips64el', 'swarming'],
- 'gn_release_simulate_ppc': [
- 'gn', 'release_bot', 'simulate_ppc', 'swarming'],
- 'gn_release_simulate_ppc64': [
- 'gn', 'release_bot', 'simulate_ppc64', 'swarming'],
- 'gn_release_simulate_s390': [
- 'gn', 'release_bot', 'simulate_s390', 'swarming'],
- 'gn_release_simulate_s390x': [
- 'gn', 'release_bot', 'simulate_s390x', 'swarming'],
-
- # GN debug configs for arm.
- 'gn_debug_arm': [
- 'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
-
- # GN release configs for arm.
- 'gn_release_arm': [
- 'gn', 'release_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
- 'gn_release_android_arm': [
- 'gn', 'release_bot', 'arm', 'android', 'crosscompile',
- 'minimal_symbols', 'swarming'],
- 'gn_release_android_arm64': [
- 'gn', 'release_bot', 'arm64', 'android', 'crosscompile',
- 'minimal_symbols', 'swarming'],
-
- # GN release configs for x64.
- 'gn_release_x64': [
- 'gn', 'release_bot', 'x64', 'swarming'],
- 'gn_release_x64_asan': [
- 'gn', 'release_bot', 'x64', 'asan', 'lsan', 'swarming'],
- 'gn_release_x64_asan_minimal_symbols': [
- 'gn', 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols',
+ 'release', 'x86'],
+
+
+ # Debug configs for simulators.
+ 'debug_simulate_arm': [
+ 'debug_bot', 'simulate_arm', 'swarming'],
+ 'debug_simulate_arm_asan_edge': [
+ 'debug_bot', 'simulate_arm', 'asan', 'edge'],
+ 'debug_simulate_arm64': [
+ 'debug_bot', 'simulate_arm64', 'swarming'],
+ 'debug_simulate_arm64_asan_edge': [
+ 'debug_bot', 'simulate_arm64', 'asan', 'lsan', 'edge'],
+ 'debug_simulate_arm64_no_snap': [
+ 'debug', 'simulate_arm64', 'shared', 'goma', 'v8_optimized_debug',
+ 'swarming', 'v8_snapshot_none'],
+ 'debug_simulate_mipsel_asan_edge': [
+ 'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
+
+ # Release configs for simulators.
+ 'release_simulate_arm': [
+ 'release_bot', 'simulate_arm', 'swarming'],
+ 'release_simulate_arm_trybot': [
+ 'release_trybot', 'simulate_arm', 'swarming'],
+ 'release_simulate_arm64': [
+ 'release_bot', 'simulate_arm64', 'swarming'],
+ 'release_simulate_arm64_msan': [
+ 'release_bot', 'simulate_arm64', 'msan', 'swarming'],
+ 'release_simulate_arm64_msan_minimal_symbols': [
+ 'release_bot', 'simulate_arm64', 'msan', 'minimal_symbols', 'swarming'],
+ 'release_simulate_arm64_msan_edge': [
+ 'release_bot', 'simulate_arm64', 'edge', 'msan'],
+ 'release_simulate_arm64_msan_no_origins_edge': [
+ 'release_bot', 'simulate_arm64', 'edge', 'msan_no_origins'],
+ 'release_simulate_arm64_trybot': [
+ 'release_trybot', 'simulate_arm64', 'swarming'],
+ 'release_simulate_mipsel': [
+ 'release_bot', 'simulate_mipsel', 'swarming'],
+ 'release_simulate_mips64el': [
+ 'release_bot', 'simulate_mips64el', 'swarming'],
+ 'release_simulate_ppc': [
+ 'release_bot', 'simulate_ppc', 'swarming'],
+ 'release_simulate_ppc64': [
+ 'release_bot', 'simulate_ppc64', 'swarming'],
+ 'release_simulate_s390': [
+ 'release_bot', 'simulate_s390', 'swarming'],
+ 'release_simulate_s390x': [
+ 'release_bot', 'simulate_s390x', 'swarming'],
+
+ # Debug configs for arm.
+ 'debug_arm': [
+ 'debug_bot', 'arm', 'hard_float', 'swarming'],
+
+ # Release configs for arm.
+ 'release_arm': [
+ 'release_bot', 'arm', 'hard_float', 'swarming'],
+ 'release_android_arm': [
+ 'release_bot', 'arm', 'android', 'minimal_symbols', 'swarming'],
+ 'release_android_arm64': [
+ 'release_bot', 'arm64', 'android', 'minimal_symbols', 'swarming'],
+
+ # Release configs for x64.
+ 'release_x64': [
+ 'release_bot', 'x64', 'swarming'],
+ 'release_x64_asan': [
+ 'release_bot', 'x64', 'asan', 'lsan', 'swarming'],
+ 'release_x64_asan_minimal_symbols': [
+ 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols', 'swarming'],
+ 'release_x64_asan_minimal_symbols_coverage': [
+ 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan', 'minimal_symbols',
'swarming'],
- 'gn_release_x64_asan_minimal_symbols_coverage': [
- 'gn', 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan',
- 'minimal_symbols', 'swarming'],
- 'gn_release_x64_asan_no_lsan': [
- 'gn', 'release_bot', 'x64', 'asan', 'swarming'],
- 'gn_release_x64_asan_no_lsan_edge_verify_heap': [
- 'gn', 'release_bot', 'x64', 'asan', 'edge', 'swarming',
+ 'release_x64_asan_no_lsan': [
+ 'release_bot', 'x64', 'asan', 'swarming'],
+ 'release_x64_asan_no_lsan_edge_verify_heap': [
+ 'release_bot', 'x64', 'asan', 'edge', 'swarming', 'v8_verify_heap'],
+ 'release_x64_asan_no_lsan_verify_heap': [
+ 'release_bot', 'x64', 'asan', 'swarming', 'v8_verify_heap'],
+ 'release_x64_asan_no_lsan_verify_heap_dchecks': [
+ 'release_bot', 'x64', 'asan', 'swarming', 'dcheck_always_on',
+ 'v8_enable_slow_dchecks', 'v8_verify_heap'],
+ 'release_x64_asan_symbolized_edge_verify_heap': [
+ 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized',
'v8_verify_heap'],
- 'gn_release_x64_asan_symbolized_edge_verify_heap': [
- 'gn', 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized',
- 'v8_verify_heap'],
- 'gn_release_x64_cfi': [
- 'gn', 'release_bot', 'x64', 'cfi', 'swarming'],
- 'gn_release_x64_cfi_clusterfuzz': [
- 'gn', 'release_bot', 'x64', 'cfi_clusterfuzz'],
- 'gn_release_x64_msvc': [
- 'gn', 'release_bot', 'x64', 'msvc', 'swarming'],
- 'gn_release_x64_concurrent_marking': [
- 'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'swarming'],
- 'gn_release_x64_correctness_fuzzer' : [
- 'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer', 'swarming'],
- 'gn_release_x64_fuchsia': [
- 'gn', 'release_bot', 'x64', 'fuchsia', 'swarming'],
- 'gn_release_x64_fuchsia_trybot': [
- 'gn', 'release_trybot', 'x64', 'fuchsia', 'swarming'],
- 'gn_release_x64_gcc_coverage': [
- 'gn', 'release_bot', 'x64', 'coverage', 'gcc'],
- 'gn_release_x64_internal': [
- 'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
- 'gn_release_x64_minimal_symbols': [
- 'gn', 'release_bot', 'x64', 'minimal_symbols', 'swarming'],
- 'gn_release_x64_trybot': [
- 'gn', 'release_trybot', 'x64', 'swarming'],
- 'gn_release_x64_test_features_trybot': [
- 'gn', 'release_trybot', 'x64', 'swarming', 'v8_enable_test_features'],
- 'gn_release_x64_tsan': [
- 'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
- 'gn_release_x64_tsan_concurrent_marking': [
- 'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'tsan',
- 'swarming'],
- 'gn_release_x64_tsan_concurrent_marking_minimal_symbols': [
- 'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'tsan',
+ 'release_x64_cfi': [
+ 'release_bot', 'x64', 'cfi', 'swarming'],
+ 'release_x64_cfi_clusterfuzz': [
+ 'release_bot', 'x64', 'cfi_clusterfuzz'],
+ 'release_x64_msvc': [
+ 'release_bot', 'x64', 'msvc', 'swarming'],
+ 'release_x64_concurrent_marking': [
+ 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'swarming'],
+ 'release_x64_correctness_fuzzer' : [
+ 'release_bot', 'x64', 'v8_correctness_fuzzer', 'swarming'],
+ 'release_x64_fuchsia': [
+ 'release_bot', 'x64', 'fuchsia', 'swarming'],
+ 'release_x64_fuchsia_trybot': [
+ 'release_trybot', 'x64', 'fuchsia', 'swarming'],
+ 'release_x64_gcc_coverage': [
+ 'release_bot', 'x64', 'coverage', 'gcc'],
+ 'release_x64_internal': [
+ 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
+ 'release_x64_minimal_symbols': [
+ 'release_bot', 'x64', 'minimal_symbols', 'swarming'],
+ 'release_x64_no_snap': [
+ 'release_bot', 'x64', 'swarming', 'v8_snapshot_none'],
+ 'release_x64_trybot': [
+ 'release_trybot', 'x64', 'swarming'],
+ 'release_x64_test_features_trybot': [
+ 'release_trybot', 'x64', 'swarming', 'v8_enable_test_features'],
+ 'release_x64_tsan': [
+ 'release_bot', 'x64', 'tsan', 'swarming'],
+ 'release_x64_tsan_concurrent_marking': [
+ 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'tsan', 'swarming'],
+ 'release_x64_tsan_concurrent_marking_minimal_symbols': [
+ 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'tsan',
'minimal_symbols', 'swarming'],
- 'gn_release_x64_tsan_minimal_symbols': [
- 'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
- 'gn_release_x64_ubsan_vptr': [
- 'gn', 'release_bot', 'x64', 'ubsan_vptr', 'swarming'],
- 'gn_release_x64_ubsan_vptr_recover_edge': [
- 'gn', 'release_bot', 'x64', 'edge', 'ubsan_vptr_recover', 'swarming'],
- 'gn_release_x64_ubsan_vptr_minimal_symbols': [
- 'gn', 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols', 'swarming'],
- 'gn_release_x64_verify_csa': [
- 'gn', 'release_bot', 'x64', 'swarming', 'dcheck_always_on',
+ 'release_x64_tsan_minimal_symbols': [
+ 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
+ 'release_x64_ubsan_vptr': [
+ 'release_bot', 'x64', 'ubsan_vptr', 'swarming'],
+ 'release_x64_ubsan_vptr_recover_edge': [
+ 'release_bot', 'x64', 'edge', 'ubsan_vptr_recover', 'swarming'],
+ 'release_x64_ubsan_vptr_minimal_symbols': [
+ 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols', 'swarming'],
+ 'release_x64_verify_csa': [
+ 'release_bot', 'x64', 'swarming', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
- # GN debug configs for x64.
- 'gn_debug_x64': [
- 'gn', 'debug_bot', 'x64', 'swarming'],
- 'gn_debug_x64_asan_edge': [
- 'gn', 'debug_bot', 'x64', 'asan', 'lsan', 'edge'],
- 'gn_debug_x64_asan_no_lsan_static_edge': [
- 'gn', 'debug', 'static', 'goma', 'v8_enable_slow_dchecks',
- 'v8_optimized_debug', 'x64', 'asan', 'edge', 'swarming'],
- 'gn_debug_x64_custom': [
- 'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
- 'gn_debug_x64_fuchsia': [
- 'gn', 'debug_bot', 'x64', 'fuchsia', 'swarming'],
- 'gn_debug_x64_gcc': [
- 'gn', 'debug_bot', 'x64', 'gcc'],
- 'gn_debug_x64_minimal_symbols': [
- 'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
-
- # GN debug configs for x86.
- 'gn_debug_x86': [
- 'gn', 'debug_bot', 'x86', 'swarming'],
- 'gn_debug_x86_minimal_symbols': [
- 'gn', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
- 'gn_debug_x86_no_i18n': [
- 'gn', 'debug_bot', 'x86', 'swarming', 'v8_no_i18n'],
- 'gn_debug_x86_no_snap': [
- 'gn', 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
- 'gn_debug_x86_no_snap_trybot': [
- 'gn', 'debug_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
- 'gn_debug_x86_trybot': [
- 'gn', 'debug_trybot', 'x86', 'swarming'],
- 'gn_debug_x86_vtunejit': [
- 'gn', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
- 'gn_full_debug_x86': [
- 'gn', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
+ # Debug configs for x64.
+ 'debug_x64': [
+ 'debug_bot', 'x64', 'swarming'],
+ 'debug_x64_asan_edge': [
+ 'debug_bot', 'x64', 'asan', 'lsan', 'edge'],
+ 'debug_x64_asan_no_lsan_static_edge': [
+ 'debug', 'static', 'goma', 'v8_enable_slow_dchecks', 'v8_optimized_debug',
+ 'x64', 'asan', 'edge', 'swarming'],
+ 'debug_x64_custom': [
+ 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
+ 'debug_x64_fuchsia': [
+ 'debug_bot', 'x64', 'fuchsia', 'swarming'],
+ 'debug_x64_gcc': [
+ 'debug_bot', 'x64', 'gcc'],
+ 'debug_x64_minimal_symbols': [
+ 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
+ 'debug_x64_no_snap': [
+ 'debug_bot', 'x64', 'swarming', 'v8_snapshot_none'],
+
+ # Debug configs for x86.
+ 'debug_x86': [
+ 'debug_bot', 'x86', 'swarming'],
+ 'debug_x86_minimal_symbols': [
+ 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
+ 'debug_x86_no_i18n': [
+ 'debug_bot', 'x86', 'swarming', 'v8_no_i18n'],
+ 'debug_x86_no_snap': [
+ 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'debug_x86_no_snap_trybot': [
+ 'debug_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'debug_x86_trybot': [
+ 'debug_trybot', 'x86', 'swarming'],
+ 'debug_x86_vtunejit': [
+ 'debug_bot', 'x86', 'v8_enable_vtunejit'],
+ 'full_debug_x86': [
+ 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
'v8_full_debug'],
- # GN release configs for x86.
- 'gn_release_x86': [
- 'gn', 'release_bot', 'x86', 'swarming'],
- 'gn_release_x86_asan_no_lsan': [
- 'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming'],
- 'gn_release_x86_asan_no_lsan_verify_heap': [
- 'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming',
- 'v8_verify_heap'],
- 'gn_release_x86_asan_no_lsan_verify_heap_dchecks': [
- 'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming',
- 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap'],
- 'gn_release_x86_disassembler': [
- 'gn', 'release_bot', 'x86', 'v8_enable_disassembler'],
- 'gn_release_x86_gcc': [
- 'gn', 'release_bot', 'x86', 'gcc'],
- 'gn_release_x86_gcc_minimal_symbols': [
- 'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols'],
- 'gn_release_x86_gcmole': [
- 'gn', 'release_bot', 'x86', 'gcmole', 'swarming'],
- 'gn_release_x86_gcmole_trybot': [
- 'gn', 'release_trybot', 'x86', 'gcmole', 'swarming'],
- 'gn_release_x86_interpreted_regexp': [
- 'gn', 'release_bot', 'x86', 'v8_interpreted_regexp'],
- 'gn_release_x86_minimal_symbols': [
- 'gn', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
- 'gn_release_x86_no_i18n_trybot': [
- 'gn', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
- 'gn_release_x86_no_snap': [
- 'gn', 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
- 'gn_release_x86_no_snap_shared_minimal_symbols': [
- 'gn', 'release', 'x86', 'goma', 'minimal_symbols', 'shared', 'swarming',
+ # Release configs for x86.
+ 'release_x86': [
+ 'release_bot', 'x86', 'swarming'],
+ 'release_x86_disassembler': [
+ 'release_bot', 'x86', 'v8_enable_disassembler'],
+ 'release_x86_gcc': [
+ 'release_bot', 'x86', 'gcc'],
+ 'release_x86_gcc_minimal_symbols': [
+ 'release_bot', 'x86', 'gcc', 'minimal_symbols'],
+ 'release_x86_gcmole': [
+ 'release_bot', 'x86', 'gcmole', 'swarming'],
+ 'release_x86_gcmole_trybot': [
+ 'release_trybot', 'x86', 'gcmole', 'swarming'],
+ 'release_x86_interpreted_regexp': [
+ 'release_bot', 'x86', 'v8_interpreted_regexp'],
+ 'release_x86_minimal_symbols': [
+ 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
+ 'release_x86_no_i18n_trybot': [
+ 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
+ 'release_x86_no_snap': [
+ 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'release_x86_no_snap_shared_minimal_symbols': [
+ 'release', 'x86', 'goma', 'minimal_symbols', 'shared', 'swarming',
'v8_snapshot_none'],
- 'gn_release_x86_no_snap_trybot': [
- 'gn', 'release_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
- 'gn_release_x86_predictable': [
- 'gn', 'release_bot', 'x86', 'v8_enable_verify_predictable'],
- 'gn_release_x86_shared_verify_heap': [
- 'gn', 'release', 'x86', 'goma', 'shared', 'swarming', 'v8_verify_heap'],
- 'gn_release_x86_trybot': [
- 'gn', 'release_trybot', 'x86', 'swarming'],
- 'gn_release_x86_verify_csa': [
- 'gn', 'release_bot', 'x86', 'swarming', 'dcheck_always_on',
+ 'release_x86_no_snap_trybot': [
+ 'release_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'release_x86_predictable': [
+ 'release_bot', 'x86', 'v8_enable_verify_predictable'],
+ 'release_x86_shared_verify_heap': [
+ 'release', 'x86', 'goma', 'shared', 'swarming', 'v8_verify_heap'],
+ 'release_x86_trybot': [
+ 'release_trybot', 'x86', 'swarming'],
+ 'release_x86_verify_csa': [
+ 'release_bot', 'x86', 'swarming', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
- # Gyp release configs for mips.
- 'gyp_release_mips_no_snap_no_i18n': [
- 'gyp', 'release', 'mips', 'crosscompile', 'no_sysroot', 'static',
- 'v8_no_i18n', 'v8_snapshot_none'],
-
- # Gyp release configs for x64.
- 'gyp_release_x64': [
- 'gyp', 'release_bot', 'x64', 'no_sysroot', 'swarming'],
+ # Release configs for mips.
+ 'release_mips_no_snap_no_i18n': [
+ 'release', 'mips', 'no_sysroot', 'static', 'v8_no_i18n',
+ 'v8_snapshot_none'],
},
'mixins': {
'android': {
'gn_args': 'target_os="android" v8_android_log_stdout=true',
- 'gyp_defines': 'OS=android v8_android_log_stdout=1',
},
'arm': {
'gn_args': 'target_cpu="arm"',
- 'gyp_defines': 'target_arch=arm',
},
'arm64': {
'gn_args': 'target_cpu="arm64"',
- 'gyp_defines': 'target_arch=arm64',
},
'asan': {
- 'mixins': ['v8_enable_test_features'],
+ 'mixins': ['clang', 'v8_enable_test_features'],
'gn_args': 'is_asan=true',
- 'gyp_defines': 'clang=1 asan=1',
},
'bb': {
'gn_args': 'sanitizer_coverage_flags="bb,trace-pc-guard"',
- 'gyp_defines': 'sanitizer_coverage=bb,trace-pc-guard',
},
'cfi': {
'mixins': ['v8_enable_test_features'],
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
'use_cfi_recover=false'),
- 'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
},
'cfi_clusterfuzz': {
'mixins': ['v8_enable_test_features'],
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
'use_cfi_recover=true'),
- 'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
},
'clang': {
'gn_args': 'is_clang=true',
- 'gyp_defines': 'clang=1',
},
'coverage': {
'gn_args': 'v8_code_coverage=true',
- 'gyp_defines': 'coverage=1',
- },
-
- 'crosscompile': {
- 'gyp_crosscompile': True,
},
'dcheck_always_on': {
'gn_args': 'dcheck_always_on=true',
- 'gyp_defines': 'dcheck_always_on=1',
},
'debug': {
'gn_args': 'is_debug=true v8_enable_backtrace=true',
- 'gyp_defines': 'v8_enable_backtrace=1',
},
'debug_bot': {
@@ -618,7 +585,6 @@
'edge': {
'gn_args': 'sanitizer_coverage_flags="trace-pc-guard"',
- 'gyp_defines': 'sanitizer_coverage=trace-pc-guard',
},
'fuchsia': {
@@ -628,44 +594,36 @@
'gcc': {
# TODO(machenbach): Remove cxx11 restriction when updating gcc version.
'gn_args': 'is_clang=false use_cxx11=true',
- 'gyp_defines': 'clang=0',
},
'gcmole': {
'gn_args': 'v8_gcmole=true',
- 'gyp_defines': 'gcmole=1',
},
- 'gn': {'type': 'gn'},
-
'goma': {
- # The MB code will properly escape goma_dir if necessary in the GYP
- # code path; the GN code path needs no escaping.
'gn_args': 'use_goma=true',
- 'gyp_defines': 'use_goma=1',
},
- 'gyp': {'type': 'gyp'},
-
'hard_float': {
'gn_args': 'arm_float_abi="hard"',
- 'gyp_defines': 'arm_float_abi=hard',
},
'lsan': {
'mixins': ['v8_enable_test_features'],
'gn_args': 'is_lsan=true',
- 'gyp_defines': 'lsan=1',
},
'minimal_symbols': {
'gn_args': 'symbol_level=1',
- 'gyp_defines': 'fastbuild=1',
},
'mips': {
+ 'mixins': ['mips_bundled_toolchain'],
'gn_args': 'target_cpu="mips"',
- 'gyp_defines': 'target_arch=mips',
+ },
+
+ 'mips_bundled_toolchain': {
+ 'gn_args': 'custom_toolchain="//tools/toolchain:mips-bundled"',
},
'msan': {
@@ -686,7 +644,6 @@
'no_sysroot': {
'gn_args': 'use_sysroot=false',
- 'gyp_defines': 'use_sysroot=0',
},
'release': {
@@ -703,71 +660,57 @@
'shared': {
'gn_args': 'is_component_build=true',
- 'gyp_defines': 'component=shared_library',
},
'simulate_arm': {
'gn_args': 'target_cpu="x86" v8_target_cpu="arm"',
- 'gyp_defines': 'target_arch=ia32 v8_target_arch=arm',
},
'simulate_arm64': {
'gn_args': 'target_cpu="x64" v8_target_cpu="arm64"',
- 'gyp_defines': 'target_arch=x64 v8_target_arch=arm64',
},
'simulate_mipsel': {
'gn_args':
'target_cpu="x86" v8_target_cpu="mipsel" mips_arch_variant="r2"',
- 'gyp_defines': 'target_arch=ia32 v8_target_arch=mipsel',
},
'simulate_mips64el': {
'gn_args': 'target_cpu="x64" v8_target_cpu="mips64el"',
- 'gyp_defines': 'target_arch=x64 v8_target_arch=mips64el',
},
'simulate_ppc': {
'gn_args': 'target_cpu="x86" v8_target_cpu="ppc"',
- 'gyp_defines': 'target_arch=ia32 v8_target_arch=ppc',
},
'simulate_ppc64': {
'gn_args': 'target_cpu="x64" v8_target_cpu="ppc64"',
- 'gyp_defines': 'target_arch=x64 v8_target_arch=ppc64',
},
'simulate_s390': {
'gn_args': 'target_cpu="x86" v8_target_cpu="s390"',
- 'gyp_defines': 'target_arch=ia32 v8_target_arch=s390',
},
'simulate_s390x': {
'gn_args': 'target_cpu="x64" v8_target_cpu="s390x"',
- 'gyp_defines': 'target_arch=x64 v8_target_arch=s390x',
},
'static': {
'gn_args': 'is_component_build=false',
- 'gyp_defines': 'component=static_library',
},
'swarming': {
'gn_args': 'v8_test_isolation_mode="prepare"',
- 'gyp_defines': 'test_isolation_mode=prepare',
},
# TODO(machenbach): Remove the symbolized config after the bots are gone.
'symbolized': {
'gn_args': 'v8_no_inline=true',
- 'gyp_defines':
- 'release_extra_cflags="-fno-inline-functions -fno-inline"',
},
'tsan': {
'mixins': ['v8_enable_test_features'],
'gn_args': 'is_tsan=true',
- 'gyp_defines': 'clang=1 tsan=1',
},
'ubsan_vptr': {
@@ -786,7 +729,6 @@
'v8_no_i18n': {
'gn_args': 'v8_enable_i18n_support=false icu_use_data_file=false',
- 'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
},
'v8_enable_concurrent_marking': {
@@ -800,12 +742,10 @@
'v8_enable_disassembler': {
'gn_args': 'v8_enable_disassembler=true',
- 'gyp_defines': 'v8_enable_disassembler=1',
},
'v8_enable_slow_dchecks': {
'gn_args': 'v8_enable_slow_dchecks=true',
- 'gyp_defines': 'v8_enable_slow_dchecks=1',
},
'v8_enable_test_features': {
@@ -814,50 +754,39 @@
'v8_enable_verify_predictable': {
'gn_args': 'v8_enable_verify_predictable=true',
- 'gyp_defines': 'v8_enable_verify_predictable=1',
},
'v8_enable_vtunejit': {
'gn_args': 'v8_enable_vtunejit=true',
- 'gyp_defines': 'v8_enable_vtunejit=1',
},
'v8_full_debug': {
'gn_args': 'v8_optimized_debug=false',
- 'gyp_defines': 'v8_optimized_debug=0',
},
'v8_interpreted_regexp': {
'gn_args': 'v8_interpreted_regexp=true',
- 'gyp_defines': 'v8_interpreted_regexp=1',
},
'v8_optimized_debug': {
# This is the default in gn for debug.
- 'gyp_defines': 'v8_optimized_debug=1',
},
'v8_snapshot_custom': {
# GN path is relative to project root.
'gn_args': 'v8_embed_script="test/mjsunit/mjsunit.js"',
-
- # Gyp path is relative to src/v8.gyp.
- 'gyp_defines': 'embed_script=../test/mjsunit/mjsunit.js',
},
'v8_snapshot_internal': {
'gn_args': 'v8_use_external_startup_data=false',
- 'gyp_defines': 'v8_use_external_startup_data=0',
},
'v8_snapshot_none': {
'gn_args': 'v8_use_snapshot=false',
- 'gyp_defines': 'v8_use_snapshot=false',
},
'v8_verify_heap': {
'gn_args': 'v8_enable_verify_heap=true',
- 'gyp_defines': 'v8_enable_verify_heap=1',
},
'v8_verify_csa': {
@@ -866,12 +795,10 @@
'x64': {
'gn_args': 'target_cpu="x64"',
- 'gyp_defines': 'target_arch=x64',
},
'x86': {
'gn_args': 'target_cpu="x86"',
- 'gyp_defines': 'target_arch=ia32',
},
},
}
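For reference, each mixin's gn_args string maps one-to-one onto a GN invocation; a hypothetical command matching the simulate_arm64 config above (tools/mb normally generates this) would be:

    gn gen out/sim-arm64 --args='target_cpu="x64" v8_target_cpu="arm64"'

With the gyp_defines lines removed, mb_config.pyl now describes GN builds only.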
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index f22407a837..9af1c0b23b 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -650,10 +650,10 @@ MaybeLocal<String> ReadFile(Isolate* isolate, const string& name) {
size_t size = ftell(file);
rewind(file);
- char* chars = new char[size + 1];
- chars[size] = '\0';
+ std::unique_ptr<char[]> chars(new char[size + 1]);
+ chars[size] = '\0';
for (size_t i = 0; i < size;) {
- i += fread(&chars[i], 1, size - i, file);
+ i += fread(&chars[i], 1, size - i, file);
if (ferror(file)) {
fclose(file);
return MaybeLocal<String>();
@@ -661,8 +661,7 @@ MaybeLocal<String> ReadFile(Isolate* isolate, const string& name) {
}
fclose(file);
MaybeLocal<String> result = String::NewFromUtf8(
- isolate, chars, NewStringType::kNormal, static_cast<int>(size));
- delete[] chars;
+ isolate, chars.get(), NewStringType::kNormal, static_cast<int>(size));
return result;
}
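The array form of std::unique_ptr is what makes the rewrite above correct; a minimal illustration of the distinction:

    #include <memory>

    int main() {
      // The single-object form would call `delete`, not `delete[]`, on
      // memory from `new char[n]`, which is undefined behavior; the array
      // form uses delete[] as required.
      std::unique_ptr<char[]> ok(new char[16]);  // destroyed with delete[]
      ok[0] = '\0';
      return 0;
    }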
diff --git a/deps/v8/samples/samples.gyp b/deps/v8/samples/samples.gyp
deleted file mode 100644
index e7c26cf262..0000000000
--- a/deps/v8/samples/samples.gyp
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'v8_code': 1,
- 'v8_enable_i18n_support%': 1,
- 'v8_toolset_for_shell%': 'target',
- },
- 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
- 'target_defaults': {
- 'type': 'executable',
- 'dependencies': [
- '../src/v8.gyp:v8',
- '../src/v8.gyp:v8_libbase',
- '../src/v8.gyp:v8_libplatform',
- ],
- 'include_dirs': [
- '..',
- ],
- 'conditions': [
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ],
- }],
- ['OS=="win" and v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icudata',
- ],
- }],
- ],
- },
- 'targets': [
- {
- 'target_name': 'v8_shell',
- 'sources': [
- 'shell.cc',
- ],
- 'conditions': [
- [ 'want_separate_host_toolset==1', {
- 'toolsets': [ '<(v8_toolset_for_shell)', ],
- }],
- ],
- },
- {
- 'target_name': 'hello-world',
- 'sources': [
- 'hello-world.cc',
- ],
- },
- {
- 'target_name': 'process',
- 'sources': [
- 'process.cc',
- ],
- },
- ],
-}
diff --git a/deps/v8/src/PRESUBMIT.py b/deps/v8/src/PRESUBMIT.py
index d928a60689..b97eefaeb0 100644
--- a/deps/v8/src/PRESUBMIT.py
+++ b/deps/v8/src/PRESUBMIT.py
@@ -24,6 +24,6 @@ def PostUploadHook(cl, change, output_api):
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
- 'master.tryserver.chromium.linux:linux_chromium_rel_ng'
+ 'luci.chromium.try:linux_chromium_rel_ng'
],
'Automatically added layout test trybots to run tests on CQ.')
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index e17de159c1..5493b34789 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -143,6 +143,8 @@ void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) {
+ DCHECK_EQ(address, AlignedAddress(address, alignment));
+ DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result =
@@ -160,6 +162,7 @@ void* AllocatePages(void* address, size_t size, size_t alignment,
}
bool FreePages(void* address, const size_t size) {
+ DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
bool result = GetPageAllocator()->FreePages(address, size);
#if defined(LEAK_SANITIZER)
if (result) {
@@ -260,7 +263,9 @@ void VirtualMemory::Free() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
- CHECK(FreePages(address, size));
+ // FreePages expects size to be aligned to allocation granularity. Trimming
+ // may leave size at only commit granularity. Align it here.
+ CHECK(FreePages(address, RoundUp(size, AllocatePageSize())));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
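The RoundUp call restores the alignment contract that the new DCHECK in FreePages enforces; a sketch of the underlying arithmetic, assuming a power-of-two page size as that DCHECK implies:

    #include <cstddef>

    // Round `size` up to the next multiple of `page_size` (a power of two).
    size_t RoundUpToPage(size_t size, size_t page_size) {
      return (size + page_size - 1) & ~(page_size - 1);
    }

With a 4096-byte page size, RoundUpToPage(5000, 4096) yields 8192.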
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index 1302e32b66..502b8cbdca 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -13,13 +13,16 @@
namespace v8 {
namespace internal {
-Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
+Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
Isolate* isolate = this->isolate();
+ LOG(isolate, ApiObjectAccess("call", holder()));
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
+ v8::FunctionCallback f =
+ v8::ToCData<v8::FunctionCallback>(handler->callback());
if (isolate->needs_side_effect_check() &&
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<Object>();
}
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 42d58b88a8..413a72a3ae 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -19,7 +19,8 @@ template <int kArrayLength>
class CustomArgumentsBase : public Relocatable {
public:
virtual inline void IterateInstance(RootVisitor* v) {
- v->VisitRootPointers(Root::kRelocatable, values_, values_ + kArrayLength);
+ v->VisitRootPointers(Root::kRelocatable, nullptr, values_,
+ values_ + kArrayLength);
}
protected:
@@ -215,9 +216,13 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- Handle<Object> Call(FunctionCallback f);
+ Handle<Object> Call(CallHandlerInfo* handler);
private:
+ inline JSObject* holder() {
+ return JSObject::cast(this->begin()[T::kHolderIndex]);
+ }
+
internal::Object** argv_;
int argc_;
};
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index b8f03a89a8..488b99fd25 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -285,10 +285,10 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<NumberDictionary> slow_cache =
+ Handle<SimpleNumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(serial_number);
- if (entry == NumberDictionary::kNotFound) {
+ if (entry == SimpleNumberDictionary::kNotFound) {
return MaybeHandle<JSObject>();
}
return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
@@ -313,9 +313,9 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<NumberDictionary> cache =
+ Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
- auto new_cache = NumberDictionary::Set(cache, serial_number, object);
+ auto new_cache = SimpleNumberDictionary::Set(cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@@ -334,11 +334,11 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
- Handle<NumberDictionary> cache =
+ Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
- DCHECK_NE(NumberDictionary::kNotFound, entry);
- cache = NumberDictionary::DeleteEntry(cache, entry);
+ DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
+ cache = SimpleNumberDictionary::DeleteEntry(cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
@@ -726,7 +726,6 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined(isolate)) {
map->set_is_callable(true);
- map->set_is_constructor(true);
}
if (immutable_proto) map->set_is_immutable_proto(true);
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index 455be0dd06..398f198ae5 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -5,6 +5,8 @@
#ifndef V8_API_NATIVES_H_
#define V8_API_NATIVES_H_
+#include "include/v8.h"
+#include "src/base/macros.h"
#include "src/handles.h"
#include "src/property-details.h"
@@ -62,4 +64,4 @@ class ApiNatives {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_API_NATIVES_H_
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index d258c87853..8531cd5c05 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -11,7 +11,6 @@
#include <cmath> // For isnan.
#include <limits>
#include <vector>
-#include "include/v8-debug.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
#include "include/v8-util.h"
@@ -34,6 +33,7 @@
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/debug/debug-coverage.h"
+#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-type-profile.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -49,7 +49,6 @@
#include "src/json-stringifier.h"
#include "src/messages.h"
#include "src/objects-inl.h"
-#include "src/parsing/background-parsing-task.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@@ -461,16 +460,7 @@ void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); }
void v8::ArrayBuffer::Allocator::Free(void* data, size_t length,
AllocationMode mode) {
- switch (mode) {
- case AllocationMode::kNormal: {
- Free(data, length);
- return;
- }
- case AllocationMode::kReservation: {
- UNIMPLEMENTED();
- return;
- }
- }
+ UNIMPLEMENTED();
}
void v8::ArrayBuffer::Allocator::SetProtection(
@@ -483,7 +473,7 @@ namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
- virtual void* Allocate(size_t length) {
+ void* Allocate(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Workaround for a GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
@@ -494,7 +484,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data;
}
- virtual void* AllocateUninitialized(size_t length) {
+ void* AllocateUninitialized(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Workaround for a GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
@@ -505,42 +495,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data;
}
- virtual void Free(void* data, size_t) { free(data); }
-
- virtual void* Reserve(size_t length) {
- size_t page_size = i::AllocatePageSize();
- size_t allocated = RoundUp(length, page_size);
- void* address = i::AllocatePages(i::GetRandomMmapAddr(), allocated,
- page_size, PageAllocator::kNoAccess);
- return address;
- }
-
- virtual void Free(void* data, size_t length,
- v8::ArrayBuffer::Allocator::AllocationMode mode) {
- switch (mode) {
- case v8::ArrayBuffer::Allocator::AllocationMode::kNormal: {
- return Free(data, length);
- }
- case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
- size_t page_size = i::AllocatePageSize();
- size_t allocated = RoundUp(length, page_size);
- CHECK(i::FreePages(data, allocated));
- return;
- }
- }
- }
-
- virtual void SetProtection(
- void* data, size_t length,
- v8::ArrayBuffer::Allocator::Protection protection) {
- DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
- protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
- PageAllocator::Permission permission =
- (protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
- ? PageAllocator::kReadWrite
- : PageAllocator::kNoAccess;
- CHECK(i::SetPermissions(data, length, permission));
- }
+ void Free(void* data, size_t) override { free(data); }
};
bool RunExtraCode(Isolate* isolate, Local<Context> context,
@@ -1069,6 +1024,10 @@ void* V8::ClearWeak(i::Object** location) {
return i::GlobalHandles::ClearWeakness(location);
}
+void V8::AnnotateStrongRetainer(i::Object** location, const char* label) {
+ i::GlobalHandles::AnnotateStrongRetainer(location, label);
+}
+
void V8::DisposeGlobal(i::Object** location) {
i::GlobalHandles::Destroy(location);
}
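AnnotateStrongRetainer reaches embedders through PersistentBase in v8.h; a hedged usage sketch, assuming the label outlives the handle as the heap profiler requires:

    void LabelHandle(v8::Persistent<v8::Object>& cache) {
      // Heap snapshots can now attribute this strong root to the embedder.
      // The char* label must stay valid for the lifetime of the handle.
      cache.AnnotateStrongRetainer("MyEmbedder::cache");
    }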
@@ -2069,11 +2028,9 @@ bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; }
void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); }
-
ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
Encoding encoding)
- : impl_(new i::StreamedSource(stream, encoding)) {}
-
+ : impl_(new i::ScriptStreamingData(stream, encoding)) {}
ScriptCompiler::StreamedSource::~StreamedSource() { delete impl_; }
@@ -2358,6 +2315,37 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
RETURN_ESCAPED(result);
}
+namespace {
+
+i::Compiler::ScriptDetails GetScriptDetails(
+ i::Isolate* isolate, Local<Value> resource_name,
+ Local<Integer> resource_line_offset, Local<Integer> resource_column_offset,
+ Local<Value> source_map_url, Local<PrimitiveArray> host_defined_options) {
+ i::Compiler::ScriptDetails script_details;
+ if (!resource_name.IsEmpty()) {
+ script_details.name_obj = Utils::OpenHandle(*(resource_name));
+ }
+ if (!resource_line_offset.IsEmpty()) {
+ script_details.line_offset =
+ static_cast<int>(resource_line_offset->Value());
+ }
+ if (!resource_column_offset.IsEmpty()) {
+ script_details.column_offset =
+ static_cast<int>(resource_column_offset->Value());
+ }
+ script_details.host_defined_options = isolate->factory()->empty_fixed_array();
+ if (!host_defined_options.IsEmpty()) {
+ script_details.host_defined_options =
+ Utils::OpenHandle(*(host_defined_options));
+ }
+ if (!source_map_url.IsEmpty()) {
+ script_details.source_map_url = Utils::OpenHandle(*(source_map_url));
+ }
+ return script_details;
+}
+
+} // namespace
+
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
Isolate* v8_isolate, Source* source, CompileOptions options,
NoCacheReason no_cache_reason) {
@@ -2366,17 +2354,21 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
ENTER_V8_NO_SCRIPT(isolate, v8_isolate->GetCurrentContext(), ScriptCompiler,
CompileUnbound, MaybeLocal<UnboundScript>(),
InternalEscapableScope);
- bool produce_cache = options == kProduceParserCache ||
- options == kProduceCodeCache ||
- options == kProduceFullCodeCache;
-
- // Don't try to produce any kind of cache when the debugger is loaded.
- if (isolate->debug()->is_loaded() && produce_cache) {
+ // ProduceParserCache, ProduceCodeCache, ProduceFullCodeCache and
+ // ConsumeParserCache are not supported. They are present only for
+ // backward compatibility. All these options behave as kNoCompileOptions.
+ if (options == kConsumeParserCache) {
+ // We do not support parser caches anymore. Just set cached_data to
+ // rejected to signal an error.
+ options = kNoCompileOptions;
+ source->cached_data->rejected = true;
+ } else if (options == kProduceParserCache || options == kProduceCodeCache ||
+ options == kProduceFullCodeCache) {
options = kNoCompileOptions;
}
i::ScriptData* script_data = nullptr;
- if (options == kConsumeParserCache || options == kConsumeCodeCache) {
+ if (options == kConsumeCodeCache) {
DCHECK(source->cached_data);
// ScriptData takes care of pointer-aligning the data.
script_data = new i::ScriptData(source->cached_data->data,
@@ -2386,32 +2378,14 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
i::Handle<i::SharedFunctionInfo> result;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
- i::MaybeHandle<i::Object> name_obj;
- i::MaybeHandle<i::Object> source_map_url;
- i::MaybeHandle<i::FixedArray> host_defined_options =
- isolate->factory()->empty_fixed_array();
- int line_offset = 0;
- int column_offset = 0;
- if (!source->resource_name.IsEmpty()) {
- name_obj = Utils::OpenHandle(*(source->resource_name));
- }
- if (!source->host_defined_options.IsEmpty()) {
- host_defined_options = Utils::OpenHandle(*(source->host_defined_options));
- }
- if (!source->resource_line_offset.IsEmpty()) {
- line_offset = static_cast<int>(source->resource_line_offset->Value());
- }
- if (!source->resource_column_offset.IsEmpty()) {
- column_offset = static_cast<int>(source->resource_column_offset->Value());
- }
- if (!source->source_map_url.IsEmpty()) {
- source_map_url = Utils::OpenHandle(*(source->source_map_url));
- }
+ i::Compiler::ScriptDetails script_details = GetScriptDetails(
+ isolate, source->resource_name, source->resource_line_offset,
+ source->resource_column_offset, source->source_map_url,
+ source->host_defined_options);
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- str, name_obj, line_offset, column_offset, source->resource_options,
- source_map_url, isolate->native_context(), nullptr, &script_data,
- options, no_cache_reason, i::NOT_NATIVES_CODE, host_defined_options);
+ str, script_details, source->resource_options, nullptr, &script_data,
+ options, no_cache_reason, i::NOT_NATIVES_CODE);
has_pending_exception = !maybe_function_info.ToHandle(&result);
if (has_pending_exception && script_data != nullptr) {
// This case won't happen during normal operation; we have compiled
@@ -2422,13 +2396,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
}
RETURN_ON_FAILED_EXECUTION(UnboundScript);
- if (produce_cache && script_data != nullptr) {
- // script_data now contains the data that was generated. source will
- // take the ownership.
- source->cached_data = new CachedData(
- script_data->data(), script_data->length(), CachedData::BufferOwned);
- script_data->ReleaseDataOwnership();
- } else if (options == kConsumeParserCache || options == kConsumeCodeCache) {
+ if (options == kConsumeCodeCache) {
source->cached_data->rejected = script_data->rejected();
}
delete script_data;
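From the embedder's side, only the code-cache path remains meaningful; a hedged consumption sketch, where `data` and `length` are assumed to hold a previously produced cache blob:

    v8::MaybeLocal<v8::UnboundScript> CompileWithCache(
        v8::Isolate* isolate, v8::Local<v8::String> code,
        const uint8_t* data, int length) {
      auto* cached = new v8::ScriptCompiler::CachedData(
          data, length, v8::ScriptCompiler::CachedData::BufferNotOwned);
      v8::ScriptCompiler::Source source(code, cached);  // source owns cached
      v8::MaybeLocal<v8::UnboundScript> result =
          v8::ScriptCompiler::CompileUnboundScript(
              isolate, &source, v8::ScriptCompiler::kConsumeCodeCache);
      // If the blob was stale, V8 falls back to a full compile and flips
      // `rejected`; `result` is still usable.
      if (source.GetCachedData()->rejected) { /* e.g. drop the stale blob */ }
      return result;
    }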
@@ -2593,9 +2561,11 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
if (!i::FLAG_script_streaming) {
return nullptr;
}
+ // We don't support other compile options on streaming background compiles.
+ // TODO(rmcilroy): remove CompileOptions from the API.
+ CHECK(options == ScriptCompiler::kNoCompileOptions);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- return new i::BackgroundParsingTask(source->impl(), options,
- i::FLAG_stack_size, isolate);
+ return i::Compiler::NewBackgroundCompileTask(source->impl(), isolate);
}
@@ -2605,59 +2575,24 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
const ScriptOrigin& origin) {
PREPARE_FOR_EXECUTION(context, ScriptCompiler, Compile, Script);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
- i::StreamedSource* source = v8_source->impl();
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileStreamedScript");
+
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
- i::Handle<i::Script> script = isolate->factory()->NewScript(str);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- i::Script::InitLineEnds(script);
- }
- if (!origin.ResourceName().IsEmpty()) {
- script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
- }
- if (!origin.HostDefinedOptions().IsEmpty()) {
- script->set_host_defined_options(
- *Utils::OpenHandle(*(origin.HostDefinedOptions())));
- }
- if (!origin.ResourceLineOffset().IsEmpty()) {
- script->set_line_offset(
- static_cast<int>(origin.ResourceLineOffset()->Value()));
- }
- if (!origin.ResourceColumnOffset().IsEmpty()) {
- script->set_column_offset(
- static_cast<int>(origin.ResourceColumnOffset()->Value()));
- }
- script->set_origin_options(origin.Options());
- if (!origin.SourceMapUrl().IsEmpty()) {
- script->set_source_mapping_url(
- *Utils::OpenHandle(*(origin.SourceMapUrl())));
- }
+ i::Compiler::ScriptDetails script_details = GetScriptDetails(
+ isolate, origin.ResourceName(), origin.ResourceLineOffset(),
+ origin.ResourceColumnOffset(), origin.SourceMapUrl(),
+ origin.HostDefinedOptions());
+ i::ScriptStreamingData* streaming_data = v8_source->impl();
- source->info->set_script(script);
- source->parser->UpdateStatistics(isolate, script);
- source->info->UpdateBackgroundParseStatisticsOnMainThread(isolate);
- source->parser->HandleSourceURLComments(isolate, script);
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForStreamedScript(
+ str, script_details, origin.Options(), streaming_data);
i::Handle<i::SharedFunctionInfo> result;
- if (source->info->literal() == nullptr) {
- // Parsing has failed - report error messages.
- source->info->pending_error_handler()->ReportErrors(
- isolate, script, source->info->ast_value_factory());
- } else {
- // Parsing has succeeded - finalize compile.
- if (i::FLAG_background_compile) {
- result = i::Compiler::GetSharedFunctionInfoForBackgroundCompile(
- script, source->info.get(), str->length(),
- source->outer_function_job.get(), &source->inner_function_jobs);
- } else {
- result = i::Compiler::GetSharedFunctionInfoForStreamedScript(
- script, source->info.get(), str->length());
- }
- }
- has_pending_exception = result.is_null();
+ has_pending_exception = !maybe_function_info.ToHandle(&result);
if (has_pending_exception) isolate->ReportPendingMessages();
- source->Release();
-
RETURN_ON_FAILED_EXECUTION(Script);
Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
@@ -3304,6 +3239,16 @@ MaybeLocal<WasmCompiledModule> ValueDeserializer::Delegate::GetWasmModuleFromId(
return MaybeLocal<WasmCompiledModule>();
}
+MaybeLocal<SharedArrayBuffer>
+ValueDeserializer::Delegate::GetSharedArrayBufferFromId(Isolate* v8_isolate,
+ uint32_t id) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->ScheduleThrow(*isolate->factory()->NewError(
+ isolate->error_function(),
+ i::MessageTemplate::kDataCloneDeserializationError));
+ return MaybeLocal<SharedArrayBuffer>();
+}
+
struct ValueDeserializer::PrivateData {
PrivateData(i::Isolate* i, i::Vector<const uint8_t> data, Delegate* delegate)
: isolate(i), deserializer(i, data, delegate) {}
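The new default throws a DataCloneDeserializationError, so embedders that serialize SharedArrayBuffers by id are expected to override it; a hedged sketch:

    #include <map>

    class MyDelegate : public v8::ValueDeserializer::Delegate {
     public:
      v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
          v8::Isolate* isolate, uint32_t id) override {
        auto it = buffers_.find(id);
        if (it == buffers_.end()) return {};  // signals a clone error
        return it->second.Get(isolate);
      }

     private:
      // Buffers registered by id during serialization (embedder-defined).
      std::map<uint32_t, v8::Global<v8::SharedArrayBuffer>> buffers_;
    };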
@@ -3544,24 +3489,22 @@ bool Value::IsWebAssemblyCompiledModule() const {
js_obj->map()->GetConstructor();
}
-#define VALUE_IS_SPECIFIC_TYPE(Type, Class) \
- bool Value::Is##Type() const { \
- i::Handle<i::Object> obj = Utils::OpenHandle(this); \
- if (!obj->IsHeapObject()) return false; \
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); \
- return obj->HasSpecificClassOf(isolate->heap()->Class##_string()); \
+#define VALUE_IS_SPECIFIC_TYPE(Type, Check) \
+ bool Value::Is##Type() const { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ return obj->Is##Check(); \
}
-VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, Arguments)
-VALUE_IS_SPECIFIC_TYPE(BooleanObject, Boolean)
-VALUE_IS_SPECIFIC_TYPE(NumberObject, Number)
-VALUE_IS_SPECIFIC_TYPE(StringObject, String)
-VALUE_IS_SPECIFIC_TYPE(SymbolObject, Symbol)
-VALUE_IS_SPECIFIC_TYPE(Date, Date)
-VALUE_IS_SPECIFIC_TYPE(Map, Map)
-VALUE_IS_SPECIFIC_TYPE(Set, Set)
-VALUE_IS_SPECIFIC_TYPE(WeakMap, WeakMap)
-VALUE_IS_SPECIFIC_TYPE(WeakSet, WeakSet)
+VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, JSArgumentsObject)
+VALUE_IS_SPECIFIC_TYPE(BooleanObject, BooleanWrapper)
+VALUE_IS_SPECIFIC_TYPE(NumberObject, NumberWrapper)
+VALUE_IS_SPECIFIC_TYPE(StringObject, StringWrapper)
+VALUE_IS_SPECIFIC_TYPE(SymbolObject, SymbolWrapper)
+VALUE_IS_SPECIFIC_TYPE(Date, JSDate)
+VALUE_IS_SPECIFIC_TYPE(Map, JSMap)
+VALUE_IS_SPECIFIC_TYPE(Set, JSSet)
+VALUE_IS_SPECIFIC_TYPE(WeakMap, JSWeakMap)
+VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
#undef VALUE_IS_SPECIFIC_TYPE
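For one instantiation, the rewritten macro now expands to a direct instance-type check instead of a class-name string comparison:

    bool Value::IsDate() const {
      i::Handle<i::Object> obj = Utils::OpenHandle(this);
      return obj->IsJSDate();
    }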
@@ -3953,55 +3896,36 @@ void v8::SharedArrayBuffer::CheckCast(Value* that) {
void v8::Date::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->Date_string()),
- "v8::Date::Cast()", "Could not convert to date");
+ Utils::ApiCheck(obj->IsJSDate(), "v8::Date::Cast()",
+ "Could not convert to date");
}
void v8::StringObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->String_string()),
- "v8::StringObject::Cast()",
+ Utils::ApiCheck(obj->IsStringWrapper(), "v8::StringObject::Cast()",
"Could not convert to StringObject");
}
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
- "v8::SymbolObject::Cast()",
+ Utils::ApiCheck(obj->IsSymbolWrapper(), "v8::SymbolObject::Cast()",
"Could not convert to SymbolObject");
}
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->Number_string()),
- "v8::NumberObject::Cast()",
+ Utils::ApiCheck(obj->IsNumberWrapper(), "v8::NumberObject::Cast()",
"Could not convert to NumberObject");
}
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- i::Isolate* isolate = nullptr;
- if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
- Utils::ApiCheck(
- isolate != nullptr &&
- obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
- "v8::BooleanObject::Cast()", "Could not convert to BooleanObject");
+ Utils::ApiCheck(obj->IsBooleanWrapper(), "v8::BooleanObject::Cast()",
+ "Could not convert to BooleanObject");
}
@@ -4432,7 +4356,7 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
desc.set_enumerable(false);
desc.set_configurable(true);
desc.set_value(value_obj);
- return i::JSProxy::SetPrivateProperty(
+ return i::JSProxy::SetPrivateSymbol(
isolate, i::Handle<i::JSProxy>::cast(self),
i::Handle<i::Symbol>::cast(key_obj), &desc, i::kDontThrow);
}
@@ -4577,10 +4501,10 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
v8::IndexFilter::kIncludeIndices);
}
-MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
- KeyCollectionMode mode,
- PropertyFilter property_filter,
- IndexFilter index_filter) {
+MaybeLocal<Array> v8::Object::GetPropertyNames(
+ Local<Context> context, KeyCollectionMode mode,
+ PropertyFilter property_filter, IndexFilter index_filter,
+ KeyConversionMode key_conversion) {
PREPARE_FOR_EXECUTION(context, Object, GetPropertyNames, Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
@@ -4590,7 +4514,8 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
accumulator.set_skip_indices(index_filter == IndexFilter::kSkipIndices);
has_pending_exception = accumulator.CollectKeys(self, self).IsNothing();
RETURN_ON_FAILED_EXECUTION(Array);
- value = accumulator.GetKeys(i::GetKeysConversion::kKeepNumbers);
+ value =
+ accumulator.GetKeys(static_cast<i::GetKeysConversion>(key_conversion));
DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map()->EnumLength() == 0 ||
self->map()->instance_descriptors()->GetEnumCache()->keys() != *value);
@@ -4614,10 +4539,11 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
}
-MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context,
- PropertyFilter filter) {
+MaybeLocal<Array> v8::Object::GetOwnPropertyNames(
+ Local<Context> context, PropertyFilter filter,
+ KeyConversionMode key_conversion) {
return GetPropertyNames(context, KeyCollectionMode::kOwnOnly, filter,
- v8::IndexFilter::kIncludeIndices);
+ v8::IndexFilter::kIncludeIndices, key_conversion);
}
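A hedged usage sketch of the new KeyConversionMode parameter, which controls whether integer indices come back as numbers or as strings:

    v8::MaybeLocal<v8::Array> OwnKeysAsStrings(
        v8::Local<v8::Context> context, v8::Local<v8::Object> obj) {
      return obj->GetOwnPropertyNames(
          context, v8::ONLY_ENUMERABLE,
          v8::KeyConversionMode::kConvertToString);
    }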
MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
@@ -4754,14 +4680,14 @@ Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
return maybe;
}
-
template <typename Getter, typename Setter, typename Data>
static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Local<Name> name, Getter getter,
Setter setter, Data data,
AccessControl settings,
PropertyAttribute attributes,
- bool is_special_data_property) {
+ bool is_special_data_property,
+ bool replace_on_access) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing<bool>(),
i::HandleScope);
@@ -4771,7 +4697,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
v8::Local<AccessorSignature> signature;
i::Handle<i::AccessorInfo> info =
MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
- is_special_data_property, false);
+ is_special_data_property, replace_on_access);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@@ -4797,7 +4723,7 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
PropertyAttribute attribute) {
return ObjectSetAccessor(context, this, name, getter, setter,
data.FromMaybe(Local<Value>()), settings, attribute,
- i::FLAG_disable_old_api_accessors);
+ i::FLAG_disable_old_api_accessors, false);
}
@@ -4827,7 +4753,17 @@ Maybe<bool> Object::SetNativeDataProperty(v8::Local<v8::Context> context,
v8::Local<Value> data,
PropertyAttribute attributes) {
return ObjectSetAccessor(context, this, name, getter, setter, data, DEFAULT,
- attributes, true);
+ attributes, true, false);
+}
+
+Maybe<bool> Object::SetLazyDataProperty(v8::Local<v8::Context> context,
+ v8::Local<Name> name,
+ AccessorNameGetterCallback getter,
+ v8::Local<Value> data,
+ PropertyAttribute attributes) {
+ return ObjectSetAccessor(context, this, name, getter,
+ static_cast<AccessorNameSetterCallback>(nullptr),
+ data, DEFAULT, attributes, true, true);
}
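SetLazyDataProperty wires replace_on_access to true: the getter runs once, and its result is then installed as a plain data property. A hedged usage sketch:

    void LazyGetter(v8::Local<v8::Name> name,
                    const v8::PropertyCallbackInfo<v8::Value>& info) {
      // Stands in for an expensive computation; runs at most once per object.
      info.GetReturnValue().Set(v8::Number::New(info.GetIsolate(), 42));
    }

    bool InstallLazy(v8::Local<v8::Context> context,
                     v8::Local<v8::Object> obj, v8::Local<v8::Name> name) {
      return obj->SetLazyDataProperty(context, name, LazyGetter)
          .FromMaybe(false);
    }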
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
@@ -7352,13 +7288,11 @@ Local<Array> Set::AsArray() const {
MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, Promise_Resolver, New, Resolver);
- i::Handle<i::Object> result;
+ Local<Promise::Resolver> result;
has_pending_exception =
- !i::Execution::Call(isolate, isolate->promise_internal_constructor(),
- isolate->factory()->undefined_value(), 0, nullptr)
- .ToHandle(&result);
+ !ToLocal<Promise::Resolver>(isolate->factory()->NewJSPromise(), &result);
RETURN_ON_FAILED_EXECUTION(Promise::Resolver);
- RETURN_ESCAPED(Local<Promise::Resolver>::Cast(Utils::ToLocal(result)));
+ RETURN_ESCAPED(result);
}
@@ -7380,12 +7314,14 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
ENTER_V8(isolate, context, Promise_Resolver, Resolve, Nothing<bool>(),
i::HandleScope);
auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
+ auto promise = i::Handle<i::JSPromise>::cast(self);
+
+ if (promise->status() != Promise::kPending) {
+ return Just(true);
+ }
+
has_pending_exception =
- i::Execution::Call(isolate, isolate->promise_resolve(),
- isolate->factory()->undefined_value(), arraysize(argv),
- argv)
- .is_null();
+ i::JSPromise::Resolve(promise, Utils::OpenHandle(*value)).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -7403,15 +7339,14 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
ENTER_V8(isolate, context, Promise_Resolver, Reject, Nothing<bool>(),
i::HandleScope);
auto self = Utils::OpenHandle(this);
+ auto promise = i::Handle<i::JSPromise>::cast(self);
+
+ if (promise->status() != Promise::kPending) {
+ return Just(true);
+ }
- // We pass true to trigger the debugger's on exception handler.
- i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value),
- isolate->factory()->ToBoolean(true)};
has_pending_exception =
- i::Execution::Call(isolate, isolate->promise_internal_reject(),
- isolate->factory()->undefined_value(), arraysize(argv),
- argv)
- .is_null();
+ i::JSPromise::Reject(promise, Utils::OpenHandle(*value)).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
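With Resolve and Reject now implemented directly on i::JSPromise (and returning early once the promise settles), the embedder-facing flow is unchanged; a hedged sketch:

    v8::MaybeLocal<v8::Promise> MakeResolved(v8::Local<v8::Context> context,
                                             v8::Local<v8::Value> value) {
      v8::Local<v8::Promise::Resolver> resolver;
      if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return {};
      // A second Resolve/Reject on a settled promise is now a cheap no-op.
      if (resolver->Resolve(context, value).IsNothing()) return {};
      return resolver->GetPromise();
    }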
@@ -7605,8 +7540,9 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
return MaybeLocal<WasmCompiledModule>();
}
- i::MaybeHandle<i::JSObject> maybe_compiled = i::wasm::SyncCompile(
- i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
+ i::MaybeHandle<i::JSObject> maybe_compiled =
+ i_isolate->wasm_engine()->SyncCompile(
+ i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
@@ -7665,30 +7601,29 @@ void WasmModuleObjectBuilderStreaming::Finish() {
}
// AsyncCompile makes its own copy of the wire bytes. This inefficiency
// will be resolved when we move to true streaming compilation.
- i::wasm::AsyncCompile(reinterpret_cast<i::Isolate*>(isolate_),
- Utils::OpenHandle(*promise_.Get(isolate_)),
- {wire_bytes.get(), wire_bytes.get() + total_size_},
- false);
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
+ i_isolate->wasm_engine()->AsyncCompile(
+ i_isolate, Utils::OpenHandle(*promise_.Get(isolate_)),
+ {wire_bytes.get(), wire_bytes.get() + total_size_}, false);
}
-void WasmModuleObjectBuilderStreaming::Abort(Local<Value> exception) {
+void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {
Local<Promise> promise = GetPromise();
// The promise has already been resolved, e.g. because of a compilation
// error.
if (promise->State() != v8::Promise::kPending) return;
if (i::FLAG_wasm_stream_compilation) streaming_decoder_->Abort();
- // If there is no exception, then we do not reject the promise. The reason is
- // that 'no exception' indicates that we are in a ScriptForbiddenScope, which
- // means that it is not allowed to reject the promise at the moment, or
- // execute any other JavaScript code.
+ // If no exception value is provided, we do not reject the promise. This
+ // happens when streaming compilation is aborted at a point where script
+ // execution is no longer allowed, e.g. when a browser tab is refreshed.
if (exception.IsEmpty()) return;
Local<Promise::Resolver> resolver = promise.As<Promise::Resolver>();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i::HandleScope scope(i_isolate);
Local<Context> context = Utils::ToLocal(handle(i_isolate->context()));
- auto maybe = resolver->Reject(context, exception);
+ auto maybe = resolver->Reject(context, exception.ToLocalChecked());
CHECK_IMPLIES(!maybe.FromMaybe(false), i_isolate->has_scheduled_exception());
}
@@ -7696,29 +7631,6 @@ WasmModuleObjectBuilderStreaming::~WasmModuleObjectBuilderStreaming() {
promise_.Reset();
}
-void WasmModuleObjectBuilder::OnBytesReceived(const uint8_t* bytes,
- size_t size) {
- std::unique_ptr<uint8_t[]> cloned_bytes(new uint8_t[size]);
- memcpy(cloned_bytes.get(), bytes, size);
- received_buffers_.push_back(
- Buffer(std::unique_ptr<const uint8_t[]>(
- const_cast<const uint8_t*>(cloned_bytes.release())),
- size));
- total_size_ += size;
-}
-
-MaybeLocal<WasmCompiledModule> WasmModuleObjectBuilder::Finish() {
- std::unique_ptr<uint8_t[]> wire_bytes(new uint8_t[total_size_]);
- uint8_t* insert_at = wire_bytes.get();
-
- for (size_t i = 0; i < received_buffers_.size(); ++i) {
- const Buffer& buff = received_buffers_[i];
- memcpy(insert_at, buff.first.get(), buff.second);
- insert_at += buff.second;
- }
- return WasmCompiledModule::Compile(isolate_, wire_bytes.get(), total_size_);
-}
-
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
@@ -8690,24 +8602,20 @@ void Isolate::RunMicrotasks() {
reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
}
-
-void Isolate::EnqueueMicrotask(Local<Function> microtask) {
+void Isolate::EnqueueMicrotask(Local<Function> function) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->EnqueueMicrotask(Utils::OpenHandle(*microtask));
+ i::Handle<i::CallableTask> microtask = isolate->factory()->NewCallableTask(
+ Utils::OpenHandle(*function), isolate->native_context());
+ isolate->EnqueueMicrotask(microtask);
}
-
-void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
+void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::HandleScope scope(isolate);
- i::Handle<i::CallHandlerInfo> callback_info =
- i::Handle<i::CallHandlerInfo>::cast(
- isolate->factory()->NewStruct(i::TUPLE3_TYPE, i::NOT_TENURED));
- SET_FIELD_WRAPPED(callback_info, set_callback, microtask);
- SET_FIELD_WRAPPED(callback_info, set_js_callback,
- callback_info->redirected_callback());
- SET_FIELD_WRAPPED(callback_info, set_data, data);
- isolate->EnqueueMicrotask(callback_info);
+ i::Handle<i::CallbackTask> microtask = isolate->factory()->NewCallbackTask(
+ isolate->factory()->NewForeign(reinterpret_cast<i::Address>(callback)),
+ isolate->factory()->NewForeign(reinterpret_cast<i::Address>(data)));
+ isolate->EnqueueMicrotask(microtask);
}
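The callback flavor now travels as a CallbackTask holding two Foreigns, but usage stays a two-liner. A hedged sketch:

    void OnMicrotask(void* data) {
      // Runs at the next microtask checkpoint, e.g. after
      // v8::Isolate::RunMicrotasks().
    }

    void Schedule(v8::Isolate* isolate) {
      isolate->EnqueueMicrotask(OnMicrotask, /*data=*/nullptr);
    }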
@@ -8806,6 +8714,12 @@ void Isolate::LowMemoryNotification() {
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ if (!dependant_context) {
+ // We left the current context, so we can abort all running WebAssembly
+ // compilations.
+ isolate->wasm_engine()->compilation_manager()->AbortAllJobs();
+ }
+ // TODO(ahaas): move other non-heap activity out of the heap call.
return isolate->heap()->NotifyContextDisposed(dependant_context);
}
@@ -9139,78 +9053,6 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
// --- D e b u g S u p p o r t ---
-bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
- Local<Value> data) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (that == nullptr) {
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::HandleScope scope(i_isolate);
- i_isolate->debug()->SetDebugDelegate(nullptr, false);
- } else {
- // Might create the Debug context.
- ENTER_V8_FOR_NEW_CONTEXT(i_isolate);
- i::HandleScope scope(i_isolate);
- i::Handle<i::Object> i_data = i_isolate->factory()->undefined_value();
- if (!data.IsEmpty()) i_data = Utils::OpenHandle(*data);
- i::NativeDebugDelegate* delegate =
- new i::NativeDebugDelegate(i_isolate, that, i_data);
- i_isolate->debug()->SetDebugDelegate(delegate, true);
- }
- return true;
-}
-
-void Debug::DebugBreak(Isolate* isolate) { debug::DebugBreak(isolate); }
-
-void Debug::CancelDebugBreak(Isolate* isolate) {
- debug::CancelDebugBreak(isolate);
-}
-
-bool Debug::CheckDebugBreak(Isolate* isolate) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return internal_isolate->stack_guard()->CheckDebugBreak();
-}
-
-void Debug::SetMessageHandler(Isolate* isolate,
- v8::Debug::MessageHandler handler) {}
-
-void Debug::SendCommand(Isolate* isolate, const uint16_t* command, int length,
- ClientData* client_data) {}
-
-MaybeLocal<Value> Debug::Call(Local<Context> context,
- v8::Local<v8::Function> fun,
- v8::Local<v8::Value> data) {
- return debug::Call(context, fun, data);
-}
-
-void Debug::ProcessDebugMessages(Isolate* isolate) {}
-
-Local<Context> Debug::GetDebugContext(Isolate* isolate) {
- return debug::GetDebugContext(isolate);
-}
-
-MaybeLocal<Context> Debug::GetDebuggedContext(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- if (!i_isolate->debug()->in_debug_scope()) return MaybeLocal<Context>();
- i::Handle<i::Object> calling = i_isolate->GetCallingNativeContext();
- if (calling.is_null()) return MaybeLocal<Context>();
- return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
-}
-
-void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
- debug::SetLiveEditEnabled(isolate, enable);
-}
-
-bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) { return false; }
-
-void Debug::SetTailCallEliminationEnabled(Isolate* isolate, bool enabled) {
-}
-
-MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
- Local<Value> value) {
- return debug::GetInternalProperties(v8_isolate, value);
-}
-
void debug::SetContextId(Local<Context> context, int id) {
Utils::OpenHandle(*context)->set_debug_context_id(i::Smi::FromInt(id));
}
@@ -9653,13 +9495,11 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
ScriptOriginOptions origin_options;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- str, i::MaybeHandle<i::Object>(), 0, 0, origin_options,
- i::MaybeHandle<i::Object>(), isolate->native_context(), nullptr,
+ str, i::Compiler::ScriptDetails(), origin_options, nullptr,
&script_data, ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
- : i::INSPECTOR_CODE,
- i::MaybeHandle<i::FixedArray>());
+ : i::INSPECTOR_CODE);
has_pending_exception = !maybe_function_info.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(UnboundScript);
}
@@ -9836,6 +9676,18 @@ v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
return ToApiHandle<debug::GeneratorObject>(Utils::OpenHandle(*value));
}
+MaybeLocal<v8::Value> debug::EvaluateGlobal(v8::Isolate* isolate,
+ v8::Local<v8::String> source) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value);
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(
+ i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source)),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
void debug::QueryObjects(v8::Local<v8::Context> v8_context,
QueryObjectPredicate* predicate,
PersistentValueVector<v8::Object>* objects) {
@@ -10464,6 +10316,12 @@ void HeapProfiler::SetGetRetainerInfosCallback(
callback);
}
+void HeapProfiler::SetBuildEmbedderGraphCallback(
+ BuildEmbedderGraphCallback callback) {
+ reinterpret_cast<i::HeapProfiler*>(this)->SetBuildEmbedderGraphCallback(
+ callback);
+}
+
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@@ -10527,7 +10385,7 @@ void Testing::PrepareStressRun(int run) {
void Testing::DeoptimizeAll(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::HandleScope scope(i_isolate);
- internal::Deoptimizer::DeoptimizeAll(i_isolate);
+ i::Deoptimizer::DeoptimizeAll(i_isolate);
}
@@ -10571,14 +10429,15 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
if (last_handle_before_deferred_block_ != nullptr &&
(last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
(last_handle_before_deferred_block_ >= block)) {
- v->VisitRootPointers(Root::kHandleScope, block,
+ v->VisitRootPointers(Root::kHandleScope, nullptr, block,
last_handle_before_deferred_block_);
DCHECK(!found_block_before_deferred);
#ifdef DEBUG
found_block_before_deferred = true;
#endif
} else {
- v->VisitRootPointers(Root::kHandleScope, block, &block[kHandleBlockSize]);
+ v->VisitRootPointers(Root::kHandleScope, nullptr, block,
+ &block[kHandleBlockSize]);
}
}
@@ -10587,7 +10446,7 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
// Iterate over live handles in the last block (if any).
if (!blocks()->empty()) {
- v->VisitRootPointers(Root::kHandleScope, blocks()->back(),
+ v->VisitRootPointers(Root::kHandleScope, nullptr, blocks()->back(),
handle_scope_data_.next);
}
@@ -10596,11 +10455,11 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
for (unsigned i = 0; i < arraysize(context_lists); i++) {
if (context_lists[i]->empty()) continue;
Object** start = reinterpret_cast<Object**>(&context_lists[i]->front());
- v->VisitRootPointers(Root::kHandleScope, start,
+ v->VisitRootPointers(Root::kHandleScope, nullptr, start,
start + context_lists[i]->size());
}
if (microtask_context_) {
- v->VisitRootPointer(Root::kHandleScope,
+ v->VisitRootPointer(Root::kHandleScope, nullptr,
reinterpret_cast<Object**>(&microtask_context_));
}
}
@@ -10670,10 +10529,11 @@ void DeferredHandles::Iterate(RootVisitor* v) {
DCHECK((first_block_limit_ >= blocks_.front()) &&
(first_block_limit_ <= &(blocks_.front())[kHandleBlockSize]));
- v->VisitRootPointers(Root::kHandleScope, blocks_.front(), first_block_limit_);
+ v->VisitRootPointers(Root::kHandleScope, nullptr, blocks_.front(),
+ first_block_limit_);
for (size_t i = 1; i < blocks_.size(); i++) {
- v->VisitRootPointers(Root::kHandleScope, blocks_[i],
+ v->VisitRootPointers(Root::kHandleScope, nullptr, blocks_[i],
&blocks_[i][kHandleBlockSize]);
}
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 7bd03c37da..e67f4f7d47 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -180,6 +180,10 @@ class Utils {
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<BigInt64Array> ToLocalBigInt64Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<BigUint64Array> ToLocalBigUint64Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index f420f2e5cb..4d7d9895ce 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -46,7 +46,7 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
@@ -109,7 +109,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -143,27 +143,27 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@@ -189,7 +189,7 @@ Operand::Operand(const ExternalReference& f)
value_.immediate = reinterpret_cast<int32_t>(f.address());
}
-Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE32) {
+Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -273,15 +273,13 @@ Address Assembler::return_address_from_call_start(Address pc) {
}
}
-
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
+ Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -329,17 +327,15 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICache(isolate, pc, sizeof(target));
+ // Assembler::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
@@ -357,7 +353,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 2 * kInstrSize);
+ Assembler::FlushICache(pc, 2 * kInstrSize);
}
} else {
// This is an mov / orr immediate load. Patch the immediate embedded in
@@ -377,13 +373,42 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 4 * kInstrSize);
+ Assembler::FlushICache(pc, 4 * kInstrSize);
}
}
}
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+template <typename T>
+bool UseScratchRegisterScope::CanAcquireVfp() const {
+ VfpRegList* available = assembler_->GetScratchVfpRegisterList();
+ DCHECK_NOT_NULL(available);
+ for (int index = 0; index < T::kNumRegisters; index++) {
+ T reg = T::from_code(index);
+ uint64_t mask = reg.ToVfpRegList();
+ if ((*available & mask) == mask) {
+ return true;
+ }
+ }
+ return false;
+}
+
+template <typename T>
+T UseScratchRegisterScope::AcquireVfp() {
+ VfpRegList* available = assembler_->GetScratchVfpRegisterList();
+ DCHECK_NOT_NULL(available);
+ for (int index = 0; index < T::kNumRegisters; index++) {
+ T reg = T::from_code(index);
+ uint64_t mask = reg.ToVfpRegList();
+ if ((*available & mask) == mask) {
+ *available &= ~mask;
+ return reg;
+ }
+ }
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
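A hedged usage sketch of the new VFP scratch machinery (the templates' visibility is assumed here; upstream also provides typed wrappers):

    // Inside v8::internal, during ARM code generation. AcquireVfp<T> takes
    // the first fully-available register of the requested width out of the
    // scratch list; the destructor of `temps` puts it back.
    void EmitWithScratch(Assembler* assm) {
      UseScratchRegisterScope temps(assm);
      if (temps.CanAcquireVfp<DwVfpRegister>()) {
        DwVfpRegister scratch = temps.AcquireVfp<DwVfpRegister>();
        // ... emit code using `scratch` ...
      }
    }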
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index a615d67496..1011db4b80 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -347,22 +347,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -566,10 +564,16 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
// it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
// its use consistent with other features, we always enable it if we can.
EnableCpuFeature(VFP32DREGS);
+ // Make sure we pick two D registers which alias a Q register. This way, we
+ // can use a Q as a scratch if NEON is supported.
+ scratch_vfp_register_list_ = d14.ToVfpRegList() | d15.ToVfpRegList();
+ } else {
+ // When VFP32DREGS is not supported, d15 becomes allocatable. Therefore we
+ // cannot use it as a scratch.
+ scratch_vfp_register_list_ = d14.ToVfpRegList();
}
}
-
Assembler::~Assembler() {
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
@@ -1214,6 +1218,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
DCHECK(x.IsImmediate());
// Upon failure to encode, the opcode should not have changed.
DCHECK(opcode == (instr & kOpCodeMask));
+ UseScratchRegisterScope temps(this);
Condition cond = Instruction::ConditionField(instr);
if ((opcode == MOV) && !set_flags) {
// Generate a sequence of mov instructions or a load from the constant
@@ -1221,7 +1226,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
DCHECK(!rn.is_valid());
Move32BitImmediate(rd, x, cond);
} else if ((opcode == ADD) && !set_flags && (rd == rn) &&
- (scratch_register_list_ == 0)) {
+ !temps.CanAcquire()) {
// Split the operation into a sequence of additions if we cannot use a
// scratch register. In this case, we cannot re-use rn and the assembler
// does not have any scratch registers to spare.
@@ -1244,7 +1249,6 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to a scratch register and change the original instruction to
// use it.
- UseScratchRegisterScope temps(this);
// Re-use the destination register if possible.
Register scratch =
(rd.is_valid() && rd != rn && rd != pc) ? rd : temps.Acquire();
@@ -1501,6 +1505,10 @@ void Assembler::and_(Register dst, Register src1, const Operand& src2,
AddrMode1(cond | AND | s, dst, src1, src2);
}
+void Assembler::and_(Register dst, Register src1, Register src2, SBit s,
+ Condition cond) {
+ and_(dst, src1, Operand(src2), s, cond);
+}
void Assembler::eor(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
@@ -2367,6 +2375,11 @@ void Assembler::isb(BarrierOption option) {
}
}
+void Assembler::csdb() {
+ // Details available in Arm Cache Speculation Side-channels white paper,
+ // version 1.1, page 4.
+ emit(0xE320F014);
+}
// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
@@ -5153,8 +5166,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
- DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL &&
- rmode != RelocInfo::NONE64);
+ DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
(rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
@@ -5474,24 +5486,24 @@ void PatchingAssembler::Emit(Address addr) {
emit(reinterpret_cast<Instr>(addr));
}
-void PatchingAssembler::FlushICache(Isolate* isolate) {
- Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
-}
-
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
- : available_(assembler->GetScratchRegisterList()),
- old_available_(*available_) {}
+ : assembler_(assembler),
+ old_available_(*assembler->GetScratchRegisterList()),
+ old_available_vfp_(*assembler->GetScratchVfpRegisterList()) {}
UseScratchRegisterScope::~UseScratchRegisterScope() {
- *available_ = old_available_;
+ *assembler_->GetScratchRegisterList() = old_available_;
+ *assembler_->GetScratchVfpRegisterList() = old_available_vfp_;
}
Register UseScratchRegisterScope::Acquire() {
- DCHECK_NOT_NULL(available_);
- DCHECK_NE(*available_, 0);
- int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
- *available_ &= ~(1UL << index);
- return Register::from_code(index);
+ RegList* available = assembler_->GetScratchRegisterList();
+ DCHECK_NOT_NULL(available);
+ DCHECK_NE(*available, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
+ Register reg = Register::from_code(index);
+ *available &= ~reg.bit();
+ return reg;
}
} // namespace internal
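
The rewritten UseScratchRegisterScope snapshots the assembler's scratch lists on construction and writes them back on destruction, so anything acquired inside the scope is handed back automatically when it ends. A rough sketch of that RAII pattern, assuming a simplified MiniAssembler with a single core-register list and using the GCC/Clang __builtin_ctz intrinsic in place of base::bits::CountTrailingZeros32:

#include <cassert>
#include <cstdint>

using RegList = uint32_t;

// Hypothetical minimal assembler: just owns the core scratch list.
struct MiniAssembler {
  RegList scratch_register_list_ = (1u << 12);  // e.g. only ip available
  RegList* GetScratchRegisterList() { return &scratch_register_list_; }
};

class ScratchScope {
 public:
  explicit ScratchScope(MiniAssembler* assembler)
      : assembler_(assembler),
        old_available_(*assembler->GetScratchRegisterList()) {}
  ~ScratchScope() {
    // Restore whatever was available when the scope was opened.
    *assembler_->GetScratchRegisterList() = old_available_;
  }
  int Acquire() {
    RegList* available = assembler_->GetScratchRegisterList();
    assert(*available != 0);
    int index = __builtin_ctz(*available);  // lowest set bit
    *available &= *available - 1;           // clear it
    return index;
  }

 private:
  MiniAssembler* assembler_;
  RegList old_available_;
};

int main() {
  MiniAssembler masm;
  {
    ScratchScope temps(&masm);
    int reg = temps.Acquire();  // takes r12 (ip)
    (void)reg;
    assert(masm.scratch_register_list_ == 0);
  }
  // The destructor returned the register to the assembler.
  assert(masm.scratch_register_list_ == (1u << 12));
  return 0;
}
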
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 8b95aad886..32baa0ae8d 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -56,8 +56,9 @@ namespace internal {
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
V(r8) V(r9) V(r10) V(fp) V(ip) V(sp) V(lr) V(pc)
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) V(r8)
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
@@ -184,6 +185,17 @@ enum SwVfpRegisterCode {
kSwVfpAfterLast
};
+// Representation of a list of non-overlapping VFP registers. This list
+// represents the data layout of VFP registers as a bitfield:
+// S registers cover 1 bit
+// D registers cover 2 bits
+// Q registers cover 4 bits
+//
+// This way, we make sure no registers in the list ever overlap. However, a list
+// may represent multiple different sets of registers,
+// e.g. [d0 s2 s3] <=> [s0 s1 d1].
+typedef uint64_t VfpRegList;
+
// Single word VFP register.
class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
public:
@@ -195,6 +207,11 @@ class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
*vm = reg_code >> 1;
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // Each bit in the list corresponds to a S register.
+ return uint64_t{0x1} << code();
+ }
private:
friend class RegisterBase;
@@ -217,10 +234,6 @@ enum DoubleRegisterCode {
// Double word VFP register.
class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
public:
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0, that does not fit in the immediate field of vmov instructions.
- // d14: 0.0
- // d15: scratch register.
static constexpr int kSizeInBytes = 8;
inline static int NumRegisters();
@@ -231,6 +244,11 @@ class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
*vm = reg_code & 0x0F;
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A D register overlaps two S registers.
+ return uint64_t{0x3} << (code() * 2);
+ }
private:
friend class RegisterBase;
@@ -255,6 +273,11 @@ class LowDwVfpRegister
SwVfpRegister high() const {
return SwVfpRegister::from_code(code() * 2 + 1);
}
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A D register overlaps two S registers.
+ return uint64_t{0x3} << (code() * 2);
+ }
private:
friend class RegisterBase;
@@ -282,6 +305,11 @@ class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
DwVfpRegister high() const {
return DwVfpRegister::from_code(code() * 2 + 1);
}
+ VfpRegList ToVfpRegList() const {
+ DCHECK(is_valid());
+ // A Q register overlaps four S registers.
+ return uint64_t{0xf} << (code() * 4);
+ }
private:
friend class RegisterBase;
@@ -334,12 +362,6 @@ SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
constexpr LowDwVfpRegister kDoubleRegZero = d13;
-constexpr LowDwVfpRegister kScratchDoubleReg = d14;
-// This scratch q-register aliases d14 (kScratchDoubleReg) and d15, but is only
-// used if NEON is supported, which implies VFP32DREGS. When there are only 16
-// d-registers, d15 is still allocatable.
-constexpr QwNeonRegister kScratchQuadReg = q7;
-constexpr LowDwVfpRegister kScratchDoubleReg2 = d15;
constexpr CRegister no_creg = CRegister::no_reg();
@@ -376,7 +398,7 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE32));
+ RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(static Operand Zero());
INLINE(explicit Operand(const ExternalReference& f));
explicit Operand(Handle<HeapObject> handle);
@@ -651,7 +673,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
@@ -665,12 +687,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code,
- Address target);
+ Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the constant pool, not the actual call
@@ -685,6 +706,9 @@ class Assembler : public AssemblerBase {
// register.
static constexpr int kPcLoadDelta = 8;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+ VfpRegList* GetScratchVfpRegisterList() {
+ return &scratch_vfp_register_list_;
+ }
// ---------------------------------------------------------------------------
// Code generation
@@ -717,6 +741,8 @@ class Assembler : public AssemblerBase {
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
+ void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
+ Condition cond = al);
void eor(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -936,6 +962,9 @@ class Assembler : public AssemblerBase {
void dsb(BarrierOption option);
void isb(BarrierOption option);
+ // Conditional speculation barrier.
+ void csdb();
+
// Coprocessor instructions
void cdp(Coprocessor coproc, int opcode_1,
@@ -1655,6 +1684,7 @@ class Assembler : public AssemblerBase {
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
+ VfpRegList scratch_vfp_register_list_;
private:
// Avoid overflows for displacements etc.
@@ -1732,6 +1762,7 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
+ friend class UseScratchRegisterScope;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
@@ -1747,8 +1778,6 @@ class Assembler : public AssemblerBase {
std::forward_list<HeapObjectRequest> heap_object_requests_;
};
-constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
-
class EnsureSpace BASE_EMBEDDED {
public:
INLINE(explicit EnsureSpace(Assembler* assembler));
@@ -1760,7 +1789,6 @@ class PatchingAssembler : public Assembler {
~PatchingAssembler();
void Emit(Address addr);
- void FlushICache(Isolate* isolate);
};
// This scope utility allows scratch registers to be managed safely. The
@@ -1779,12 +1807,38 @@ class UseScratchRegisterScope {
// Take a register from the list and return it.
Register Acquire();
+ SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
+ LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
+ DwVfpRegister AcquireD() {
+ DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
+ DCHECK(assembler_->VfpRegisterIsAvailable(reg));
+ return reg;
+ }
+ QwNeonRegister AcquireQ() {
+ QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
+ DCHECK(assembler_->VfpRegisterIsAvailable(reg));
+ return reg;
+ }
private:
- // Currently available scratch registers.
- RegList* available_;
+ friend class Assembler;
+ friend class TurboAssembler;
+
+ // Check if we have registers available to acquire.
+ // These methods are kept private intentionally to restrict their usage to the
+ // assemblers. Choosing to emit a different instruction sequence depending on
+ // the availability of scratch registers is generally their job.
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+ template <typename T>
+ bool CanAcquireVfp() const;
+
+ template <typename T>
+ T AcquireVfp();
+
+ Assembler* assembler_;
// Available scratch registers at the start of this scope.
RegList old_available_;
+ VfpRegList old_available_vfp_;
};
} // namespace internal
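
Under the VfpRegList encoding, s(2n) and s(2n+1) occupy exactly the bits of d(n), and d(2n) and d(2n+1) exactly the bits of q(n), so aliasing checks reduce to a bitwise AND. A small worked example of those identities; the S, D and Q helpers are stand-in mask functions mirroring the ToVfpRegList methods above, not the real register classes:

#include <cstdint>
#include <cstdio>

using VfpRegList = uint64_t;

// Masks as defined in the patch: 1 bit per S, 2 per D, 4 per Q.
static VfpRegList S(int code) { return uint64_t{0x1} << code; }
static VfpRegList D(int code) { return uint64_t{0x3} << (code * 2); }
static VfpRegList Q(int code) { return uint64_t{0xf} << (code * 4); }

int main() {
  // d0 aliases s0 and s1; q0 aliases d0, d1 and s0-s3.
  std::printf("d0 == s0|s1: %s\n", D(0) == (S(0) | S(1)) ? "yes" : "no");
  std::printf("q0 == d0|d1: %s\n", Q(0) == (D(0) | D(1)) ? "yes" : "no");
  // Overlap is a bitwise AND: s3 overlaps q0 but not d0.
  std::printf("s3 & q0: %s\n", (S(3) & Q(0)) ? "overlap" : "disjoint");
  std::printf("s3 & d0: %s\n", (S(3) & D(0)) ? "overlap" : "disjoint");
  // The scratch list chosen in the patch, d14|d15, is exactly q7.
  std::printf("d14|d15 == q7: %s\n",
              (D(14) | D(15)) == Q(7) ? "yes" : "no");
  return 0;
}
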
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index ee706c7656..2695bafc1b 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -46,7 +46,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
- LowDwVfpRegister double_scratch = kScratchDoubleReg;
+ LowDwVfpRegister double_scratch = temps.AcquireLowD();
// Save the old values from these temporary registers on the stack.
__ Push(double_high, double_low);
@@ -385,6 +385,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ cmp(cp, Operand(0));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+ // Reset the masking register. This is done independently of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ mov(r1, Operand(pending_handler_entrypoint_address));
@@ -572,8 +578,8 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->MaybeCheckConstPool();
- PredictableCodeSizeScope predictable(tasm);
- predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(
+ tasm, tasm->CallStubSize() + 2 * Assembler::kInstrSize);
tasm->push(lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(lr);
@@ -584,8 +590,8 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
- PredictableCodeSizeScope predictable(masm);
- predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(
+ masm, masm->CallStubSize() + 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 9fb2eb4e8d..b3e880e048 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -166,9 +166,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -257,7 +257,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
@@ -282,9 +282,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 1c865afb09..4e52a91738 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -641,8 +641,8 @@ class Instruction {
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
- // Test for a nop instruction, which falls under type 1.
- inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
+ // Test for nop-like instructions which fall under type 1.
+ inline bool IsNopLikeType1() const { return Bits(24, 8) == 0x120F0; }
// Test for a stop instruction.
inline bool IsStop() const {
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 9a21ef862c..a4a540512d 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -30,9 +30,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
- DCHECK_EQ(kDoubleRegZero.code(), 13);
- DCHECK_EQ(kScratchDoubleReg.code(), 14);
-
{
// We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm(), VFP32DREGS,
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 9951136561..9459a7e60d 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -937,8 +937,14 @@ void Decoder::DecodeType01(Instruction* instr) {
} else {
Unknown(instr); // not used by V8
}
- } else if ((type == 1) && instr->IsNopType1()) {
- Format(instr, "nop'cond");
+ } else if ((type == 1) && instr->IsNopLikeType1()) {
+ if (instr->BitField(7, 0) == 0) {
+ Format(instr, "nop'cond");
+ } else if (instr->BitField(7, 0) == 20) {
+ Format(instr, "csdb");
+ } else {
+ Unknown(instr); // Not used in V8.
+ }
} else {
switch (instr->OpcodeField()) {
case AND: {
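
The new IsNopLikeType1 predicate keys off bits 24-8 being 0x120F0, which covers the whole type-1 hint space; the low byte then selects the particular hint, 0 for nop and 20 for csdb, whose full A32 encoding is the 0xE320F014 emitted above. A small decoder sketch of just that classification, assuming the same bit layout (the Bits helper here shifts its result down, which is equivalent to the patch's BitField(7, 0) when the low bit is 0):

#include <cstdint>
#include <cstdio>

// Extract bits hi..lo (inclusive) of an A32 instruction word.
static uint32_t Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((2u << (hi - lo)) - 1);
}

static const char* ClassifyNopLikeType1(uint32_t instr) {
  if (Bits(instr, 24, 8) != 0x120F0) return "not a type-1 hint";
  switch (Bits(instr, 7, 0)) {
    case 0:  return "nop";
    case 20: return "csdb";
    default: return "unknown hint";
  }
}

int main() {
  std::printf("%s\n", ClassifyNopLikeType1(0xE320F000));  // nop
  std::printf("%s\n", ClassifyNopLikeType1(0xE320F014));  // csdb
  return 0;
}
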
diff --git a/deps/v8/src/arm/frame-constants-arm.h b/deps/v8/src/arm/frame-constants-arm.h
index 9307cc22de..1230a26956 100644
--- a/deps/v8/src/arm/frame-constants-arm.h
+++ b/deps/v8/src/arm/frame-constants-arm.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_FRAMES_ARM_H_
-#define V8_ARM_FRAMES_ARM_H_
+#ifndef V8_ARM_FRAME_CONSTANTS_ARM_H_
+#define V8_ARM_FRAME_CONSTANTS_ARM_H_
namespace v8 {
namespace internal {
@@ -45,4 +45,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_FRAMES_ARM_H_
+#endif // V8_ARM_FRAME_CONSTANTS_ARM_H_
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 6b7498fde5..20ecef6c1c 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -70,12 +70,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r2, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 30190d3f34..3a96b640a2 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -18,6 +18,7 @@
#include "src/double.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -241,22 +242,6 @@ void TurboAssembler::Ret(int drop, Condition cond) {
Ret(cond);
}
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch,
- Condition cond) {
- if (scratch == no_reg) {
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- } else {
- mov(scratch, reg1, LeaveCC, cond);
- mov(reg1, reg2, LeaveCC, cond);
- mov(reg2, scratch, LeaveCC, cond);
- }
-}
-
void TurboAssembler::Call(Label* target) { bl(target); }
void TurboAssembler::Push(Handle<HeapObject> handle) {
@@ -305,27 +290,34 @@ void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
}
}
-void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
- if (srcdst0 == srcdst1) return; // Swapping aliased registers emits nothing.
+void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
+ DCHECK(srcdst0 != srcdst1);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, srcdst0);
+ mov(srcdst0, srcdst1);
+ mov(srcdst1, scratch);
+}
+void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+ DCHECK(srcdst0 != srcdst1);
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
- DCHECK_NE(srcdst0, kScratchDoubleReg);
- DCHECK_NE(srcdst1, kScratchDoubleReg);
- vmov(kScratchDoubleReg, srcdst0);
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ vmov(scratch, srcdst0);
vmov(srcdst0, srcdst1);
- vmov(srcdst1, kScratchDoubleReg);
+ vmov(srcdst1, scratch);
}
}
void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
- if (srcdst0 != srcdst1) {
- vswp(srcdst0, srcdst1);
- }
+ DCHECK(srcdst0 != srcdst1);
+ vswp(srcdst0, srcdst1);
}
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
@@ -817,11 +809,14 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
int dst_offset = dst_code & 1;
int src_offset = src_code & 1;
if (CpuFeatures::IsSupported(NEON)) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
// On Neon we can shift and insert from d-registers.
if (src_offset == dst_offset) {
// Offsets are the same, use vdup to copy the source to the opposite lane.
- vdup(Neon32, kScratchDoubleReg, src_d_reg, src_offset);
- src_d_reg = kScratchDoubleReg;
+ vdup(Neon32, scratch, src_d_reg, src_offset);
+ // Here we are extending the lifetime of scratch.
+ src_d_reg = scratch;
src_offset = dst_offset ^ 1;
}
if (dst_offset) {
@@ -842,27 +837,30 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
// Without Neon, use the scratch registers to move src and/or dst into
// s-registers.
- int scratchSCode = kScratchDoubleReg.low().code();
- int scratchSCode2 = kScratchDoubleReg2.low().code();
+ UseScratchRegisterScope temps(this);
+ LowDwVfpRegister d_scratch = temps.AcquireLowD();
+ LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
+ int s_scratch_code = d_scratch.low().code();
+ int s_scratch_code2 = d_scratch2.low().code();
if (src_code < SwVfpRegister::kNumRegisters) {
// src is an s-register, dst is not.
- vmov(kScratchDoubleReg, dst_d_reg);
- vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
+ vmov(d_scratch, dst_d_reg);
+ vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
SwVfpRegister::from_code(src_code));
- vmov(dst_d_reg, kScratchDoubleReg);
+ vmov(dst_d_reg, d_scratch);
} else if (dst_code < SwVfpRegister::kNumRegisters) {
// dst is an s-register, src is not.
- vmov(kScratchDoubleReg, src_d_reg);
+ vmov(d_scratch, src_d_reg);
vmov(SwVfpRegister::from_code(dst_code),
- SwVfpRegister::from_code(scratchSCode + src_offset));
+ SwVfpRegister::from_code(s_scratch_code + src_offset));
} else {
// Neither src or dst are s-registers. Both scratch double registers are
// available when there are 32 VFP registers.
- vmov(kScratchDoubleReg, src_d_reg);
- vmov(kScratchDoubleReg2, dst_d_reg);
- vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
- SwVfpRegister::from_code(scratchSCode2 + src_offset));
- vmov(dst_d_reg, kScratchQuadReg.high());
+ vmov(d_scratch, src_d_reg);
+ vmov(d_scratch2, dst_d_reg);
+ vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
+ SwVfpRegister::from_code(s_scratch_code2 + src_offset));
+ vmov(dst_d_reg, d_scratch2);
}
}
@@ -870,11 +868,13 @@ void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
if (dst_code < SwVfpRegister::kNumRegisters) {
vldr(SwVfpRegister::from_code(dst_code), src);
} else {
+ UseScratchRegisterScope temps(this);
+ LowDwVfpRegister scratch = temps.AcquireLowD();
// TODO(bbudge) If Neon supported, use load single lane form of vld1.
- int dst_s_code = kScratchDoubleReg.low().code() + (dst_code & 1);
- vmov(kScratchDoubleReg, DwVfpRegister::from_code(dst_code / 2));
+ int dst_s_code = scratch.low().code() + (dst_code & 1);
+ vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
vldr(SwVfpRegister::from_code(dst_s_code), src);
- vmov(DwVfpRegister::from_code(dst_code / 2), kScratchDoubleReg);
+ vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
}
}
@@ -883,8 +883,10 @@ void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
vstr(SwVfpRegister::from_code(src_code), dst);
} else {
// TODO(bbudge) If Neon supported, use store single lane form of vst1.
- int src_s_code = kScratchDoubleReg.low().code() + (src_code & 1);
- vmov(kScratchDoubleReg, DwVfpRegister::from_code(src_code / 2));
+ UseScratchRegisterScope temps(this);
+ LowDwVfpRegister scratch = temps.AcquireLowD();
+ int src_s_code = scratch.low().code() + (src_code & 1);
+ vmov(scratch, DwVfpRegister::from_code(src_code / 2));
vstr(SwVfpRegister::from_code(src_s_code), dst);
}
}
@@ -938,9 +940,11 @@ void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register scratch, Register shift) {
+ Register shift) {
DCHECK(!AreAliased(dst_high, src_low));
DCHECK(!AreAliased(dst_high, shift));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Label less_than_32;
Label done;
@@ -984,9 +988,11 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register scratch, Register shift) {
+ Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_low, shift));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Label less_than_32;
Label done;
@@ -1031,9 +1037,11 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register scratch, Register shift) {
+ Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_low, shift));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Label less_than_32;
Label done;
@@ -1362,13 +1370,30 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ mov(r4, Operand(debug_is_active));
+ ldrsb(r4, MemOperand(r4));
+ cmp(r4, Operand(0));
+ b(eq, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r4, Operand(debug_hook_active));
ldrsb(r4, MemOperand(r4));
cmp(r4, Operand(0));
+ b(ne, &call_hook);
+
+ ldr(r4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(r4, &skip_hook);
+ ldr(r4, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
+ tst(r4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
b(eq, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1426,7 +1451,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = r4;
+ Register code = kJavaScriptCallCodeStartRegister;
ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -1480,14 +1505,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(r1, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- Move(r1, function);
- InvokeFunction(r1, expected, actual, flag);
-}
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -1615,13 +1632,22 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DwVfpRegister double_input,
Label* done) {
- LowDwVfpRegister double_scratch = kScratchDoubleReg;
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
-
UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ SwVfpRegister single_scratch = SwVfpRegister::no_reg();
+ if (temps.CanAcquireVfp<SwVfpRegister>()) {
+ single_scratch = temps.AcquireS();
+ } else {
+ // Re-use the input as a scratch register. However, we can only do this if
+ // the input register is d0-d15 as there are no s32+ registers.
+ DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
+ LowDwVfpRegister double_scratch =
+ LowDwVfpRegister::from_code(double_input.code());
+ single_scratch = double_scratch.low();
+ }
+ vcvt_s32_f64(single_scratch, double_input);
+ vmov(result, single_scratch);
+ Register scratch = temps.Acquire();
// If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
sub(scratch, result, Operand(1));
cmp(scratch, Operand(0x7FFFFFFE));
@@ -1704,6 +1730,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ int32_t bytes_address = reinterpret_cast<int32_t>(stream->bytes());
+ mov(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+ Jump(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -2275,6 +2307,15 @@ bool AreAliased(Register reg1,
}
#endif
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // We can use the register pc - 8 for the address of the current instruction.
+ sub(dst, pc, Operand(pc_offset() + TurboAssembler::kPcLoadDelta));
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ mov(kSpeculationPoisonRegister, Operand(-1));
+}
+
} // namespace internal
} // namespace v8
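
ComputeCodeStartAddress relies on the A32 convention that reading pc yields the address of the current instruction plus 8 (kPcLoadDelta). Subtracting the assembler's current buffer offset plus that delta therefore lands on byte 0 of the instruction stream. The arithmetic, worked through for a hypothetical buffer address and offset:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kPcLoadDelta = 8;  // A32: pc reads as instruction + 8.
  uint32_t code_start = 0x10000;    // hypothetical start of the code buffer
  uint32_t pc_offset = 0x40;        // the sub is emitted at this offset
  // What the hardware hands back when the sub instruction reads pc:
  uint32_t pc_value = code_start + pc_offset + kPcLoadDelta;
  // sub(dst, pc, Operand(pc_offset() + kPcLoadDelta)):
  uint32_t dst = pc_value - (pc_offset + kPcLoadDelta);
  assert(dst == code_start);
  return 0;
}
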
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index cf731cbedb..50ce6dc005 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -20,12 +20,15 @@ constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
+constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;
constexpr Register kJavaScriptCallArgCountRegister = r0;
+constexpr Register kJavaScriptCallCodeStartRegister = r2;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
+constexpr Register kOffHeapTrampolineRegister = r4;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
@@ -305,15 +308,15 @@ class TurboAssembler : public Assembler {
inline bool AllowThisStubCall(CodeStub* stub);
void LslPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
+ Register src_high, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
+ Register src_high, Register shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register scratch, Register shift);
+ Register src_high, Register shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
@@ -481,7 +484,8 @@ class TurboAssembler : public Assembler {
void VmovExtended(int dst_code, const MemOperand& src);
void VmovExtended(const MemOperand& dst, int src_code);
- // Register swap.
+ // Register swap. Note that the register operands should be distinct.
+ void Swap(Register srcdst0, Register srcdst1);
void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
@@ -530,6 +534,12 @@ class TurboAssembler : public Assembler {
#endif
}
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
private:
bool has_frame_ = false;
Isolate* const isolate_;
@@ -579,11 +589,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg,
- Condition cond = al);
-
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
@@ -694,10 +699,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Frame restart support
void MaybeDropFrames();
@@ -797,6 +798,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// ---------------------------------------------------------------------------
// StatsCounter support
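
The Swap overloads now acquire their own scratch register and DCHECK that the operands are distinct, replacing the removed variant that fell back to a three-eor exchange when no scratch was passed in. The emitted sequence is the classic three-mov rotation, modeled here on plain integers purely for illustration:

#include <cassert>
#include <cstdint>

// The three-mov pattern the new TurboAssembler::Swap emits.
static void Swap(uint32_t* srcdst0, uint32_t* srcdst1) {
  assert(srcdst0 != srcdst1);   // mirrors the DCHECK on distinct registers
  uint32_t scratch = *srcdst0;  // mov scratch, srcdst0
  *srcdst0 = *srcdst1;          // mov srcdst0, srcdst1
  *srcdst1 = scratch;           // mov srcdst1, scratch
}

int main() {
  uint32_t r0 = 1, r1 = 2;
  Swap(&r0, &r1);
  assert(r0 == 2 && r1 == 1);
  return 0;
}
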
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 52fe902237..6a735fcef6 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -547,8 +547,7 @@ void ArmDebugger::Debug() {
#undef XSTR
}
-
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -645,11 +644,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@@ -2314,8 +2308,15 @@ void Simulator::DecodeType01(Instruction* instr) {
PrintF("%08x\n", instr->InstructionBits());
UNIMPLEMENTED();
}
- } else if ((type == 1) && instr->IsNopType1()) {
- // NOP.
+ } else if ((type == 1) && instr->IsNopLikeType1()) {
+ if (instr->BitField(7, 0) == 0) {
+ // NOP.
+ } else if (instr->BitField(7, 0) == 20) {
+ // CSDB.
+ } else {
+ PrintF("%08x\n", instr->InstructionBits());
+ UNIMPLEMENTED();
+ }
} else {
int rd = instr->RdValue();
int rn = instr->RnValue();
@@ -5640,7 +5641,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
@@ -5822,7 +5823,7 @@ intptr_t Simulator::CallImpl(byte* entry, int argument_count,
return get_register(r0);
}
-int32_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
+intptr_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 1cb11ffd96..46a84ff4b4 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -183,6 +183,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -381,9 +382,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation
- base::CustomMatcherHashMap* i_cache_;
-
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 11c4bbf33f..0c31400d9c 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -13,8 +13,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return true; }
@@ -95,7 +94,7 @@ inline void CPURegList::Remove(int code) {
inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
- return csp;
+ return sp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kXRegSizeInBits);
@@ -105,7 +104,7 @@ inline Register Register::XRegFromCode(unsigned code) {
inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
- return wcsp;
+ return wsp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kWRegSizeInBits);
@@ -198,9 +197,7 @@ inline VRegister CPURegister::Q() const {
template<typename T>
struct ImmediateInitializer {
static const bool kIsIntType = true;
- static inline RelocInfo::Mode rmode_for(T) {
- return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
- }
+ static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
static inline int64_t immediate_for(T t) {
STATIC_ASSERT(sizeof(T) <= 8);
return t;
@@ -211,9 +208,7 @@ struct ImmediateInitializer {
template<>
struct ImmediateInitializer<Smi*> {
static const bool kIsIntType = false;
- static inline RelocInfo::Mode rmode_for(Smi* t) {
- return RelocInfo::NONE64;
- }
+ static inline RelocInfo::Mode rmode_for(Smi* t) { return RelocInfo::NONE; }
static inline int64_t immediate_for(Smi* t) {
return reinterpret_cast<int64_t>(t);
}
@@ -581,26 +576,23 @@ Address Assembler::return_address_from_call_start(Address pc) {
}
}
-
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
+ Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICache(isolate(), pc, sizeof(target));
+ // Assembler::FlushICache(pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
@@ -647,7 +639,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -681,28 +673,28 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index a031884e1f..52c2e4643f 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -181,22 +181,20 @@ uint32_t RelocInfo::embedded_size() const {
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
// No icache flushing needed, see comment in set_target_address_at.
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -467,9 +465,6 @@ void ConstPool::Clear() {
bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
- // Constant pool currently does not support 32-bit entries.
- DCHECK(mode != RelocInfo::NONE32);
-
return RelocInfo::IsNone(mode) ||
(mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
}
@@ -2994,6 +2989,8 @@ void Assembler::isb() {
Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}
+void Assembler::csdb() { hint(CSDB); }
+
void Assembler::fmov(const VRegister& vd, double imm) {
if (vd.IsScalar()) {
DCHECK(vd.Is1D());
@@ -4745,6 +4742,9 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ // Non-relocatable constants should not end up in the literal pool.
+ DCHECK(!RelocInfo::IsNone(rmode));
+
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, nullptr);
bool write_reloc_info = true;
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 2deae8aaa4..c956c072b7 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -68,7 +68,6 @@ namespace internal {
// clang-format on
constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
-static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
@@ -455,8 +454,8 @@ constexpr Register no_reg = NoReg;
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
-DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits);
-DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits);
+DEFINE_REGISTER(Register, wsp, kSPRegInternalCode, kWRegSizeInBits);
+DEFINE_REGISTER(Register, sp, kSPRegInternalCode, kXRegSizeInBits);
#define DEFINE_VREGISTERS(N) \
DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \
@@ -994,7 +993,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
@@ -1008,12 +1007,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address constant_pool_entry, Code* code,
- Address target);
+ Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
@@ -1754,6 +1752,9 @@ class Assembler : public AssemblerBase {
// Instruction synchronization barrier
void isb();
+ // Conditional speculation barrier.
+ void csdb();
+
// Alias for system instructions.
void nop() { hint(NOP); }
@@ -3677,18 +3678,9 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
-
- // This version will flush at destruction.
- PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
- : PatchingAssembler(IsolateData(isolate), start, count) {
- CHECK_NOT_NULL(isolate);
- isolate_ = isolate;
- }
-
- // This version will not flush.
+ // Note that the instruction cache will not be flushed.
PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
- : Assembler(isolate_data, start, count * kInstructionSize + kGap),
- isolate_(nullptr) {
+ : Assembler(isolate_data, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
@@ -3701,18 +3693,12 @@ class PatchingAssembler : public Assembler {
DCHECK((pc_offset() + kGap) == buffer_size_);
// Verify no relocation information has been emitted.
DCHECK(IsConstPoolEmpty());
- // Flush the Instruction cache.
- size_t length = buffer_size_ - kGap;
- if (isolate_ != nullptr) Assembler::FlushICache(isolate_, buffer_, length);
}
// See definition of PatchAdrFar() for details.
static constexpr int kAdrFarPatchableNNops = 2;
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
-
- private:
- Isolate* isolate_;
};
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 52f92b6af9..07d020880d 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -30,7 +30,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
- __ Str(x1, MemOperand(__ StackPointer(), x5));
+ __ Poke(x1, Operand(x5));
__ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
@@ -314,7 +314,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ EnterExitFrame(
save_doubles(), x10, extra_stack_space,
is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
- DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
__ Poke(argv, 1 * kPointerSize);
@@ -349,12 +348,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
- // csp[...]: Saved doubles, if saved_doubles is true.
- // csp[32]: Alignment padding, if necessary.
- // csp[24]: Preserved x23 (used for target).
- // csp[16]: Preserved x22 (used for argc).
- // csp[8]: Preserved x21 (used for argv).
- // csp -> csp[0]: Space reserved for the return address.
+ // sp[...]: Saved doubles, if saved_doubles is true.
+ // sp[32]: Alignment padding, if necessary.
+ // sp[24]: Preserved x23 (used for target).
+ // sp[16]: Preserved x22 (used for argc).
+ // sp[8]: Preserved x21 (used for argv).
+ // sp -> sp[0]: Space reserved for the return address.
//
// After a successful call, the exit frame, preserved registers (x21-x23) and
// the arguments (including the receiver) are dropped or popped as
@@ -364,8 +363,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// untouched, and the stub throws an exception by jumping to
// the exception_returned label.
- DCHECK(csp.Is(__ StackPointer()));
-
// Prepare AAPCS64 arguments to pass to the builtin.
__ Mov(x0, argc);
__ Mov(x1, argv);
@@ -437,7 +434,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
isolate());
- DCHECK(csp.Is(masm->StackPointer()));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Mov(x0, 0); // argc.
@@ -454,7 +450,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Register scratch = temps.AcquireX();
__ Mov(scratch, Operand(pending_handler_sp_address));
__ Ldr(scratch, MemOperand(scratch));
- __ Mov(csp, scratch);
+ __ Mov(sp, scratch);
}
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
@@ -466,6 +462,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
+ // Reset the masking register. This is done independently of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
__ Mov(x10, Operand(pending_handler_entrypoint_address));
__ Ldr(x10, MemOperand(x10));
@@ -511,7 +513,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Push(x13, x12, xzr, x10);
// Set up fp.
- __ Sub(fp, __ StackPointer(), EntryFrameConstants::kCallerFPOffset);
+ __ Sub(fp, sp, EntryFrameConstants::kCallerFPOffset);
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
@@ -582,7 +584,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
- __ Mov(scratch, __ StackPointer());
+ __ Mov(scratch, sp);
__ Str(scratch, MemOperand(x11));
}
@@ -740,10 +742,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- // Make sure the caller configured the stack pointer (see comment in
- // DirectCEntryStub::Generate).
- DCHECK(csp.Is(__ StackPointer()));
-
intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
__ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
@@ -1260,7 +1258,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Prepare arguments.
Register args = x6;
- __ Mov(args, masm->StackPointer());
+ __ Mov(args, sp);
// Allocate the v8::Arguments structure in the arguments' space, since it's
// not controlled by GC.
@@ -1344,7 +1342,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
"slots must be a multiple of 2 for stack pointer alignment");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
+ __ Mov(x0, sp); // x0 = Handle<Name>
__ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
const int kApiStackSpace = 1;
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index b02dd5d2d7..406b139a50 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -407,12 +407,13 @@ enum Extend {
};
enum SystemHint {
- NOP = 0,
+ NOP = 0,
YIELD = 1,
- WFE = 2,
- WFI = 3,
- SEV = 4,
- SEVL = 5
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5,
+ CSDB = 20
};
enum BarrierDomain {
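
On arm64 the same barrier goes through the generic hint space: csdb() is hint #20, alongside nop (#0) and yield (#1) in the enum above. A sketch of the A64 hint encoding under the usual HINT layout (base word 0xD503201F with the 7-bit hint number at bits 11:5); the layout and the resulting 0xD503229F for csdb are stated here from the Arm encoding tables rather than from this patch:

#include <cstdint>
#include <cstdio>

// A64 HINT #imm7: 0xD503201F | imm7 << 5 (imm7 = CRm:op2).
static uint32_t Hint(uint32_t imm7) { return 0xD503201Fu | (imm7 << 5); }

int main() {
  std::printf("nop:  0x%08X\n", Hint(0));   // 0xD503201F
  std::printf("csdb: 0x%08X\n", Hint(20));  // 0xD503229F
  return 0;
}
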
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 55a09dc1c5..201dfaa423 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -168,11 +168,6 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
(instr->Mask(0x0039E000) == 0x00002000) ||
(instr->Mask(0x003AE000) == 0x00002000) ||
(instr->Mask(0x003CE000) == 0x00042000) ||
- (instr->Mask(0x003FFFC0) == 0x000320C0) ||
- (instr->Mask(0x003FF100) == 0x00032100) ||
- (instr->Mask(0x003FF200) == 0x00032200) ||
- (instr->Mask(0x003FF400) == 0x00032400) ||
- (instr->Mask(0x003FF800) == 0x00032800) ||
(instr->Mask(0x0038F000) == 0x00005000) ||
(instr->Mask(0x0038E000) == 0x00006000)) {
V::VisitUnallocated(instr);
@@ -467,6 +462,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
}
break;
}
+ V8_FALLTHROUGH;
}
case 1:
case 3:
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 8269e8e50a..a81621b6a9 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -33,7 +33,7 @@ void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
- masm->Add(src, masm->StackPointer(), src_offset);
+ masm->Add(src, sp, src_offset);
masm->Add(dst, dst, dst_offset);
// Write reg_list into the frame pointed to by dst.
@@ -140,8 +140,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, adding two words for alignment padding and
// bailout id.
- __ Add(fp_to_sp, __ StackPointer(),
- kSavedRegistersAreaSize + (2 * kPointerSize));
+ __ Add(fp_to_sp, sp, kSavedRegistersAreaSize + (2 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
@@ -222,7 +221,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
- __ Mov(__ StackPointer(), scratch);
+ __ Mov(sp, scratch);
}
// Replace the current (input) frame with the output frames.
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 41c654b214..d344903d59 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -968,7 +968,7 @@ void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
switch (instr->Mask(FPCompareMask)) {
case FCMP_s_zero:
- case FCMP_d_zero: form = form_zero; // Fall through.
+ case FCMP_d_zero: form = form_zero; V8_FALLTHROUGH;
case FCMP_s:
case FCMP_d: mnemonic = "fcmp"; break;
default: form = "(FPCompare)";
@@ -1246,6 +1246,11 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
form = nullptr;
break;
}
+ case CSDB: {
+ mnemonic = "csdb";
+ form = nullptr;
+ break;
+ }
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
switch (instr->Mask(MemBarrierMask)) {
@@ -3327,7 +3332,7 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
}
}
- if (reg.IsVRegister() || !(reg.Aliases(csp) || reg.Aliases(xzr))) {
+ if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
// Filter special registers
if (reg.IsX() && (reg.code() == 27)) {
AppendToOutput("cp");
@@ -3339,9 +3344,9 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
// A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
AppendToOutput("%c%d", reg_char, reg.code());
}
- } else if (reg.Aliases(csp)) {
- // Disassemble w31/x31 as stack pointer wcsp/csp.
- AppendToOutput("%s", reg.Is64Bits() ? "csp" : "wcsp");
+ } else if (reg.Aliases(sp)) {
+ // Disassemble w31/x31 as stack pointer wsp/sp.
+ AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
} else {
// Disassemble w31/x31 as zero register wzr/xzr.
AppendToOutput("%czr", reg_char);
@@ -3713,6 +3718,8 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
return 0;
}
+ UNIMPLEMENTED();
+ return 0;
}
case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
@@ -3836,7 +3843,8 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
switch (format[1]) {
case 'D': { // NDP.
DCHECK(instr->ShiftDP() != ROR);
- } // Fall through.
+ V8_FALLTHROUGH;
+ }
case 'L': { // NLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
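Several "// Fall through." comments in this file become V8_FALLTHROUGH, which lets the compiler verify the intent under -Wimplicit-fallthrough. A self-contained sketch with a stand-in macro (V8 defines the real one in its configuration headers; the FcmpForm function is a simplified, hypothetical version of the form selection above):

#include <cstdio>

// Stand-in for V8_FALLTHROUGH; the standard C++17 attribute satisfies
// -Wimplicit-fallthrough where the missing break is deliberate.
#define FALLTHROUGH [[fallthrough]]

const char* FcmpForm(int op) {  // 0: compare with #0.0, 1: register form
  const char* form = "'Fn, 'Fm";
  switch (op) {
    case 0:
      form = "'Fn, #0.0";  // Pick the zero form, then share the common path.
      FALLTHROUGH;
    case 1:
      return form;
    default:
      return "(FPCompare)";
  }
}

int main() {
  std::printf("%s | %s\n", FcmpForm(0), FcmpForm(1));
  return 0;
}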
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
index c12d53b7e6..0edb2ea583 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DISASM_ARM64_H
-#define V8_ARM64_DISASM_ARM64_H
+#ifndef V8_ARM64_DISASM_ARM64_H_
+#define V8_ARM64_DISASM_ARM64_H_
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
@@ -96,4 +96,4 @@ class PrintDisassembler : public DisassemblingDecoder {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_DISASM_ARM64_H
+#endif // V8_ARM64_DISASM_ARM64_H_
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/arm64/eh-frame-arm64.cc
index 48909d5b2d..79d8510f9b 100644
--- a/deps/v8/src/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/arm64/eh-frame-arm64.cc
@@ -11,7 +11,7 @@ namespace internal {
static const int kX0DwarfCode = 0;
static const int kFpDwarfCode = 29;
static const int kLrDwarfCode = 30;
-static const int kCSpDwarfCode = 31;
+static const int kSpDwarfCode = 31;
const int EhFrameConstants::kCodeAlignmentFactor = 4;
const int EhFrameConstants::kDataAlignmentFactor = -8;
@@ -33,7 +33,7 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
case kRegCode_x30:
return kLrDwarfCode;
case kSPRegInternalCode:
- return kCSpDwarfCode;
+ return kSpDwarfCode;
case kRegCode_x0:
return kX0DwarfCode;
default:
@@ -51,8 +51,8 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "fp";
case kLrDwarfCode:
return "lr";
- case kCSpDwarfCode:
- return "csp"; // This could be zr as well
+ case kSpDwarfCode:
+ return "sp"; // This could be zr as well
default:
UNIMPLEMENTED();
return nullptr;
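The rename is mechanical because the DWARF code itself does not change: AArch64 DWARF register numbers follow the architectural numbering, with 31 naming the stack pointer (and aliasing xzr, hence the "could be zr as well" remark). A minimal sketch of the mapping, mirroring the constants above:

#include <cstring>

constexpr int kFpDwarfCode = 29;  // x29
constexpr int kLrDwarfCode = 30;  // x30
constexpr int kSpDwarfCode = 31;  // x31, which also encodes xzr

const char* DwarfRegisterCodeToString(int code) {
  switch (code) {
    case kFpDwarfCode: return "fp";
    case kLrDwarfCode: return "lr";
    case kSpDwarfCode: return "sp";   // previously printed as "csp"
    default:           return nullptr;  // UNIMPLEMENTED() in the real code
  }
}

int main() { return std::strcmp(DwarfRegisterCodeToString(kSpDwarfCode), "sp"); }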
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index a337079786..00ac99d1be 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_FRAMES_ARM64_H_
-#define V8_ARM64_FRAMES_ARM64_H_
+#ifndef V8_ARM64_FRAME_CONSTANTS_ARM64_H_
+#define V8_ARM64_FRAME_CONSTANTS_ARM64_H_
namespace v8 {
namespace internal {
@@ -61,4 +61,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_FRAMES_ARM64_H_
+#endif // V8_ARM64_FRAME_CONSTANTS_ARM64_H_
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 0cc3e803d0..499023ebb2 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -258,7 +258,7 @@ class Instruction {
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
- // The following instructions use csp or wsp as Rd:
+ // The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
@@ -272,7 +272,7 @@ class Instruction {
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
- // can set the flags. The others can all write into csp.
+ // can set the flags. The others can all write into sp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
@@ -287,7 +287,7 @@ class Instruction {
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
- // The following instructions use csp or wsp as Rn:
+ // The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 8e9cce7197..f9550782c1 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -91,7 +91,6 @@ static const CounterDescriptor kCounterList[] = {
{"PC Addressing", Gauge},
{"Other", Gauge},
- {"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
@@ -238,16 +237,8 @@ void Instrument::VisitPCRelAddressing(Instruction* instr) {
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
- static Counter* sp_counter = GetCounter("SP Adjust");
- static Counter* add_sub_counter = GetCounter("Add/Sub DP");
- if (((instr->Mask(AddSubOpMask) == SUB) ||
- (instr->Mask(AddSubOpMask) == ADD)) &&
- (instr->Rd() == 31) && (instr->Rn() == 31)) {
- // Count adjustments to the C stack pointer caused by V8 needing two SPs.
- sp_counter->Increment();
- } else {
- add_sub_counter->Increment();
- }
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
}
@@ -470,16 +461,8 @@ void Instrument::VisitAddSubShifted(Instruction* instr) {
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
- static Counter* sp_counter = GetCounter("SP Adjust");
- static Counter* add_sub_counter = GetCounter("Add/Sub DP");
- if (((instr->Mask(AddSubOpMask) == SUB) ||
- (instr->Mask(AddSubOpMask) == ADD)) &&
- (instr->Rd() == 31) && (instr->Rn() == 31)) {
- // Count adjustments to the C stack pointer caused by V8 needing two SPs.
- sp_counter->Increment();
- } else {
- add_sub_counter->Increment();
- }
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
}
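With a single stack pointer there are no csp adjustments left to count, so the "SP Adjust" counter and its classification branches disappear. A sketch of the predicate the deleted branches used (the struct is a stand-in; the real test reads the instruction's Rd and Rn fields and AddSubOpMask):

// Stand-in for the decoded fields the deleted branches inspected.
struct AddSubImmediate {
  int rd, rn;          // register numbers; 31 names sp in this context
  bool is_add_or_sub;  // Mask(AddSubOpMask) == ADD or SUB
};

bool WasCountedAsSpAdjust(const AddSubImmediate& i) {
  return i.is_add_or_sub && i.rd == 31 && i.rn == 31;
}

int main() {
  AddSubImmediate sp_bump{31, 31, true};
  AddSubImmediate ordinary{0, 1, true};
  return WasCountedAsSpAdjust(sp_bump) && !WasCountedAsSpAdjust(ordinary) ? 0 : 1;
}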
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 17b058bd01..bcbe5d97dc 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -69,15 +69,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: function info
- // x2: feedback vector
- // x3: slot
- Register registers[] = {x1, x2, x3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 0861551d89..f96d4b20b8 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -404,8 +404,7 @@ void MacroAssembler::CzeroX(const Register& rd,
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
-void MacroAssembler::CmovX(const Register& rd,
- const Register& rn,
+void TurboAssembler::CmovX(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP());
@@ -416,6 +415,11 @@ void MacroAssembler::CmovX(const Register& rd,
}
}
+void TurboAssembler::Csdb() {
+ DCHECK(allow_macro_instructions());
+ csdb();
+}
+
void TurboAssembler::Cset(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -423,8 +427,7 @@ void TurboAssembler::Cset(const Register& rd, Condition cond) {
cset(rd, cond);
}
-
-void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+void TurboAssembler::Csetm(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@@ -461,14 +464,12 @@ void MacroAssembler::Csneg(const Register& rd,
csneg(rd, rn, rm, cond);
}
-
-void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+void TurboAssembler::Dmb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dmb(domain, type);
}
-
-void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+void TurboAssembler::Dsb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dsb(domain, type);
}
@@ -651,10 +652,12 @@ void TurboAssembler::Fmov(VRegister vd, double imm) {
if (bits == 0) {
fmov(vd, xzr);
} else {
- Ldr(vd, imm);
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mov(tmp, bits);
+ fmov(vd, tmp);
}
} else {
- // TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
@@ -678,12 +681,10 @@ void TurboAssembler::Fmov(VRegister vd, float imm) {
} else {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireW();
- // TODO(all): Use Assembler::ldr(const VRegister& ft, float imm).
Mov(tmp, bit_cast<uint32_t>(imm));
Fmov(vd, tmp);
}
} else {
- // TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
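Both Fmov immediate paths now materialize the raw bit pattern through an integer scratch register instead of loading from the literal pool, which is why the Ldr(rt, double imm) helper is deleted further down. A sketch of the bit_cast step (the helper below mirrors V8's bit_cast by name but is defined locally):

#include <cstdint>
#include <cstring>

// Local mirror of V8's bit_cast helper.
template <typename To, typename From>
To bit_cast(const From& from) {
  static_assert(sizeof(To) == sizeof(From), "size mismatch");
  To to;
  std::memcpy(&to, &from, sizeof(to));
  return to;
}

int main() {
  double imm = 1.5;
  // What Mov(tmp, bits) materializes before fmov(vd, tmp).
  uint64_t bits = bit_cast<uint64_t>(imm);
  return bits == 0x3FF8000000000000ULL ? 0 : 1;
}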
@@ -737,8 +738,7 @@ void MacroAssembler::Hlt(int code) {
hlt(code);
}
-
-void MacroAssembler::Isb() {
+void TurboAssembler::Isb() {
DCHECK(allow_macro_instructions());
isb();
}
@@ -748,12 +748,6 @@ void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) {
ldr(rt, operand);
}
-void TurboAssembler::Ldr(const CPURegister& rt, double imm) {
- DCHECK(allow_macro_instructions());
- DCHECK(rt.Is64Bits());
- ldr(rt, Immediate(bit_cast<uint64_t>(imm)));
-}
-
void TurboAssembler::Lsl(const Register& rd, const Register& rn,
unsigned shift) {
DCHECK(allow_macro_instructions());
@@ -1042,58 +1036,6 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn);
}
-void MacroAssembler::AlignAndSetCSPForFrame() {
- int sp_alignment = ActivationFrameAlignment();
- // AAPCS64 mandates at least 16-byte alignment.
- DCHECK_GE(sp_alignment, 16);
- DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
- Bic(csp, StackPointer(), sp_alignment - 1);
-}
-
-void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
- DCHECK(!csp.Is(StackPointer()));
- if (!TmpList()->IsEmpty()) {
- Sub(csp, StackPointer(), space);
- } else {
- // TODO(jbramley): Several callers rely on this not using scratch
- // registers, so we use the assembler directly here. However, this means
- // that large immediate values of 'space' cannot be handled cleanly. (Only
- // 24-bits immediates or values of 'space' that can be encoded in one
- // instruction are accepted.) Once we implement our flexible scratch
- // register idea, we could greatly simplify this function.
- InstructionAccurateScope scope(this);
- DCHECK(space.IsImmediate());
- // Align to 16 bytes.
- uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
- DCHECK(is_uint24(imm));
-
- Register source = StackPointer();
- if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
- bic(csp, source, 0xf);
- source = csp;
- }
- if (!is_uint12(imm)) {
- int64_t imm_top_12_bits = imm >> 12;
- sub(csp, source, imm_top_12_bits << 12);
- source = csp;
- imm -= imm_top_12_bits << 12;
- }
- if (imm > 0) {
- sub(csp, source, imm);
- }
- }
- AssertStackConsistency();
-}
-
-void TurboAssembler::SyncSystemStackPointer() {
- DCHECK(emit_debug_code());
- DCHECK(!csp.Is(StackPointer()));
- { InstructionAccurateScope scope(this);
- mov(csp, StackPointer());
- }
- AssertStackConsistency();
-}
-
void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
@@ -1249,14 +1191,9 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
if (size == 0) {
return;
}
+ DCHECK_EQ(size % 16, 0);
- if (csp.Is(StackPointer())) {
- DCHECK_EQ(size % 16, 0);
- } else {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
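Claim and Drop lose the BumpSystemStackPointer bookkeeping and operate on sp directly, with the 16-byte multiple now asserted unconditionally. A standalone model of the new contract (plain integers stand in for sp and the register operands):

#include <cassert>
#include <cstdint>

uint64_t Claim(uint64_t sp, int64_t count, uint64_t unit_size) {
  uint64_t size = count * unit_size;
  if (size == 0) return sp;
  assert(size % 16 == 0);  // mirrors DCHECK_EQ(size % 16, 0)
  return sp - size;        // Sub(sp, sp, size)
}

uint64_t Drop(uint64_t sp, int64_t count, uint64_t unit_size) {
  uint64_t size = count * unit_size;
  if (size == 0) return sp;
  assert(size % 16 == 0);
  return sp + size;        // Add(sp, sp, size)
}

int main() {
  uint64_t sp = 0x10000;
  sp = Claim(sp, 4, 8);  // four 8-byte slots: 32 bytes
  sp = Drop(sp, 4, 8);
  return sp == 0x10000 ? 0 : 1;
}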
@@ -1269,13 +1206,9 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (size.IsZero()) {
return;
}
-
AssertPositiveOrZero(count);
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
@@ -1290,11 +1223,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
- if (!csp.Is(StackPointer())) {
- BumpSystemStackPointer(size);
- }
-
- Sub(StackPointer(), StackPointer(), size);
+ Sub(sp, sp, size);
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
@@ -1305,16 +1234,8 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
return;
}
- Add(StackPointer(), StackPointer(), size);
-
- if (csp.Is(StackPointer())) {
- DCHECK_EQ(size % 16, 0);
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
+ DCHECK_EQ(size % 16, 0);
}
void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
@@ -1329,14 +1250,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
AssertPositiveOrZero(count);
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
}
void TurboAssembler::DropArguments(const Register& count,
@@ -1378,14 +1292,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
- Add(StackPointer(), StackPointer(), size);
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
- }
+ Add(sp, sp, size);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 3869046f74..267bc2151b 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -15,6 +15,7 @@
#include "src/frame-constants.h"
#include "src/frames-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -188,15 +189,14 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
// If the left-hand input is the stack pointer, we can't pre-shift the
// immediate, as the encoding won't allow the subsequent post shift.
- PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
+ PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
- if (rd.Is(csp)) {
+ if (rd.IsSP()) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
Logical(temp, rn, imm_operand, op);
- Mov(csp, temp);
- AssertStackConsistency();
+ Mov(sp, temp);
} else {
Logical(rd, rn, imm_operand, op);
}
@@ -294,7 +294,6 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
- AssertStackConsistency();
}
}
}
@@ -337,7 +336,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// registers is not required to clear the top word of the X register. In
// this case, the instruction is discarded.
//
- // If csp is an operand, add #0 is emitted, otherwise, orr #0.
+ // If sp is an operand, add #0 is emitted, otherwise, orr #0.
if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
(discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
@@ -596,11 +595,8 @@ void TurboAssembler::ConditionalCompareMacro(const Register& rn,
}
}
-
-void MacroAssembler::Csel(const Register& rd,
- const Register& rn,
- const Operand& operand,
- Condition cond) {
+void TurboAssembler::Csel(const Register& rd, const Register& rn,
+ const Operand& operand, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@@ -724,11 +720,11 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
// If the destination or source register is the stack pointer, we can
// only pre-shift the immediate right by values supported in the add/sub
// extend encoding.
- if (rd.Is(csp)) {
+ if (rd.Is(sp)) {
// If the destination is SP and flags will be set, we can't pre-shift
// the immediate at all.
mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
- } else if (rn.Is(csp)) {
+ } else if (rn.Is(sp)) {
mode = kLimitShiftForSP;
}
@@ -1105,9 +1101,9 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
PushPreamble(size);
// Reserve room for src0 and push src1.
- str(src1, MemOperand(StackPointer(), -size, PreIndex));
+ str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
- str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
+ str(src0, MemOperand(sp, src1.SizeInBytes()));
}
@@ -1166,9 +1162,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PushPreamble(registers.Count(), size);
- // Push up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in order
- // to maintain the 16-byte alignment for csp.
+ // Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& src0 = registers.PopHighestIndex();
@@ -1183,9 +1177,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- // Pop up to four registers at a time because if the current stack pointer is
- // csp and reg_size is 32, registers must be pushed in blocks of four in
- // order to maintain the 16-byte alignment for csp.
+ // Pop up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& dst0 = registers.PopLowestIndex();
@@ -1258,23 +1250,23 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
switch (count) {
case 1:
DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
- str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ str(src0, MemOperand(sp, -1 * size, PreIndex));
break;
case 2:
DCHECK(src2.IsNone() && src3.IsNone());
- stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
break;
case 3:
DCHECK(src3.IsNone());
- stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
- str(src0, MemOperand(StackPointer(), 2 * size));
+ stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
+ str(src0, MemOperand(sp, 2 * size));
break;
case 4:
// Skip over 4 * size, then fill in the gap. This allows four W registers
- // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // to be pushed using sp, whilst maintaining 16-byte alignment for sp
// at all times.
- stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
- stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(sp, 2 * size));
break;
default:
UNREACHABLE();
@@ -1295,24 +1287,24 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
switch (count) {
case 1:
DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
- ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
break;
case 2:
DCHECK(dst2.IsNone() && dst3.IsNone());
- ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
break;
case 3:
DCHECK(dst3.IsNone());
- ldr(dst2, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ ldr(dst2, MemOperand(sp, 2 * size));
+ ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
break;
case 4:
// Load the higher addresses first, then load the lower addresses and
// skip the whole block in the second instruction. This allows four W
- // registers to be popped using csp, whilst maintaining 16-byte alignment
- // for csp at all times.
- ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
- ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ // registers to be popped using sp, whilst maintaining 16-byte alignment
+ // for sp at all times.
+ ldp(dst2, dst3, MemOperand(sp, 2 * size));
+ ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
break;
default:
UNREACHABLE();
@@ -1322,43 +1314,27 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
void TurboAssembler::PushPreamble(Operand total_size) {
if (total_size.IsZero()) return;
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
- } else {
- // Even if the current stack pointer is not the system stack pointer (csp),
- // the system stack pointer will still be modified in order to comply with
- // ABI rules about accessing memory below the system stack pointer.
- BumpSystemStackPointer(total_size);
+ // The stack pointer must be aligned to 16 bytes on entry, and the total
+ // size of the specified registers must also be a multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PopPostamble(Operand total_size) {
if (total_size.IsZero()) return;
- if (csp.Is(StackPointer())) {
- // If the current stack pointer is csp, then it must be aligned to 16 bytes
- // on entry and the total size of the specified registers must also be a
- // multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
- } else if (emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- SyncSystemStackPointer();
+ // The stack pointer must be aligned to 16 bytes on entry, and the total
+ // size of the specified registers must also be a multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PushPreamble(int count, int size) {
@@ -1376,7 +1352,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
- Str(src, MemOperand(StackPointer(), offset));
+ Str(src, MemOperand(sp, offset));
}
@@ -1388,14 +1364,14 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
- Ldr(dst, MemOperand(StackPointer(), offset));
+ Ldr(dst, MemOperand(sp, offset));
}
void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
- Stp(src1, src2, MemOperand(StackPointer(), offset));
+ Stp(src1, src2, MemOperand(sp, offset));
}
@@ -1404,7 +1380,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1,
int offset) {
DCHECK(AreSameSizeAndType(dst1, dst2));
DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
- Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+ Ldp(dst1, dst2, MemOperand(sp, offset));
}
@@ -1412,11 +1388,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- DCHECK(csp.Is(StackPointer()));
-
- MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@@ -1436,11 +1408,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
- // This method must not be called unless the current stack pointer is the
- // system stack pointer (csp).
- DCHECK(csp.Is(StackPointer()));
-
- MemOperand tos(csp, 2 * kXRegSize, PostIndex);
+ MemOperand tos(sp, 2 * kXRegSize, PostIndex);
ldp(x19, x20, tos);
ldp(x21, x22, tos);
@@ -1455,44 +1423,15 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d14, d15, tos);
}
-void TurboAssembler::AssertStackConsistency() {
- // Avoid emitting code when !use_real_abort() since non-real aborts cause too
- // much code to be generated.
- if (emit_debug_code() && use_real_aborts()) {
- if (csp.Is(StackPointer())) {
- // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
- // can't check the alignment of csp without using a scratch register (or
- // clobbering the flags), but the processor (or simulator) will abort if
- // it is not properly aligned during a load.
- ldr(xzr, MemOperand(csp, 0));
- }
- if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
- Label ok;
- // Check that csp <= StackPointer(), preserving all registers and NZCV.
- sub(StackPointer(), csp, StackPointer());
- cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
- tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
-
- // Avoid generating AssertStackConsistency checks for the Push in Abort.
- { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
- // Restore StackPointer().
- sub(StackPointer(), csp, StackPointer());
- Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
- }
-
- bind(&ok);
- // Restore StackPointer().
- sub(StackPointer(), csp, StackPointer());
- }
- }
-}
-
-void TurboAssembler::AssertCspAligned() {
+void TurboAssembler::AssertSpAligned() {
if (emit_debug_code() && use_real_aborts()) {
- // TODO(titzer): use a real assert for alignment check?
+ // Arm64 requires the stack pointer to be 16-byte aligned prior to address
+ // calculation.
UseScratchRegisterScope scope(this);
Register temp = scope.AcquireX();
- ldr(temp, MemOperand(csp));
+ Mov(temp, sp);
+ Tst(temp, 15);
+ Check(eq, AbortReason::kUnexpectedStackPointer);
}
}
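AssertSpAligned replaces the old dereference trick with an explicit test: copy sp to a scratch register, Tst against 15, and Check(eq). The predicate it verifies, as compile-time checks on sample addresses:

#include <cstdint>

// A pointer is 16-byte aligned exactly when its low four bits are clear;
// Tst(temp, 15) + Check(eq, ...) performs this test at run time.
constexpr bool IsSpAligned(uint64_t sp) { return (sp & 15) == 0; }

static_assert(IsSpAligned(0x0000'7FFF'FFFF'FFF0), "aligned");
static_assert(!IsSpAligned(0x0000'7FFF'FFFF'FFF8), "misaligned");

int main() { return 0; }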
@@ -1568,11 +1507,11 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
}
void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
- Add(dst, StackPointer(), slot_offset << kPointerSizeLog2);
+ Add(dst, sp, slot_offset << kPointerSizeLog2);
}
void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
- Add(dst, StackPointer(), Operand(slot_offset, LSL, kPointerSizeLog2));
+ Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
@@ -1630,6 +1569,34 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
+void TurboAssembler::Swap(Register lhs, Register rhs) {
+ DCHECK(lhs.IsSameSizeAndType(rhs));
+ DCHECK(!lhs.Is(rhs));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, rhs);
+ Mov(rhs, lhs);
+ Mov(lhs, temp);
+}
+
+void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
+ DCHECK(lhs.IsSameSizeAndType(rhs));
+ DCHECK(!lhs.Is(rhs));
+ UseScratchRegisterScope temps(this);
+ VRegister temp = VRegister::no_reg();
+ if (lhs.IsS()) {
+ temp = temps.AcquireS();
+ } else if (lhs.IsD()) {
+ temp = temps.AcquireD();
+ } else {
+ DCHECK(lhs.IsQ());
+ temp = temps.AcquireQ();
+ }
+ Mov(temp, rhs);
+ Mov(rhs, lhs);
+ Mov(lhs, temp);
+}
+
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
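Swap has no single-instruction equivalent here, so it spends one scratch register sized to match the operands (which is why the VRegister variant acquires S, D or Q, and why the new AcquireQ helper appears in the header change below). The three-move shape, modeled on plain values:

#include <cstdint>

// Plain values stand in for registers; temp plays the scratch register.
void Swap(uint64_t& lhs, uint64_t& rhs) {
  uint64_t temp = rhs;  // Mov(temp, rhs)
  rhs = lhs;            // Mov(rhs, lhs)
  lhs = temp;           // Mov(lhs, temp)
}

int main() {
  uint64_t a = 1, b = 2;
  Swap(a, b);
  return (a == 2 && b == 1) ? 0 : 1;
}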
@@ -1792,6 +1759,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
+ Mov(kOffHeapTrampolineRegister, bytes_address);
+ Br(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
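JumpToInstructionStream is a two-instruction trampoline: materialize the absolute address of the off-heap bytes in kOffHeapTrampolineRegister (ip0, per the header change below) and branch there. A stand-in model of the address computation only (the struct is ours; V8's real InstructionStream lives in src/instruction-stream.h, newly included above):

#include <cstdint>

// Stand-in for the real class in src/instruction-stream.h.
struct InstructionStream {
  const uint8_t* bytes_;
  const uint8_t* bytes() const { return bytes_; }
};

// Mov(kOffHeapTrampolineRegister, bytes_address); Br(kOffHeapTrampolineRegister);
uint64_t TrampolineTarget(const InstructionStream* stream) {
  return reinterpret_cast<uint64_t>(stream->bytes());
}

int main() {
  static const uint8_t code[] = {0x1F, 0x20, 0x03, 0xD5};  // nop, little-endian
  InstructionStream stream{code};
  return TrampolineTarget(&stream) == reinterpret_cast<uint64_t>(code) ? 0 : 1;
}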
@@ -1927,13 +1900,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
Bind(&start_call);
#endif
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- if (rmode == RelocInfo::NONE64) {
+ if (RelocInfo::IsNone(rmode)) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
@@ -2009,62 +1979,15 @@ int TurboAssembler::CallSize(Label* target) {
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
USE(target);
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
+ return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
+ : kCallSizeWithRelocation;
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
USE(code);
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- DCHECK(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
-}
-
-
-void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
- SmiCheckType smi_check_type) {
- Label on_not_heap_number;
-
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(object, &on_not_heap_number);
- }
-
- AssertNotSmi(object);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
-
- Bind(&on_not_heap_number);
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Label* on_not_heap_number,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(object, on_not_heap_number);
- }
-
- AssertNotSmi(object);
-
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
+ return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
+ : kCallSizeWithRelocation;
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
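The NONE32/NONE64 split collapses into RelocInfo::IsNone because an address on this target is always 64 bits wide, and the call path relies on AArch64 virtual addresses fitting in 48 bits ("the 16 higher bits must be 0"). That property as a one-line predicate:

#include <cstdint>

// "Addresses are 48 bits so we never need to load the upper 16 bits."
constexpr bool FitsIn48Bits(uint64_t address) { return (address >> 48) == 0; }

static_assert(FitsIn48Bits(0x0000'7FFF'DEAD'BEEF), "user-space address");
static_assert(!FitsIn48Bits(~uint64_t{0}), "top bits set");

int main() { return 0; }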
@@ -2110,12 +2033,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
- Add(src_reg, StackPointer(),
- Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
+ Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, src_reg, kPointerSize);
} else {
- Add(src_reg, StackPointer(),
- (callee_args_count.immediate() + 1) * kPointerSize);
+ Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
}
// Round src_reg up to a multiple of 16 bytes, so we include any potential
@@ -2145,12 +2066,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
- Cmp(StackPointer(), src_reg);
+ Cmp(sp, src_reg);
B(ne, &loop);
// Leave current frame.
- Mov(StackPointer(), dst_reg);
- AssertStackConsistency();
+ Mov(sp, dst_reg);
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@@ -2224,12 +2144,28 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ Mov(x4, Operand(debug_is_active));
+ Ldrsb(x4, MemOperand(x4));
+ Cbz(x4, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
- Cbz(x4, &skip_hook);
+ Cbnz(x4, &call_hook);
+
+ Ldr(x4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(x4, &skip_hook);
+ Ldr(x4, FieldMemOperand(x4, DebugInfo::kFlagsOffset));
+ Tst(x4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
+ B(eq, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
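CheckDebugHook now consults three inputs instead of one: the global debug-active flag, the global hook-on-call flag, and the function's own DebugInfo flags. The branch structure, restated as plain C++ (the struct and flag value are stand-ins; only the control flow follows the assembly above):

struct DebugInfo {
  static constexpr int kBreakAtEntry = 1;  // stand-in flag value
  int flags;
};

bool ShouldCallDebugHook(bool debug_is_active, bool hook_on_function_call,
                         const DebugInfo* debug_info) {
  if (!debug_is_active) return false;       // Cbz(x4, &skip_hook)
  if (hook_on_function_call) return true;   // Cbnz(x4, &call_hook)
  if (debug_info == nullptr) return false;  // JumpIfSmi(x4, &skip_hook)
  return (debug_info->flags & DebugInfo::kBreakAtEntry) != 0;
}

int main() {
  DebugInfo break_at_entry{DebugInfo::kBreakAtEntry};
  return ShouldCallDebugHook(true, false, &break_at_entry) &&
                 !ShouldCallDebugHook(false, true, nullptr)
             ? 0
             : 1;
}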
@@ -2284,7 +2220,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = x4;
+ Register code = kJavaScriptCallCodeStartRegister;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -2343,16 +2279,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(function, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- // Contract with called JS functions requires that function is passed in x1.
- // (See FullCodeGenerator::Generate().)
- LoadObject(x1, function);
- InvokeFunction(x1, expected, actual, flag);
-}
-
void TurboAssembler::TryConvertDoubleToInt64(Register result,
DoubleRegister double_input,
Label* done) {
@@ -2402,7 +2328,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
- Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@@ -2414,21 +2340,20 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
- Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
+ Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp);
// sp[4] : lr
// sp[3] : fp
// sp[1] : type
// sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
- DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
- Mov(fp, csp);
+ Mov(fp, sp);
Push(type_reg, padreg);
- // csp[3] : lr
- // csp[2] : fp
- // csp[1] : type
- // csp[0] : for alignment
+ // sp[3] : lr
+ // sp[2] : fp
+ // sp[1] : type
+ // sp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
Mov(type_reg, StackFrame::TypeToMarker(type));
@@ -2439,8 +2364,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
- Add(fp, StackPointer(),
- TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
@@ -2450,15 +2374,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
- DCHECK(csp.Is(StackPointer()));
- Mov(csp, fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
} else {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
- Mov(StackPointer(), fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
}
}
@@ -2493,7 +2414,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Set up the new stack frame.
Push(lr, fp);
- Mov(fp, StackPointer());
+ Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
Mov(scratch, Operand(CodeObject()));
@@ -2540,13 +2461,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.
- DCHECK(csp.Is(StackPointer()));
-
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
// padding can vary.
- Add(scratch, csp, kXRegSize);
+ Add(scratch, sp, kXRegSize);
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -2555,8 +2474,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
const Register& scratch2) {
- DCHECK(csp.Is(StackPointer()));
-
if (restore_doubles) {
ExitFrameRestoreFPRegs();
}
@@ -2582,8 +2499,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
- Mov(csp, fp);
- AssertStackConsistency();
+ Mov(sp, fp);
Pop(fp, lr);
}
@@ -2752,7 +2668,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
- // - x31 (csp) because the system stack pointer doesn't need to be included
+ // - x31 (sp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
// This function implements the mapping of register code to index into the
@@ -3052,7 +2968,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
- DCHECK(!kCallerSaved.IncludesAliasOf(StackPointer()));
+ DCHECK(!kCallerSaved.IncludesAliasOf(sp));
// The provided arguments, and their proper procedure-call standard registers.
CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
@@ -3164,12 +3080,6 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Bind(&after_data);
}
- // We don't pass any arguments on the stack, but we still need to align the C
- // stack pointer to a 16-byte boundary for PCS compliance.
- if (!csp.Is(StackPointer())) {
- Bic(csp, StackPointer(), 0xF);
- }
-
CallPrintf(arg_count, pcs);
}
@@ -3208,14 +3118,6 @@ void MacroAssembler::Printf(const char * format,
CPURegister arg1,
CPURegister arg2,
CPURegister arg3) {
- // We can only print sp if it is the current stack pointer.
- if (!csp.Is(StackPointer())) {
- DCHECK(!csp.Aliases(arg0));
- DCHECK(!csp.Aliases(arg1));
- DCHECK(!csp.Aliases(arg2));
- DCHECK(!csp.Aliases(arg3));
- }
-
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
@@ -3224,8 +3126,8 @@ void MacroAssembler::Printf(const char * format,
FPTmpList()->set_list(0);
// Preserve all caller-saved registers as well as NZCV.
- // If csp is the stack pointer, PushCPURegList asserts that the size of each
- // list is a multiple of 16 bytes.
+ // PushCPURegList asserts that the size of each list is a multiple of 16
+ // bytes.
PushCPURegList(kCallerSaved);
PushCPURegList(kCallerSavedV);
@@ -3241,15 +3143,15 @@ void MacroAssembler::Printf(const char * format,
// If any of the arguments are the current stack pointer, allocate a new
// register for them, and adjust the value to compensate for pushing the
// caller-saved registers.
- bool arg0_sp = StackPointer().Aliases(arg0);
- bool arg1_sp = StackPointer().Aliases(arg1);
- bool arg2_sp = StackPointer().Aliases(arg2);
- bool arg3_sp = StackPointer().Aliases(arg3);
+ bool arg0_sp = sp.Aliases(arg0);
+ bool arg1_sp = sp.Aliases(arg1);
+ bool arg2_sp = sp.Aliases(arg2);
+ bool arg3_sp = sp.Aliases(arg3);
if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
// Allocate a register to hold the original stack pointer value, to pass
// to PrintfNoPreserve as an argument.
Register arg_sp = temps.AcquireX();
- Add(arg_sp, StackPointer(),
+ Add(arg_sp, sp,
kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
@@ -3302,7 +3204,7 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
- DCHECK(!AreAliased(result, xzr, csp));
+ DCHECK(!AreAliased(result, xzr, sp));
return result;
}
@@ -3359,6 +3261,14 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
}
}
+void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
+ // We can use adr to load a pc relative location.
+ adr(rd, -pc_offset());
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ Mov(kSpeculationPoisonRegister, -1);
+}
#undef __
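ComputeCodeStartAddress recovers the code start with adr at -pc_offset(), avoiding an embedded CodeObject reference, and ResetSpeculationPoisonRegister reloads the all-ones mask. The mitigation idea behind the mask, as a sketch rather than V8's actual pipeline: values loaded on a mis-speculated path are ANDed with 0, so they can never feed a later dependent access.

#include <cstdint>

// loaded_value & mask: with mask == ~0 (valid path) the value passes through;
// with mask == 0 (mis-speculated path) it is squashed to zero.
constexpr uint64_t PoisonLoad(uint64_t loaded_value, uint64_t poison_mask) {
  return loaded_value & poison_mask;
}

constexpr uint64_t kValidPath = ~uint64_t{0};  // what Mov(reg, -1) restores
constexpr uint64_t kMispredicted = 0;

static_assert(PoisonLoad(0xDEADBEEF, kValidPath) == 0xDEADBEEF, "pass");
static_assert(PoisonLoad(0xDEADBEEF, kMispredicted) == 0, "squash");

int main() { return 0; }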
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 47c08f2622..c72cb39536 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -47,12 +47,15 @@ namespace internal {
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
+#define kSpeculationPoisonRegister x18
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
+#define kJavaScriptCallCodeStartRegister x2
#define kJavaScriptCallNewTargetRegister x3
+#define kOffHeapTrampolineRegister ip0
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
@@ -254,6 +257,10 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Handle<HeapObject> x);
void Move(Register dst, Smi* src);
+ // Register swap. Note that the register operands should be distinct.
+ void Swap(Register lhs, Register rhs);
+ void Swap(VRegister lhs, VRegister rhs);
+
// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
V(fmla, Fmla) \
@@ -549,6 +556,11 @@ class TurboAssembler : public Assembler {
void Cbnz(const Register& rt, Label* label);
void Cbz(const Register& rt, Label* label);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Isb();
+ inline void Csdb();
+
bool AllowThisStubCall(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
@@ -581,20 +593,6 @@ class TurboAssembler : public Assembler {
// Print a message to stderr and abort execution.
void Abort(AbortReason reason);
- // If emit_debug_code() is true, emit a run-time check to ensure that
- // StackPointer() does not point below the system stack pointer.
- //
- // Whilst it is architecturally legal for StackPointer() to point below csp,
- // it can be evidence of a potential bug because the ABI forbids accesses
- // below csp.
- //
- // If StackPointer() is the system stack pointer (csp), then csp will be
- // dereferenced to cause the processor (or simulator) to abort if it is not
- // properly aligned.
- //
- // If emit_debug_code() is false, this emits no code.
- void AssertStackConsistency();
-
// Remaining instructions are simple pass-through calls to the assembler.
inline void Asr(const Register& rd, const Register& rn, unsigned shift);
inline void Asr(const Register& rd, const Register& rn, const Register& rm);
@@ -614,9 +612,6 @@ class TurboAssembler : public Assembler {
static CPURegList DefaultTmpList();
static CPURegList DefaultFPTmpList();
- // Return the stack pointer.
- inline const Register& StackPointer() const { return csp; }
-
// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
void Mvn(const Register& rd, const Operand& operand);
@@ -650,9 +645,11 @@ class TurboAssembler : public Assembler {
inline void Cmp(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
+ void Csel(const Register& rd, const Register& rn, const Operand& operand,
+ Condition cond);
- // Emits a runtime assert that the CSP is aligned.
- void AssertCspAligned();
+ // Emits a runtime assert that the stack pointer is aligned.
+ void AssertSpAligned();
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
@@ -687,17 +684,14 @@ class TurboAssembler : public Assembler {
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
- // Helper function for double immediate.
- inline void Ldr(const CPURegister& rt, double imm);
// Claim or drop stack space without actually accessing memory.
//
// In debug mode, both of these will write invalid data into the claimed or
// dropped space.
//
- // If the current stack pointer (according to StackPointer()) is csp, then it
- // must be aligned to 16 bytes and the size claimed or dropped must be a
- // multiple of 16 bytes.
+ // The stack pointer must be aligned to 16 bytes and the size claimed or
+ // dropped must be a multiple of 16 bytes.
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
@@ -724,26 +718,6 @@ class TurboAssembler : public Assembler {
// Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);
- // Re-synchronizes the system stack pointer (csp) with the current stack
- // pointer (according to StackPointer()).
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- inline void SyncSystemStackPointer();
-
- // Push the system stack pointer (csp) down to allow the same to be done to
- // the current stack pointer (according to StackPointer()). This must be
- // called _before_ accessing the memory.
- //
- // This is necessary when pushing or otherwise adding things to the stack, to
- // satisfy the AAPCS64 constraint that the memory below the system stack
- // pointer is not accessed. The amount pushed will be increased as necessary
- // to ensure csp remains aligned to 16 bytes.
- //
- // This method asserts that StackPointer() is not csp, since the call does
- // not make sense in that context.
- inline void BumpSystemStackPointer(const Operand& space);
-
// Add and sub macros.
inline void Add(const Register& rd, const Register& rn,
const Operand& operand);
@@ -778,11 +752,6 @@ class TurboAssembler : public Assembler {
// The stack pointer must be aligned to 16 bytes on entry and the total size
// of the specified registers must also be a multiple of 16 bytes.
//
- // Even if the current stack pointer is not the system stack pointer (csp),
- // Push (and derived methods) will still modify the system stack pointer in
- // order to comply with ABI rules about accessing memory below the system
- // stack pointer.
- //
// Other than the registers passed into Pop, the stack pointer and (possibly)
// the system stack pointer, these methods do not modify any other registers.
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
@@ -1011,17 +980,13 @@ class TurboAssembler : public Assembler {
inline void Clz(const Register& rd, const Register& rn);
- // Poke 'src' onto the stack. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
+ // be 16 byte aligned.
void Poke(const CPURegister& src, const Operand& offset);
// Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
- // with 'src2' at a higher address than 'src1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
+ // stack pointer must be 16 byte aligned.
void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
@@ -1047,7 +1012,9 @@ class TurboAssembler : public Assembler {
void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
inline void Cset(const Register& rd, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
Condition cond);
inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
@@ -1233,6 +1200,12 @@ class TurboAssembler : public Assembler {
inline void Fcvtas(const Register& rd, const VRegister& fn);
inline void Fcvtau(const Register& rd, const VRegister& fn);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(const Register& rd);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@@ -1257,8 +1230,8 @@ class TurboAssembler : public Assembler {
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
- // arguments and stack (csp) must be prepared by the caller as for a normal
- // AAPCS64 call to 'printf'.
+ // arguments and stack must be prepared by the caller as for a normal AAPCS64
+ // call to 'printf'.
//
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
@@ -1326,8 +1299,6 @@ class MacroAssembler : public TurboAssembler {
inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
- void Csel(const Register& rd, const Register& rn, const Operand& operand,
- Condition cond);
#define DECLARE_FUNCTION(FN, OP) \
inline void FN(const Register& rs, const Register& rt, const Register& rn);
@@ -1344,14 +1315,10 @@ class MacroAssembler : public TurboAssembler {
inline void Cinc(const Register& rd, const Register& rn, Condition cond);
inline void Cinv(const Register& rd, const Register& rn, Condition cond);
inline void CzeroX(const Register& rd, Condition cond);
- inline void CmovX(const Register& rd, const Register& rn, Condition cond);
- inline void Csetm(const Register& rd, Condition cond);
inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
- inline void Dmb(BarrierDomain domain, BarrierType type);
- inline void Dsb(BarrierDomain domain, BarrierType type);
inline void Extr(const Register& rd, const Register& rn, const Register& rm,
unsigned lsb);
inline void Fcsel(const VRegister& fd, const VRegister& fn,
@@ -1394,7 +1361,6 @@ class MacroAssembler : public TurboAssembler {
const VRegister& fm, const VRegister& fa);
inline void Hint(SystemHint code);
inline void Hlt(int code);
- inline void Isb();
inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src);
inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
@@ -1641,17 +1607,13 @@ class MacroAssembler : public TurboAssembler {
};
// Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // The stack pointer must be aligned to 16 bytes.
void Peek(const CPURegister& dst, const Operand& offset);
// Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
// values peeked will be adjacent, with the value in 'dst2' being from a
- // higher address than 'dst1'. The offset is in bytes.
- //
- // If the current stack pointer (according to StackPointer()) is csp, then
- // csp must be aligned to 16 bytes.
+ // higher address than 'dst1'. The offset is in bytes. The stack pointer must
+ // be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
@@ -1704,10 +1666,6 @@ class MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
void PopCalleeSavedRegisters();
- // Align csp for a frame, as per ActivationFrameAlignment, and make it the
- // current stack pointer.
- inline void AlignAndSetCSPForFrame();
-
// Helpers ------------------------------------------------------------------
static int SafepointRegisterStackIndex(int reg_code);
@@ -1770,11 +1728,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
- void JumpIfHeapNumber(Register object, Label* on_heap_number,
- SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
- void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
- SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
-
// Try to represent a double as a signed 64-bit int.
// This succeeds if the result compares equal to the input, so inputs of -0.0
// are represented as 0 and handled as a success.
@@ -1817,6 +1770,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@@ -1841,9 +1797,6 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
// ---- Code generation helpers ----
@@ -1940,12 +1893,12 @@ class MacroAssembler : public TurboAssembler {
// Set up a stack frame and registers as follows:
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
- // fp[-8]: SPOffset (new csp)
+ // fp[-8]: SPOffset (new sp)
// fp[-16]: CodeObject()
// fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
- // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // sp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
- // csp -> csp[0]: Space reserved for the return address.
+ // sp -> sp[0]: Space reserved for the return address.
//
// This function also stores the new frame information in the top frame, so
// that the new frame becomes the current frame.
@@ -1960,8 +1913,6 @@ class MacroAssembler : public TurboAssembler {
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
- //
- // The stack pointer must be csp on entry.
void LeaveExitFrame(bool save_doubles, const Register& scratch,
const Register& scratch2);
@@ -2030,11 +1981,6 @@ class MacroAssembler : public TurboAssembler {
// (such as %e, %f or %g) are VRegisters, and that arguments for integer
// placeholders are Registers.
//
- // At the moment it is only possible to print the value of csp if it is the
- // current stack pointer. Otherwise, the MacroAssembler will automatically
- // update csp on every push (using BumpSystemStackPointer), so determining its
- // value is difficult.
- //
// Format placeholders that refer to more than one argument, or to a specific
// argument, are not supported. This includes formats like "%1$d" or "%.*d".
//
@@ -2169,6 +2115,7 @@ class UseScratchRegisterScope {
Register AcquireX() { return AcquireNextAvailable(available_).X(); }
VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+ VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
VRegister AcquireV(VectorFormat format) {
return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
}
@@ -2210,7 +2157,7 @@ class InlineSmiCheckInfo {
// Use MacroAssembler::InlineData to emit information about patchable inline
// SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
- // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
+ // indicate that there is no inline SMI check. Note that 'reg' cannot be sp.
//
// The generated patch information can be read using the InlineSMICheckInfo
// class.
@@ -2230,8 +2177,8 @@ class InlineSmiCheckInfo {
// Fields in the data encoded by InlineData.
- // A width of 5 (Rd_width) for the SMI register preclues the use of csp,
- // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
+ // A width of 5 (Rd_width) for the SMI register precludes the use of sp,
+ // since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
// used in a patchable check. The Emit() method checks this.
//
// Note that the total size of the fields is restricted by the underlying
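The AcquireQ() addition above rounds out the scratch-register scope with 128-bit Q-register access. Below is a minimal, self-contained sketch of the RAII scratch-pool pattern that UseScratchRegisterScope implements; the RegList and register-code types here are stand-ins, not V8's actual classes.

#include <cassert>
#include <cstdint>

// A bitmask of registers the code generator may borrow temporarily.
struct RegList {
  uint32_t bits = 0;
};

class ScratchScope {
 public:
  explicit ScratchScope(RegList* available)
      : available_(available), saved_bits_(available->bits) {}
  // Everything acquired in this scope is handed back on destruction.
  ~ScratchScope() { available_->bits = saved_bits_; }

  // Pop the lowest-numbered free register from the pool.
  int Acquire() {
    assert(available_->bits != 0 && "scratch pool exhausted");
    int code = __builtin_ctz(available_->bits);  // GCC/Clang intrinsic
    available_->bits &= available_->bits - 1;    // clear lowest set bit
    return code;
  }

 private:
  RegList* available_;
  uint32_t saved_bits_;
};

In V8 itself, AcquireQ() presumably draws from the same floating-point pool (availablefp_) as AcquireS() and AcquireD(), differing only in the width of the VRegister view it returns.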
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index d0c464dfbe..5c72cf1c90 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -626,16 +626,15 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
}
const char* Simulator::xreg_names[] = {
- "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8",
- "x9", "x10", "x11", "x12", "x13", "x14", "x15", "ip0", "ip1",
- "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
- "cp", "x28", "fp", "lr", "xzr", "csp"};
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
+ "x11", "x12", "x13", "x14", "x15", "ip0", "ip1", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x25", "x26", "cp", "x28", "fp", "lr", "xzr", "sp"};
const char* Simulator::wreg_names[] = {
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8",
"w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
"w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
- "wcp", "w28", "wfp", "wlr", "wzr", "wcsp"};
+ "wcp", "w28", "wfp", "wlr", "wzr", "wsp"};
const char* Simulator::sreg_names[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -768,7 +767,7 @@ int Simulator::CodeFromName(const char* name) {
return i;
}
}
- if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
+ if ((strcmp("sp", name) == 0) || (strcmp("wsp", name) == 0)) {
return kSPRegInternalCode;
}
return -1;
@@ -1450,7 +1449,7 @@ void Simulator::VisitUnconditionalBranch(Instruction* instr) {
switch (instr->Mask(UnconditionalBranchMask)) {
case BL:
set_lr(instr->following());
- // Fall through.
+ V8_FALLTHROUGH;
case B:
set_pc(instr->ImmPCOffsetTarget());
break;
@@ -1478,7 +1477,7 @@ void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
// this, but if we do trap to allow debugging.
Debug();
}
- // Fall through.
+ V8_FALLTHROUGH;
}
case BR:
case RET: set_pc(target); break;
@@ -1630,7 +1629,7 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
// Switch on the logical operation, stripping out the NOT bit, as it has a
// different meaning for logical immediate instructions.
switch (instr->Mask(LogicalOpMask & ~NOT)) {
- case ANDS: update_flags = true; // Fall through.
+ case ANDS: update_flags = true; V8_FALLTHROUGH;
case AND: result = op1 & op2; break;
case ORR: result = op1 | op2; break;
case EOR: result = op1 ^ op2; break;
@@ -2956,7 +2955,9 @@ void Simulator::VisitSystem(Instruction* instr) {
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
- case NOP: break;
+ case NOP:
+ case CSDB:
+ break;
default: UNIMPLEMENTED();
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
@@ -2996,15 +2997,15 @@ bool Simulator::GetValue(const char* desc, int64_t* value) {
bool Simulator::PrintValue(const char* desc) {
- if (strcmp(desc, "csp") == 0) {
+ if (strcmp(desc, "sp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
- PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
+ PrintF(stream_, "%s sp:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
+ clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
return true;
- } else if (strcmp(desc, "wcsp") == 0) {
+ } else if (strcmp(desc, "wsp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
- PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
- clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
+ PrintF(stream_, "%s wsp:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
+ clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
return true;
}
@@ -4396,15 +4397,18 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
case NEON_LD1_4v:
case NEON_LD1_4v_post:
ld1(vf, vreg(reg[3]), addr[3]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_3v:
case NEON_LD1_3v_post:
ld1(vf, vreg(reg[2]), addr[2]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_2v:
case NEON_LD1_2v_post:
ld1(vf, vreg(reg[1]), addr[1]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_LD1_1v:
case NEON_LD1_1v_post:
ld1(vf, vreg(reg[0]), addr[0]);
@@ -4412,15 +4416,18 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
case NEON_ST1_4v:
case NEON_ST1_4v_post:
st1(vf, vreg(reg[3]), addr[3]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_3v:
case NEON_ST1_3v_post:
st1(vf, vreg(reg[2]), addr[2]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_2v:
case NEON_ST1_2v_post:
st1(vf, vreg(reg[1]), addr[1]);
- count++; // Fall through.
+ count++;
+ V8_FALLTHROUGH;
case NEON_ST1_1v:
case NEON_ST1_1v_post:
st1(vf, vreg(reg[0]), addr[0]);
@@ -4533,7 +4540,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_b_post:
case NEON_LD4_b:
case NEON_LD4_b_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_b:
case NEON_ST1_b_post:
case NEON_ST2_b:
@@ -4552,7 +4560,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_h_post:
case NEON_LD4_h:
case NEON_LD4_h_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_h:
case NEON_ST1_h_post:
case NEON_ST2_h:
@@ -4572,7 +4581,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_s_post:
case NEON_LD4_s:
case NEON_LD4_s_post:
- do_load = true; // Fall through.
+ do_load = true;
+ V8_FALLTHROUGH;
case NEON_ST1_s:
case NEON_ST1_s_post:
case NEON_ST2_s:
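The mechanical `// Fall through.` → V8_FALLTHROUGH rewrites above turn a comment convention into an annotation that compilers can verify under -Wimplicit-fallthrough. A sketch of how such a macro is typically defined; V8's real definition lives in include/v8config.h and may differ in detail:

#if defined(__clang__)
#define V8_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define V8_FALLTHROUGH __attribute__((fallthrough))
#else
#define V8_FALLTHROUGH
#endif

enum Op { kLoadTwo, kLoadOne };

int Dispatch(Op op) {
  int count = 0;
  switch (op) {
    case kLoadTwo:
      count++;
      V8_FALLTHROUGH;  // deliberate: kLoadTwo also does the kLoadOne work
    case kLoadOne:
      count++;
      break;
  }
  return count;
}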
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index a8f229d764..18fa4d44ec 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -646,6 +646,7 @@ class LogicVRegister {
class Simulator : public DecoderVisitor, public SimulatorBase {
public:
static void SetRedirectInstruction(Instruction* instruction);
+ static bool ICacheMatch(void* one, void* two) { return false; }
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size) {
USE(i_cache);
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index 8cca6813f2..e8a7f1683b 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -3,9 +3,7 @@ set noparent
ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
-mtrofin@chromium.org
mstarzinger@chromium.org
-rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 5e3d0d0c2a..fc56b7e23a 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -21,8 +21,7 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -66,18 +65,21 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
if (!value->IsNaN()) return false;
}
-#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
- if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
- members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
- Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- STATIC_CHAR_VECTOR(#fname))); \
- Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
- if (!value->IsJSFunction()) return false; \
- Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
- if (func->shared()->code() != \
- isolate->builtins()->builtin(Builtins::kMath##FName)) { \
- return false; \
- } \
+#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
+ if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
+ members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
+ Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
+ STATIC_CHAR_VECTOR(#fname))); \
+ Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
+ if (!value->IsJSFunction()) return false; \
+ SharedFunctionInfo* shared = Handle<JSFunction>::cast(value)->shared(); \
+ if (shared->HasLazyDeserializationBuiltinId()) { \
+ if (shared->lazy_deserialization_builtin_id() != Builtins::kMath##FName) \
+ return false; \
+ } else if (shared->code() != \
+ isolate->builtins()->builtin(Builtins::kMath##FName)) { \
+ return false; \
+ } \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
@@ -284,11 +286,12 @@ CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(Isolate* isolate) {
wasm::ErrorThrower thrower(isolate, "AsmJs::Compile");
Handle<WasmModuleObject> compiled =
- SyncCompileTranslatedAsmJs(
- isolate, &thrower,
- wasm::ModuleWireBytes(module_->begin(), module_->end()),
- parse_info()->script(),
- Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
+ isolate->wasm_engine()
+ ->SyncCompileTranslatedAsmJs(
+ isolate, &thrower,
+ wasm::ModuleWireBytes(module_->begin(), module_->end()),
+ parse_info()->script(),
+ Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
.ToHandleChecked();
DCHECK(!thrower.error());
compile_time_ = compile_timer.Elapsed().InMillisecondsF();
@@ -389,7 +392,8 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
wasm::ErrorThrower thrower(isolate, "AsmJs::Instantiate");
MaybeHandle<Object> maybe_module_object =
- wasm::SyncInstantiate(isolate, &thrower, module, foreign, memory);
+ isolate->wasm_engine()->SyncInstantiate(isolate, &thrower, module,
+ foreign, memory);
if (maybe_module_object.is_null()) {
// An exception caused by the module start function will be set as pending
// and bypass the {ErrorThrower}, this happens in case of a stack overflow.
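The asm.js pipeline now reaches WebAssembly compilation through a WasmEngine object hung off the isolate instead of free functions in the wasm namespace. A hedged before/after sketch of the call shape (all types here are V8-internal, shown for shape only):

// Before: free function, engine state implicit.
//   wasm::SyncInstantiate(isolate, &thrower, module, foreign, memory);
//
// After: the engine is an explicit object reached through the isolate.
//   isolate->wasm_engine()->SyncInstantiate(isolate, &thrower, module,
//                                           foreign, memory);

Making the engine an explicit object is presumably groundwork for sharing compilation machinery beyond a single isolate, though this commit only reroutes the calls.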
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 6be80bf7af..f210b42a62 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -745,6 +745,12 @@ void AsmJsParser::ValidateFunction() {
CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
+
+ // Check against limit on number of parameters.
+ if (params.size() >= kV8MaxWasmFunctionParams) {
+ FAIL("Number of parameters exceeds internal limit");
+ }
+
CachedVector<ValueType> locals(cached_valuetype_vectors_);
ValidateFunctionLocals(params.size(), &locals);
diff --git a/deps/v8/src/asmjs/switch-logic.h b/deps/v8/src/asmjs/switch-logic.h
index 3ef34d9461..f770ddc33d 100644
--- a/deps/v8/src/asmjs/switch-logic.h
+++ b/deps/v8/src/asmjs/switch-logic.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ASMJS_SWITCH_LOGIC_H
-#define V8_ASMJS_SWITCH_LOGIC_H
+#ifndef V8_ASMJS_SWITCH_LOGIC_H_
+#define V8_ASMJS_SWITCH_LOGIC_H_
#include "src/globals.h"
#include "src/zone/zone-containers.h"
@@ -30,4 +30,4 @@ V8_EXPORT_PRIVATE CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
} // namespace internal
} // namespace v8
-#endif // V8_ASMJS_SWITCH_LOGIC_H
+#endif // V8_ASMJS_SWITCH_LOGIC_H_
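Several headers in this commit gain a trailing underscore on their include guards, normalizing them to the V8_<PATH>_<FILE>_H_ convention derived from the file path:

#ifndef V8_ASMJS_SWITCH_LOGIC_H_
#define V8_ASMJS_SWITCH_LOGIC_H_

// ... declarations ...

#endif  // V8_ASMJS_SWITCH_LOGIC_H_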
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 1b83735bc9..c566f35acb 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -176,12 +176,12 @@ AssemblerBase::~AssemblerBase() {
if (own_buffer_) DeleteArray(buffer_);
}
-void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
+void AssemblerBase::FlushICache(void* start, size_t size) {
if (size == 0) return;
#if defined(USE_SIMULATOR)
- base::LockGuard<base::Mutex> lock_guard(isolate->simulator_i_cache_mutex());
- Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
+ base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
+ Simulator::FlushICache(Simulator::i_cache(), start, size);
#else
CpuFeatures::FlushICache(start, size);
#endif // USE_SIMULATOR
@@ -195,9 +195,6 @@ void AssemblerBase::Print(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
-PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler)
- : PredictableCodeSizeScope(assembler, -1) {}
-
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
@@ -208,10 +205,7 @@ PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
}
PredictableCodeSizeScope::~PredictableCodeSizeScope() {
- // TODO(svenpanne) Remove the 'if' when everything works.
- if (expected_size_ >= 0) {
- CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
- }
+ CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
assembler_->set_predictable_code_size(old_value_);
}
@@ -301,16 +295,16 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
-void RelocInfo::set_wasm_context_reference(Isolate* isolate, Address address,
+void RelocInfo::set_wasm_context_reference(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmContextReference(rmode_));
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
-void RelocInfo::set_global_handle(Isolate* isolate, Address address,
+void RelocInfo::set_global_handle(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::wasm_call_address() const {
@@ -318,10 +312,10 @@ Address RelocInfo::wasm_call_address() const {
return Assembler::target_address_at(pc_, constant_pool_);
}
-void RelocInfo::set_wasm_call_address(Isolate* isolate, Address address,
+void RelocInfo::set_wasm_call_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_CALL);
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
@@ -341,17 +335,16 @@ Address RelocInfo::wasm_context_reference() const {
}
void RelocInfo::update_wasm_function_table_size_reference(
- Isolate* isolate, uint32_t old_size, uint32_t new_size,
- ICacheFlushMode icache_flush_mode) {
+ uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
- set_embedded_size(isolate, new_size, icache_flush_mode);
+ set_embedded_size(new_size, icache_flush_mode);
}
-void RelocInfo::set_target_address(Isolate* isolate, Address target,
+void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, target,
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTarget(rmode_)) {
@@ -449,7 +442,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
}
}
last_pc_ = rinfo->pc();
- last_mode_ = rmode;
#ifdef DEBUG
DCHECK_LE(begin_pos - pos_, kMaxSize);
#endif
@@ -561,7 +553,8 @@ void RelocIterator::next() {
done_ = true;
}
-RelocIterator::RelocIterator(Code* code, int mode_mask) {
+RelocIterator::RelocIterator(Code* code, int mode_mask)
+ : mode_mask_(mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
@@ -569,35 +562,30 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
// Relocation info is read backwards.
pos_ = code->relocation_start() + code->relocation_size();
end_ = code->relocation_start();
- done_ = false;
- mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
-RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
+ : mode_mask_(mode_mask) {
rinfo_.pc_ = desc.buffer;
- rinfo_.data_ = 0;
// Relocation info is read backwards.
pos_ = desc.buffer + desc.buffer_size;
end_ = pos_ - desc.reloc_size;
- done_ = false;
- mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
RelocIterator::RelocIterator(Vector<byte> instructions,
Vector<const byte> reloc_info, Address const_pool,
- int mode_mask) {
+ int mode_mask)
+ : mode_mask_(mode_mask) {
rinfo_.pc_ = instructions.start();
- rinfo_.data_ = 0;
rinfo_.constant_pool_ = const_pool;
+ rinfo_.flags_ = RelocInfo::kInNativeWasmCode;
// Relocation info is read backwards.
pos_ = reloc_info.start() + reloc_info.size();
end_ = reloc_info.start();
- done_ = false;
- mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -606,7 +594,7 @@ RelocIterator::RelocIterator(Vector<byte> instructions,
// Implementation of RelocInfo
#ifdef DEBUG
-bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
+bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
// Ensure there are no code targets or embedded objects present in the
// deoptimization entries, they would require relocation after code
// generation.
@@ -621,10 +609,8 @@ bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
- case NONE32:
- return "no reloc 32";
- case NONE64:
- return "no reloc 64";
+ case NONE:
+ return "no reloc";
case EMBEDDED_OBJECT:
return "embedded object";
case CODE_TARGET:
@@ -686,9 +672,21 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
<< ") (" << static_cast<const void*>(target_external_reference())
<< ")";
} else if (IsCodeTarget(rmode_)) {
- Code* code = Code::GetCodeFromTargetAddress(target_address());
- os << " (" << Code::Kind2String(code->kind()) << ") ("
- << static_cast<const void*>(target_address()) << ")";
+ const Address code_target = target_address();
+ if (flags_ & kInNativeWasmCode) {
+ os << " (wasm trampoline) ";
+ } else {
+ Code* code = Code::GetCodeFromTargetAddress(code_target);
+ DCHECK(code->IsCode());
+ os << " (" << Code::Kind2String(code->kind());
+ if (Builtins::IsBuiltin(code)) {
+ os << " " << Builtins::name(code->builtin_index());
+ } else if (code->kind() == Code::STUB) {
+ os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
+ }
+ os << ") ";
+ }
+ os << " (" << static_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
  // Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
@@ -744,8 +742,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case WASM_GLOBAL_HANDLE:
case WASM_CALL:
case JS_TO_WASM_CALL:
- case NONE32:
- case NONE64:
+ case NONE:
break;
case NUMBER_OF_MODES:
case PC_JUMP:
@@ -1465,6 +1462,12 @@ ExternalReference ExternalReference::copy_typed_array_elements_to_typed_array(
Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsToTypedArray)));
}
+ExternalReference ExternalReference::copy_typed_array_elements_slice(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsSlice)));
+}
+
ExternalReference ExternalReference::try_internalize_string_function(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1877,22 +1880,5 @@ void Assembler::RequestHeapObject(HeapObjectRequest request) {
heap_object_requests_.push_front(request);
}
-namespace {
-int caller_saved_codes[kNumJSCallerSaved];
-}
-
-void SetUpJSCallerSavedCodeData() {
- int i = 0;
- for (int r = 0; r < kNumRegs; r++)
- if ((kJSCallerSaved & (1 << r)) != 0) caller_saved_codes[i++] = r;
-
- DCHECK_EQ(i, kNumJSCallerSaved);
-}
-
-int JSCallerSavedCode(int n) {
- DCHECK(0 <= n && n < kNumJSCallerSaved);
- return caller_saved_codes[n];
-}
-
} // namespace internal
} // namespace v8
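A recurring change in this file is dropping the Isolate* parameter from code-patching helpers: the simulator's instruction cache and its mutex move from per-isolate state to statics on Simulator (i_cache(), i_cache_mutex()), so flushing no longer needs an isolate. The new call shape, as a sketch (the Code accessors are V8-internal):

// Before:
//   AssemblerBase::FlushICache(isolate, start, size);
// After:
//   AssemblerBase::FlushICache(code->instruction_start(),
//                              code->instruction_size());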
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 0cebdbc2d7..c45ec6910d 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -57,15 +57,12 @@ class ApiFunction;
namespace internal {
// Forward declarations.
+class InstructionStream;
class Isolate;
+class SCTableReference;
class SourcePosition;
class StatsCounter;
-void SetUpJSCallerSavedCodeData();
-
-// Return the code of the n-th saved register available to JavaScript.
-int JSCallerSavedCode(int n);
-
// -----------------------------------------------------------------------------
// Optimization for far-jmp like instructions that can be replaced by shorter.
@@ -162,7 +159,7 @@ class AssemblerBase: public Malloced {
static const int kMinimalBufferSize = 4*KB;
- static void FlushICache(Isolate* isolate, void* start, size_t size);
+ static void FlushICache(void* start, size_t size);
protected:
// The buffer into which code and relocation info are generated. It could
@@ -220,16 +217,14 @@ class DontEmitDebugCodeScope BASE_EMBEDDED {
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
- explicit PredictableCodeSizeScope(AssemblerBase* assembler);
PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
~PredictableCodeSizeScope();
- void ExpectSize(int expected_size) { expected_size_ = expected_size; }
private:
- AssemblerBase* assembler_;
- int expected_size_;
- int start_offset_;
- bool old_value_;
+ AssemblerBase* const assembler_;
+ int const expected_size_;
+ int const start_offset_;
+ bool const old_value_;
};
@@ -252,6 +247,8 @@ class CpuFeatureScope BASE_EMBEDDED {
#else
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
CheckPolicy check = kCheckSupported) {}
+ // Define a destructor to avoid unused variable warnings.
+ ~CpuFeatureScope() {}
#endif
};
@@ -283,7 +280,7 @@ class CpuFeatures : public AllStatic {
return (supported_ & (1u << f)) != 0;
}
- static inline bool SupportsCrankshaft();
+ static inline bool SupportsOptimizer();
static inline bool SupportsWasmSimd128();
@@ -341,6 +338,12 @@ enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
class RelocInfo {
public:
+ enum Flag : uint8_t {
+ kNoFlags = 0,
+ kInNativeWasmCode = 1u << 0, // Reloc info belongs to native wasm code.
+ };
+ typedef base::Flags<Flag> Flags;
+
// This string is used to add padding comments to the reloc info in cases
// where we are not sure to have enough space for patching in during
// lazy deoptimization. This is the case if we have indirect calls for which
@@ -357,7 +360,7 @@ class RelocInfo {
// The maximum pc delta that will use the short encoding.
static const int kMaxSmallPCDelta;
- enum Mode {
+ enum Mode : int8_t {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET,
EMBEDDED_OBJECT,
@@ -395,8 +398,7 @@ class RelocInfo {
// Pseudo-types
NUMBER_OF_MODES,
- NONE32, // never recorded 32-bit value
- NONE64, // never recorded 64-bit value
+ NONE, // never recorded value
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
@@ -456,9 +458,7 @@ class RelocInfo {
static inline bool IsInternalReferenceEncoded(Mode mode) {
return mode == INTERNAL_REFERENCE_ENCODED;
}
- static inline bool IsNone(Mode mode) {
- return mode == NONE32 || mode == NONE64;
- }
+ static inline bool IsNone(Mode mode) { return mode == NONE; }
static inline bool IsWasmContextReference(Mode mode) {
return mode == WASM_CONTEXT_REFERENCE;
}
@@ -476,7 +476,7 @@ class RelocInfo {
mode == WASM_CALL || mode == JS_TO_WASM_CALL;
}
- static inline int ModeMask(Mode mode) { return 1 << mode; }
+ static constexpr int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
byte* pc() const { return pc_; }
@@ -485,6 +485,9 @@ class RelocInfo {
intptr_t data() const { return data_; }
Code* host() const { return host_; }
Address constant_pool() const { return constant_pool_; }
+ void set_constant_pool(Address constant_pool) {
+ constant_pool_ = constant_pool;
+ }
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
@@ -508,25 +511,22 @@ class RelocInfo {
Address wasm_call_address() const;
void set_wasm_context_reference(
- Isolate* isolate, Address address,
+ Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_function_table_size_reference(
- Isolate* isolate, uint32_t old_base, uint32_t new_base,
+ uint32_t old_base, uint32_t new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
- Isolate* isolate, Address target,
+ Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- void set_global_handle(
- Isolate* isolate, Address address,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_global_handle(Address address, ICacheFlushMode icache_flush_mode =
+ FLUSH_ICACHE_IF_NEEDED);
void set_wasm_call_address(
- Isolate*, Address,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_js_to_wasm_address(
- Isolate*, Address,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -539,7 +539,7 @@ class RelocInfo {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Address target_runtime_entry(Assembler* origin));
INLINE(void set_target_runtime_entry(
- Isolate* isolate, Address target,
+ Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Cell* target_cell());
@@ -585,15 +585,15 @@ class RelocInfo {
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
- INLINE(void WipeOut(Isolate* isolate));
+ INLINE(void WipeOut());
template <typename ObjectVisitor>
- inline void Visit(Isolate* isolate, ObjectVisitor* v);
+ inline void Visit(ObjectVisitor* v);
#ifdef DEBUG
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
- static bool RequiresRelocation(Isolate* isolate, const CodeDesc& desc);
+ static bool RequiresRelocation(const CodeDesc& desc);
#endif
#ifdef ENABLE_DISASSEMBLER
@@ -609,10 +609,8 @@ class RelocInfo {
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
- void set_embedded_address(Isolate* isolate, Address address,
- ICacheFlushMode flush_mode);
- void set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode);
+ void set_embedded_address(Address address, ICacheFlushMode flush_mode);
+ void set_embedded_size(uint32_t size, ICacheFlushMode flush_mode);
uint32_t embedded_size() const;
Address embedded_address() const;
@@ -623,9 +621,10 @@ class RelocInfo {
// comment).
byte* pc_;
Mode rmode_;
- intptr_t data_;
+ intptr_t data_ = 0;
Code* host_;
Address constant_pool_ = nullptr;
+ Flags flags_;
friend class RelocIterator;
};
@@ -635,7 +634,6 @@ class RelocInfo {
class RelocInfoWriter BASE_EMBEDDED {
public:
RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}
- RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@@ -651,10 +649,7 @@ class RelocInfoWriter BASE_EMBEDDED {
// Max size (bytes) of a written RelocInfo. Longest encoding is
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
- // On ia32 and arm this is 1 + 4 + 1 + 1 + 4 = 11.
- // On x64 this is 1 + 4 + 1 + 1 + 8 == 15;
- // Here we use the maximum of the two.
- static const int kMaxSize = 15;
+ static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kPointerSize;
private:
inline uint32_t WriteLongPCJump(uint32_t pc_delta);
@@ -669,7 +664,6 @@ class RelocInfoWriter BASE_EMBEDDED {
byte* pos_;
byte* last_pc_;
- RelocInfo::Mode last_mode_;
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};
@@ -733,19 +727,14 @@ class RelocIterator: public Malloced {
const byte* pos_;
const byte* end_;
RelocInfo rinfo_;
- bool done_;
- int mode_mask_;
+ bool done_ = false;
+ const int mode_mask_;
+
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
-
//------------------------------------------------------------------------------
-// External function
-
-//----------------------------------------------------------------------------
-class SCTableReference;
-class Debug_Address;
-
+// External references
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated in
@@ -800,9 +789,7 @@ class ExternalReference BASE_EMBEDDED {
static void SetUp();
- // These functions must use the isolate in a thread-safe way.
- typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
- Type type);
+ typedef void* ExternalReferenceRedirector(void* original, Type type);
ExternalReference() : address_(nullptr) {}
@@ -999,6 +986,7 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference copy_typed_array_elements_to_typed_array(
Isolate* isolate);
+ static ExternalReference copy_typed_array_elements_slice(Isolate* isolate);
static ExternalReference page_flags(Page* page);
@@ -1073,9 +1061,8 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
- void* answer = (redirector == nullptr)
- ? address
- : (*redirector)(isolate, address, type);
+ void* answer =
+ (redirector == nullptr) ? address : (*redirector)(address, type);
return answer;
}
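Two small notes on the header changes above. First, the new kMaxSize formula (1 + 4 + 1 + 1 + kPointerSize) reproduces the old per-architecture constants it replaces: 11 on 32-bit targets (kPointerSize == 4) and 15 on 64-bit ones (kPointerSize == 8). Second, ModeMask becoming constexpr lets relocation masks fold into compile-time constants; a usage sketch following the long-standing RelocIterator idiom (V8-internal types, assumed shape):

// The mask is now usable as a compile-time constant.
constexpr int kMask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
                      RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);

void VisitTargets(Code* code) {
  for (RelocIterator it(code, kMask); !it.done(); it.next()) {
    RelocInfo* rinfo = it.rinfo();
    // ... inspect or patch rinfo's target here ...
  }
}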
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index ece6b5048a..32362199ae 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -6,7 +6,6 @@ littledan@chromium.org
marja@chromium.org
mstarzinger@chromium.org
neis@chromium.org
-rossberg@chromium.org
verwaest@chromium.org
# COMPONENT: Blink>JavaScript>Language
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.h b/deps/v8/src/ast/ast-function-literal-id-reindexer.h
index 837595f41b..400196da68 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.h
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
-#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_
+#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_
#include "src/ast/ast-traversal-visitor.h"
#include "src/base/macros.h"
@@ -33,4 +33,4 @@ class AstFunctionLiteralIdReindexer final
} // namespace internal
} // namespace v8
-#endif // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+#endif // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
deleted file mode 100644
index ade1a85349..0000000000
--- a/deps/v8/src/ast/ast-numbering.cc
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast/ast-numbering.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/compiler.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
- public:
- AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
- Compiler::EagerInnerFunctionLiterals* eager_literals)
- : zone_(zone), eager_literals_(eager_literals), suspend_count_(0) {
- InitializeAstVisitor(stack_limit);
- }
-
- bool Renumber(FunctionLiteral* node);
-
- private:
-// AST node visitor interface.
-#define DEFINE_VISIT(type) void Visit##type(type* node);
- AST_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
- void VisitSuspend(Suspend* node);
-
- void VisitStatementsAndDeclarations(Block* node);
- void VisitStatements(ZoneList<Statement*>* statements);
- void VisitDeclarations(Declaration::List* declarations);
- void VisitArguments(ZoneList<Expression*>* arguments);
- void VisitLiteralProperty(LiteralProperty* property);
-
- Zone* zone() const { return zone_; }
-
- Zone* zone_;
- Compiler::EagerInnerFunctionLiterals* eager_literals_;
- int suspend_count_;
- FunctionKind function_kind_;
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
-};
-
-void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
- VisitVariableProxy(node->proxy());
-}
-
-void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
-}
-
-void AstNumberingVisitor::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* node) {
- Visit(node->statement());
-}
-
-void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
-}
-
-void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
-}
-
-void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
-}
-
-void AstNumberingVisitor::VisitNativeFunctionLiteral(
- NativeFunctionLiteral* node) {
-}
-
-void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
- Visit(node->block());
- Visit(node->result());
-}
-
-void AstNumberingVisitor::VisitLiteral(Literal* node) {
-}
-
-void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
-}
-
-void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
-}
-
-void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
-}
-
-void AstNumberingVisitor::VisitSuperPropertyReference(
- SuperPropertyReference* node) {
- Visit(node->this_var());
- Visit(node->home_object());
-}
-
-void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
- Visit(node->this_var());
- Visit(node->new_target_var());
- Visit(node->this_function_var());
-}
-
-void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitSuspend(Suspend* node) {
- node->set_suspend_id(suspend_count_);
- suspend_count_++;
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitYield(Yield* node) { VisitSuspend(node); }
-
-void AstNumberingVisitor::VisitYieldStar(YieldStar* node) {
- node->set_suspend_id(suspend_count_++);
- if (IsAsyncGeneratorFunction(function_kind_)) {
- node->set_await_iterator_close_suspend_id(suspend_count_++);
- node->set_await_delegated_iterator_output_suspend_id(suspend_count_++);
- }
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitAwait(Await* node) { VisitSuspend(node); }
-
-void AstNumberingVisitor::VisitThrow(Throw* node) {
- Visit(node->exception());
-}
-
-void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitBlock(Block* node) {
- VisitStatementsAndDeclarations(node);
-}
-
-void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
- Scope* scope = node->scope();
- DCHECK(scope == nullptr || !scope->HasBeenRemoved());
- if (scope) VisitDeclarations(scope->declarations());
- VisitStatements(node->statements());
-}
-
-void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
- VisitVariableProxy(node->proxy());
- VisitFunctionLiteral(node->fun());
-}
-
-void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
- VisitArguments(node->arguments());
-}
-
-void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
- Visit(node->expression());
- Visit(node->statement());
-}
-
-void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
- node->set_first_suspend_id(suspend_count_);
- Visit(node->body());
- Visit(node->cond());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
- node->set_first_suspend_id(suspend_count_);
- Visit(node->cond());
- Visit(node->body());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
- DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
- Visit(node->try_block());
- Visit(node->catch_block());
-}
-
-void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Visit(node->try_block());
- Visit(node->finally_block());
-}
-
-void AstNumberingVisitor::VisitProperty(Property* node) {
- Visit(node->key());
- Visit(node->obj());
-}
-
-void AstNumberingVisitor::VisitResolvedProperty(ResolvedProperty* node) {
- Visit(node->object());
- Visit(node->property());
-}
-
-void AstNumberingVisitor::VisitAssignment(Assignment* node) {
- Visit(node->target());
- Visit(node->value());
-}
-
-void AstNumberingVisitor::VisitCompoundAssignment(CompoundAssignment* node) {
- VisitBinaryOperation(node->binary_operation());
- VisitAssignment(node);
-}
-
-void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
- Visit(node->left());
- Visit(node->right());
-}
-
-void AstNumberingVisitor::VisitNaryOperation(NaryOperation* node) {
- Visit(node->first());
- for (size_t i = 0; i < node->subsequent_length(); ++i) {
- Visit(node->subsequent(i));
- }
-}
-
-void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
- Visit(node->left());
- Visit(node->right());
-}
-
-void AstNumberingVisitor::VisitSpread(Spread* node) {
- Visit(node->expression());
-}
-
-void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
- UNREACHABLE();
-}
-
-void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
- Visit(node->iterable());
-}
-
-void AstNumberingVisitor::VisitGetTemplateObject(GetTemplateObject* node) {}
-
-void AstNumberingVisitor::VisitImportCallExpression(
- ImportCallExpression* node) {
- Visit(node->argument());
-}
-
-void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
- Visit(node->enumerable()); // Not part of loop.
- node->set_first_suspend_id(suspend_count_);
- Visit(node->each());
- Visit(node->body());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
- Visit(node->assign_iterator()); // Not part of loop.
- Visit(node->assign_next());
- node->set_first_suspend_id(suspend_count_);
- Visit(node->next_result());
- Visit(node->result_done());
- Visit(node->assign_each());
- Visit(node->body());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitConditional(Conditional* node) {
- Visit(node->condition());
- Visit(node->then_expression());
- Visit(node->else_expression());
-}
-
-void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
- Visit(node->condition());
- Visit(node->then_statement());
- if (node->HasElseStatement()) {
- Visit(node->else_statement());
- }
-}
-
-void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
- Visit(node->tag());
- for (CaseClause* clause : *node->cases()) {
- if (!clause->is_default()) Visit(clause->label());
- VisitStatements(clause->statements());
- }
-}
-
-void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
- if (node->init() != nullptr) Visit(node->init()); // Not part of loop.
- node->set_first_suspend_id(suspend_count_);
- if (node->cond() != nullptr) Visit(node->cond());
- if (node->next() != nullptr) Visit(node->next());
- Visit(node->body());
- node->set_suspend_count(suspend_count_ - node->first_suspend_id());
-}
-
-void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
- if (node->extends()) Visit(node->extends());
- if (node->constructor()) Visit(node->constructor());
- if (node->static_fields_initializer() != nullptr) {
- Visit(node->static_fields_initializer());
- }
- if (node->instance_fields_initializer_function() != nullptr) {
- Visit(node->instance_fields_initializer_function());
- }
- for (int i = 0; i < node->properties()->length(); i++) {
- VisitLiteralProperty(node->properties()->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitInitializeClassFieldsStatement(
- InitializeClassFieldsStatement* node) {
- for (int i = 0; i < node->fields()->length(); i++) {
- VisitLiteralProperty(node->fields()->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
- for (int i = 0; i < node->properties()->length(); i++) {
- VisitLiteralProperty(node->properties()->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
- Visit(node->key());
- Visit(node->value());
-}
-
-void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
- for (int i = 0; i < node->values()->length(); i++) {
- Visit(node->values()->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitCall(Call* node) {
- Visit(node->expression());
- VisitArguments(node->arguments());
-}
-
-void AstNumberingVisitor::VisitCallNew(CallNew* node) {
- Visit(node->expression());
- VisitArguments(node->arguments());
-}
-
-void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
- if (statements == nullptr) return;
- for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
- if (statements->at(i)->IsJump()) break;
- }
-}
-
-void AstNumberingVisitor::VisitDeclarations(Declaration::List* decls) {
- for (Declaration* decl : *decls) Visit(decl);
-}
-
-void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- Visit(arguments->at(i));
- }
-}
-
-void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
- if (node->ShouldEagerCompile()) {
- if (eager_literals_) {
- eager_literals_->Add(new (zone())
- ThreadedListZoneEntry<FunctionLiteral*>(node));
- }
-
- // If the function literal is being eagerly compiled, recurse into the
- // declarations and body of the function literal.
- if (!AstNumbering::Renumber(stack_limit_, zone_, node, eager_literals_)) {
- SetStackOverflow();
- return;
- }
- }
-}
-
-void AstNumberingVisitor::VisitRewritableExpression(
- RewritableExpression* node) {
- Visit(node->expression());
-}
-
-bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
- DeclarationScope* scope = node->scope();
- DCHECK(!scope->HasBeenRemoved());
- function_kind_ = node->kind();
-
- VisitDeclarations(scope->declarations());
- VisitStatements(node->body());
-
- node->set_suspend_count(suspend_count_);
-
- return !HasStackOverflow();
-}
-
-bool AstNumbering::Renumber(
- uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
- Compiler::EagerInnerFunctionLiterals* eager_literals) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
- return visitor.Renumber(function);
-}
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ast/ast-numbering.h b/deps/v8/src/ast/ast-numbering.h
deleted file mode 100644
index 11122803b8..0000000000
--- a/deps/v8/src/ast/ast-numbering.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_AST_NUMBERING_H_
-#define V8_AST_AST_NUMBERING_H_
-
-#include <stdint.h>
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FunctionLiteral;
-class Isolate;
-class Zone;
-template <typename T>
-class ThreadedList;
-template <typename T>
-class ThreadedListZoneEntry;
-template <typename T>
-class ZoneVector;
-
-namespace AstNumbering {
-// Assign bailout IDs, and generator suspend IDs to an AST node tree; perform
-// catch prediction for TryStatements. If |eager_literals| is non-null, adds any
-// eager inner literal functions into it.
-bool Renumber(
- uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
- ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
-}
-
-// Some details on suspend IDs
-// -------------------------
-//
-// In order to assist Ignition in generating bytecode for a generator function,
-// we assign a unique number (the suspend ID) to each Suspend node in its AST.
-// We also annotate loops with the number of suspends they contain
-// (loop.suspend_count) and the smallest ID of those (loop.first_suspend_id),
-// and we annotate the function itself with the number of suspends it contains
-// (function.suspend_count).
-//
-// The way in which we choose the IDs is simply by enumerating the Suspend
-// nodes.
-// Ignition relies on the following properties:
-// - For each loop l and each suspend y of l:
-// l.first_suspend_id <=
-// s.suspend_id < l.first_suspend_id + l.suspend_count
-// - For the generator function f itself and each suspend s of f:
-// 0 <= s.suspend_id < f.suspend_count
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_AST_NUMBERING_H_
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index da14d87475..2856abb40c 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -311,7 +311,7 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
: LiteralProperty(key, value, is_computed_name),
kind_(kind),
is_static_(is_static),
- computed_name_var_(nullptr) {}
+ private_or_computed_name_var_(nullptr) {}
bool ObjectLiteral::Property::IsCompileTimeValue() const {
return kind_ == CONSTANT ||
@@ -683,8 +683,8 @@ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
}
}
}
- return isolate->factory()->NewTemplateObjectDescription(
- this->hash(), raw_strings, cooked_strings);
+ return isolate->factory()->NewTemplateObjectDescription(raw_strings,
+ cooked_strings);
}
static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) {
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index f608621d3b..661c5b7293 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -437,21 +437,12 @@ class IterationStatement : public BreakableStatement {
ZoneList<const AstRawString*>* labels() const { return labels_; }
- int suspend_count() const { return suspend_count_; }
- int first_suspend_id() const { return first_suspend_id_; }
- void set_suspend_count(int suspend_count) { suspend_count_ = suspend_count; }
- void set_first_suspend_id(int first_suspend_id) {
- first_suspend_id_ = first_suspend_id;
- }
-
protected:
IterationStatement(ZoneList<const AstRawString*>* labels, int pos,
NodeType type)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
labels_(labels),
- body_(nullptr),
- suspend_count_(0),
- first_suspend_id_(0) {}
+ body_(nullptr) {}
void Initialize(Statement* body) { body_ = body; }
static const uint8_t kNextBitFieldIndex =
@@ -460,8 +451,6 @@ class IterationStatement : public BreakableStatement {
private:
ZoneList<const AstRawString*>* labels_;
Statement* body_;
- int suspend_count_;
- int first_suspend_id_;
};
@@ -1486,6 +1475,7 @@ class ArrayLiteral final : public AggregateLiteral {
ZoneList<Expression*>* values_;
};
+enum class HoleCheckMode { kRequired, kElided };
class VariableProxy final : public Expression {
public:
@@ -1540,6 +1530,11 @@ class VariableProxy final : public Expression {
HoleCheckModeField::update(bit_field_, HoleCheckMode::kRequired);
}
+ bool is_private_field() const { return IsPrivateField::decode(bit_field_); }
+ void set_is_private_field() {
+ bit_field_ = IsPrivateField::update(bit_field_, true);
+ }
+
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1559,7 +1554,8 @@ class VariableProxy final : public Expression {
bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
- HoleCheckModeField::encode(HoleCheckMode::kElided);
+ HoleCheckModeField::encode(HoleCheckMode::kElided) |
+ IsPrivateField::encode(false);
}
explicit VariableProxy(const VariableProxy* copy_from);
@@ -1571,6 +1567,7 @@ class VariableProxy final : public Expression {
class IsNewTargetField : public BitField<bool, IsResolvedField::kNext, 1> {};
class HoleCheckModeField
: public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
+ class IsPrivateField : public BitField<bool, HoleCheckModeField::kNext, 1> {};
union {
const AstRawString* raw_name_; // if !is_resolved_
@@ -1590,7 +1587,6 @@ enum LhsKind {
KEYED_SUPER_PROPERTY
};
-
class Property final : public Expression {
public:
bool IsValidReferenceExpression() const { return true; }
@@ -2096,11 +2092,6 @@ class Suspend : public Expression {
return OnAbruptResumeField::decode(bit_field_);
}
- int suspend_id() const { return suspend_id_; }
- void set_suspend_id(int id) { suspend_id_ = id; }
-
- inline bool IsInitialYield() const { return suspend_id_ == 0 && IsYield(); }
-
private:
friend class AstNodeFactory;
friend class Yield;
@@ -2109,11 +2100,10 @@ class Suspend : public Expression {
Suspend(NodeType node_type, Expression* expression, int pos,
OnAbruptResume on_abrupt_resume)
- : Expression(pos, node_type), suspend_id_(-1), expression_(expression) {
+ : Expression(pos, node_type), expression_(expression) {
bit_field_ |= OnAbruptResumeField::encode(on_abrupt_resume);
}
- int suspend_id_;
Expression* expression_;
class OnAbruptResumeField
@@ -2128,47 +2118,11 @@ class Yield final : public Suspend {
};
class YieldStar final : public Suspend {
- public:
- // In addition to the normal suspend for yield*, a yield* in an async
- // generator has 2 additional suspends:
- // - One for awaiting the iterator result of closing the generator when
- // resumed with a "throw" completion, and a throw method is not present
- // on the delegated iterator (await_iterator_close_suspend_id)
- // - One for awaiting the iterator result yielded by the delegated iterator
- // (await_delegated_iterator_output_suspend_id)
- int await_iterator_close_suspend_id() const {
- return await_iterator_close_suspend_id_;
- }
- void set_await_iterator_close_suspend_id(int id) {
- await_iterator_close_suspend_id_ = id;
- }
-
- int await_delegated_iterator_output_suspend_id() const {
- return await_delegated_iterator_output_suspend_id_;
- }
- void set_await_delegated_iterator_output_suspend_id(int id) {
- await_delegated_iterator_output_suspend_id_ = id;
- }
-
- inline int suspend_count() const {
- if (await_iterator_close_suspend_id_ != -1) {
- DCHECK_NE(-1, await_delegated_iterator_output_suspend_id_);
- return 3;
- }
- return 1;
- }
-
private:
friend class AstNodeFactory;
-
YieldStar(Expression* expression, int pos)
: Suspend(kYieldStar, expression, pos,
- Suspend::OnAbruptResume::kNoControl),
- await_iterator_close_suspend_id_(-1),
- await_delegated_iterator_output_suspend_id_(-1) {}
-
- int await_iterator_close_suspend_id_;
- int await_delegated_iterator_output_suspend_id_;
+ Suspend::OnAbruptResume::kNoControl) {}
};
class Await final : public Suspend {
@@ -2407,14 +2361,29 @@ class FunctionLiteral final : public Expression {
// about a class literal's properties from the parser to the code generator.
class ClassLiteralProperty final : public LiteralProperty {
public:
- enum Kind : uint8_t { METHOD, GETTER, SETTER, FIELD };
+ enum Kind : uint8_t { METHOD, GETTER, SETTER, PUBLIC_FIELD, PRIVATE_FIELD };
Kind kind() const { return kind_; }
bool is_static() const { return is_static_; }
- void set_computed_name_var(Variable* var) { computed_name_var_ = var; }
- Variable* computed_name_var() const { return computed_name_var_; }
+ void set_computed_name_var(Variable* var) {
+ DCHECK_EQ(PUBLIC_FIELD, kind());
+ private_or_computed_name_var_ = var;
+ }
+ Variable* computed_name_var() const {
+ DCHECK_EQ(PUBLIC_FIELD, kind());
+ return private_or_computed_name_var_;
+ }
+
+ void set_private_field_name_var(Variable* var) {
+ DCHECK_EQ(PRIVATE_FIELD, kind());
+ private_or_computed_name_var_ = var;
+ }
+ Variable* private_field_name_var() const {
+ DCHECK_EQ(PRIVATE_FIELD, kind());
+ return private_or_computed_name_var_;
+ }
private:
friend class AstNodeFactory;
@@ -2424,7 +2393,7 @@ class ClassLiteralProperty final : public LiteralProperty {
Kind kind_;
bool is_static_;
- Variable* computed_name_var_;
+ Variable* private_or_computed_name_var_;
};
class InitializeClassFieldsStatement final : public Statement {
@@ -2665,7 +2634,6 @@ class GetTemplateObject final : public Expression {
const ZoneList<const AstRawString*>* raw_strings() const {
return raw_strings_;
}
- int hash() const { return hash_; }
Handle<TemplateObjectDescription> GetOrBuildDescription(Isolate* isolate);
@@ -2673,16 +2641,13 @@ class GetTemplateObject final : public Expression {
friend class AstNodeFactory;
GetTemplateObject(const ZoneList<const AstRawString*>* cooked_strings,
- const ZoneList<const AstRawString*>* raw_strings, int hash,
- int pos)
+ const ZoneList<const AstRawString*>* raw_strings, int pos)
: Expression(pos, kGetTemplateObject),
cooked_strings_(cooked_strings),
- raw_strings_(raw_strings),
- hash_(hash) {}
+ raw_strings_(raw_strings) {}
const ZoneList<const AstRawString*>* cooked_strings_;
const ZoneList<const AstRawString*>* raw_strings_;
- int hash_;
};
// ----------------------------------------------------------------------------
@@ -3257,9 +3222,8 @@ class AstNodeFactory final BASE_EMBEDDED {
GetTemplateObject* NewGetTemplateObject(
const ZoneList<const AstRawString*>* cooked_strings,
- const ZoneList<const AstRawString*>* raw_strings, int hash, int pos) {
- return new (zone_)
- GetTemplateObject(cooked_strings, raw_strings, hash, pos);
+ const ZoneList<const AstRawString*>* raw_strings, int pos) {
+ return new (zone_) GetTemplateObject(cooked_strings, raw_strings, pos);
}
ImportCallExpression* NewImportCallExpression(Expression* args, int pos) {
diff --git a/deps/v8/src/ast/compile-time-value.h b/deps/v8/src/ast/compile-time-value.h
index e8ded43122..874bc1b32f 100644
--- a/deps/v8/src/ast/compile-time-value.h
+++ b/deps/v8/src/ast/compile-time-value.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_COMPILE_TIME_VALUE
-#define V8_AST_COMPILE_TIME_VALUE
+#ifndef V8_AST_COMPILE_TIME_VALUE_H_
+#define V8_AST_COMPILE_TIME_VALUE_H_
#include "src/allocation.h"
#include "src/globals.h"
@@ -43,4 +43,4 @@ class CompileTimeValue : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_AST_COMPILE_TIME_VALUE
+#endif // V8_AST_COMPILE_TIME_VALUE_H_
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 374c848289..d898a70479 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -909,8 +909,6 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
@@ -919,8 +917,6 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -929,8 +925,6 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
@@ -941,8 +935,6 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
IndentedScope indent(this, "FOR IN", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->enumerable());
PrintIndentedVisit("BODY", node->body());
@@ -951,8 +943,6 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
IndentedScope indent(this, "FOR OF", node->position());
- PrintIndented("SUSPEND COUNT");
- Print(" %d\n", node->suspend_count());
PrintIndentedVisit("INIT", node->assign_iterator());
PrintIndentedVisit("NEXT", node->next_result());
PrintIndentedVisit("DONE", node->result_done());
@@ -1053,8 +1043,11 @@ void AstPrinter::PrintClassProperties(
case ClassLiteral::Property::SETTER:
prop_kind = "SETTER";
break;
- case ClassLiteral::Property::FIELD:
- prop_kind = "FIELD";
+ case ClassLiteral::Property::PUBLIC_FIELD:
+ prop_kind = "PUBLIC FIELD";
+ break;
+ case ClassLiteral::Property::PRIVATE_FIELD:
+ prop_kind = "PRIVATE FIELD";
break;
}
EmbeddedVector<char, 128> buf;
@@ -1208,21 +1201,21 @@ void AstPrinter::VisitCompoundAssignment(CompoundAssignment* node) {
void AstPrinter::VisitYield(Yield* node) {
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "YIELD id %d", node->suspend_id());
+ SNPrintF(buf, "YIELD");
IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
void AstPrinter::VisitYieldStar(YieldStar* node) {
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "YIELD_STAR id %d", node->suspend_id());
+ SNPrintF(buf, "YIELD_STAR");
IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
void AstPrinter::VisitAwait(Await* node) {
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "AWAIT id %d", node->suspend_id());
+ SNPrintF(buf, "AWAIT");
IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
@@ -1232,7 +1225,6 @@ void AstPrinter::VisitThrow(Throw* node) {
Visit(node->exception());
}
-
void AstPrinter::VisitProperty(Property* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "PROPERTY");
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 8f2f85080c..a87e756a0e 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -643,7 +643,7 @@ void DeclarationScope::AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate) {
}
}
-void DeclarationScope::Analyze(ParseInfo* info) {
+bool DeclarationScope::Analyze(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(
info->runtime_call_stats(),
info->on_background_thread()
@@ -681,7 +681,7 @@ void DeclarationScope::Analyze(ParseInfo* info) {
info->consumed_preparsed_scope_data()->RestoreScopeAllocationData(scope);
}
- scope->AllocateVariables(info);
+ if (!scope->AllocateVariables(info)) return false;
#ifdef DEBUG
if (info->is_native() ? FLAG_print_builtin_scopes : FLAG_print_scopes) {
@@ -691,6 +691,8 @@ void DeclarationScope::Analyze(ParseInfo* info) {
scope->CheckScopePositions();
scope->CheckZones();
#endif
+
+ return true;
}
void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
@@ -1342,13 +1344,18 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
return nullptr;
}
-void DeclarationScope::AllocateVariables(ParseInfo* info) {
+bool DeclarationScope::AllocateVariables(ParseInfo* info) {
// Module variables must be allocated before variable resolution
// to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
- ResolveVariablesRecursively(info);
+ if (!ResolveVariablesRecursively(info)) {
+ DCHECK(info->pending_error_handler()->has_pending_error());
+ return false;
+ }
AllocateVariablesRecursively();
+
+ return true;
}
bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
@@ -1811,7 +1818,8 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
return var;
}
-Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
+Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
+ Scope* outer_scope_end) {
DCHECK_NE(outer_scope_end, this);
// Short-cut: whenever we find a debug-evaluate scope, just look everything up
// dynamically. Debug-evaluate doesn't properly create scope info for the
@@ -1834,6 +1842,15 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
// We may just be trying to find all free variables. In that case, don't
// declare them in the outer scope.
if (!is_script_scope()) return nullptr;
+
+ if (proxy->is_private_field()) {
+ info->pending_error_handler()->ReportMessageAt(
+ proxy->position(), proxy->position() + 1,
+ MessageTemplate::kInvalidPrivateFieldAccess, proxy->raw_name(),
+ kSyntaxError);
+ return nullptr;
+ }
+
// No binding has been found. Declare a variable on the global object.
return AsDeclarationScope()->DeclareDynamicGlobal(proxy->raw_name(),
NORMAL_VARIABLE);
@@ -1841,7 +1858,7 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
DCHECK(!is_script_scope());
- var = outer_scope_->LookupRecursive(proxy, outer_scope_end);
+ var = outer_scope_->LookupRecursive(info, proxy, outer_scope_end);
// The variable could not be resolved statically.
if (var == nullptr) return var;
@@ -1899,11 +1916,16 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
return var;
}
-void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
+bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
DCHECK(info->script_scope()->is_script_scope());
DCHECK(!proxy->is_resolved());
- Variable* var = LookupRecursive(proxy, nullptr);
+ Variable* var = LookupRecursive(info, proxy, nullptr);
+ if (var == nullptr) {
+ DCHECK(proxy->is_private_field());
+ return false;
+ }
ResolveTo(info, proxy, var);
+ return true;
}
namespace {
@@ -1983,8 +2005,8 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
// The following variable name may be minified. If so, disable
// minification in js2c.py for better output.
Handle<String> name = proxy->raw_name()->string();
- V8_Fatal(__FILE__, __LINE__, "Unbound variable: '%s' in native script.",
- name->ToCString().get());
+ FATAL("Unbound variable: '%s' in native script.",
+ name->ToCString().get());
}
VariableLocation location = var->location();
DCHECK(location == VariableLocation::LOCAL ||
@@ -1999,7 +2021,7 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
proxy->BindTo(var);
}
-void Scope::ResolveVariablesRecursively(ParseInfo* info) {
+bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK(info->script_scope()->is_script_scope());
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
@@ -2008,7 +2030,11 @@ void Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK_EQ(variables_.occupancy(), 0);
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
- Variable* var = outer_scope()->LookupRecursive(proxy, nullptr);
+ Variable* var = outer_scope()->LookupRecursive(info, proxy, nullptr);
+ if (var == nullptr) {
+ DCHECK(proxy->is_private_field());
+ return false;
+ }
if (!var->is_dynamic()) {
var->set_is_used();
var->ForceContextAllocation();
@@ -2019,15 +2045,16 @@ void Scope::ResolveVariablesRecursively(ParseInfo* info) {
// Resolve unresolved variables for this scope.
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
- ResolveVariable(info, proxy);
+ if (!ResolveVariable(info, proxy)) return false;
}
// Resolve unresolved variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr;
scope = scope->sibling_) {
- scope->ResolveVariablesRecursively(info);
+ if (!scope->ResolveVariablesRecursively(info)) return false;
}
}
+ return true;
}
VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
@@ -2050,7 +2077,7 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
next = proxy->next_unresolved();
DCHECK(!proxy->is_resolved());
Variable* var =
- lookup->LookupRecursive(proxy, max_outer_scope->outer_scope());
+ lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope());
if (var == nullptr) {
proxy->set_next_unresolved(stack);
stack = proxy;
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index d2e8886319..2ffaaf6752 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -587,10 +587,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// scope, and stopping when reaching the outer_scope_end scope. If the code is
// executed because of a call to 'eval', the context parameter should be set
// to the calling context of 'eval'.
- Variable* LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end);
+ Variable* LookupRecursive(ParseInfo* info, VariableProxy* proxy,
+ Scope* outer_scope_end);
void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
- void ResolveVariable(ParseInfo* info, VariableProxy* proxy);
- void ResolveVariablesRecursively(ParseInfo* info);
+ MUST_USE_RESULT bool ResolveVariable(ParseInfo* info, VariableProxy* proxy);
+ MUST_USE_RESULT bool ResolveVariablesRecursively(ParseInfo* info);
// Finds free variables of this scope. This mutates the unresolved variables
// list along the way, so full resolution cannot be done afterwards.
@@ -849,7 +850,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
- static void Analyze(ParseInfo* info);
+ //
+ // Returns false if private fields cannot be resolved, in which case
+ // ParseInfo's pending_error_handler will be populated with an error.
+ // Otherwise, returns true.
+ MUST_USE_RESULT
+ static bool Analyze(ParseInfo* info);
// To be called during parsing. Do just enough scope analysis that we can
// discard the Scope contents for lazily compiled functions. In particular,
@@ -920,7 +926,9 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// In the case of code compiled and run using 'eval', the context
// parameter is the context in which eval was called. In all other
// cases the context parameter is an empty handle.
- void AllocateVariables(ParseInfo* info);
+ //
+ // Returns false if private fields cannot be resolved.
+ bool AllocateVariables(ParseInfo* info);
void SetDefaults();
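Scope analysis now reports failure through MUST_USE_RESULT bools instead of assuming resolution always succeeds: each level checks its callee, returns false on the first unresolved private field, and leaves the diagnostic in the pending error handler. A reduced sketch of that propagation shape, using hypothetical types rather than the V8 classes:

    // Early-exit error propagation: record once, then unwind via bools.
    #include <vector>

    struct ErrorHandler { bool has_pending_error = false; };

    struct ScopeSketch {
      std::vector<ScopeSketch*> inner;
      bool has_unresolved_private_field = false;

      [[nodiscard]] bool Resolve(ErrorHandler* errors) {
        if (has_unresolved_private_field) {
          errors->has_pending_error = true;  // diagnostic recorded here
          return false;                      // callers just propagate
        }
        for (ScopeSketch* s : inner)
          if (!s->Resolve(errors)) return false;
        return true;
      }
    };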
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index c8e81c69d4..51e3708b6a 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -26,6 +26,7 @@ namespace internal {
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
+ V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
@@ -55,8 +56,6 @@ namespace internal {
"Should not directly enter OSR-compiled function") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
- V(kTheCurrentStackPointerIsBelowCsp, \
- "The current stack pointer is below csp") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
@@ -89,6 +88,7 @@ namespace internal {
"Wrong address or value passed to RecordWrite") \
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
+ V(kWrongFunctionCodeStart, "Wrong value in code start register passed") \
V(kWrongFunctionContext, "Wrong context passed to function")
#define BAILOUT_MESSAGES_LIST(V) \
@@ -100,6 +100,7 @@ namespace internal {
"Cyclic object state detected by escape analysis") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
+ V(kFunctionTooBig, "Function is too big to be optimized") \
V(kLiveEdit, "LiveEdit") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNotEnoughVirtualRegistersRegalloc, \
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index d482d75cfb..0fcf818069 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -6,7 +6,7 @@
'../third_party/icu/icu.isolate',
# MSVS runtime libraries.
- '../gypfiles/win/msvs_dependencies.isolate',
+ '../gni/msvs_dependencies.isolate',
],
'conditions': [
['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 854d846cc0..5ba1ad4246 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ATOMIC_UTILS_H_
-#define V8_ATOMIC_UTILS_H_
+#ifndef V8_BASE_ATOMIC_UTILS_H_
+#define V8_BASE_ATOMIC_UTILS_H_
#include <limits.h>
#include <type_traits>
@@ -419,4 +419,4 @@ class AtomicElement {
} // namespace base
} // namespace v8
-#endif // #define V8_ATOMIC_UTILS_H_
+#endif // V8_BASE_ATOMIC_UTILS_H_
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
index 0779bfbd25..bd79558313 100644
--- a/deps/v8/src/base/atomicops_internals_portable.h
+++ b/deps/v8/src/base/atomicops_internals_portable.h
@@ -26,8 +26,8 @@
// needs to increment twice (which the compiler should be able to detect and
// optimize).
-#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#include <atomic>
diff --git a/deps/v8/src/base/atomicops_internals_std.h b/deps/v8/src/base/atomicops_internals_std.h
index f47152aa8c..4ce7b461e0 100644
--- a/deps/v8/src/base/atomicops_internals_std.h
+++ b/deps/v8/src/base/atomicops_internals_std.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_ATOMICOPS_INTERNALS_STD_H_
-#define BASE_ATOMICOPS_INTERNALS_STD_H_
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_STD_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_STD_H_
#include <atomic>
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 22e0511dc7..6ab0ffee29 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -335,7 +335,8 @@ CPU::CPU()
has_vfp3_(false),
has_vfp3_d32_(false),
is_fp64_mode_(false),
- has_non_stop_time_stamp_counter_(false) {
+ has_non_stop_time_stamp_counter_(false),
+ has_msa_(false) {
memcpy(vendor_, "Unknown", 8);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
int cpu_info[4];
diff --git a/deps/v8/src/base/file-utils.h b/deps/v8/src/base/file-utils.h
index 271f0ffb05..afd9a1fc25 100644
--- a/deps/v8/src/base/file-utils.h
+++ b/deps/v8/src/base/file-utils.h
@@ -18,4 +18,4 @@ V8_BASE_EXPORT char* RelativePath(char** buffer, const char* exec_path,
} // namespace base
} // namespace v8
-#endif // V8_FILE_UTILS_H_
+#endif // V8_BASE_FILE_UTILS_H_
diff --git a/deps/v8/src/base/format-macros.h b/deps/v8/src/base/format-macros.h
index 5f5fe5df24..e2234684a8 100644
--- a/deps/v8/src/base/format-macros.h
+++ b/deps/v8/src/base/format-macros.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_FORMAT_MACROS_H_
-#define BASE_FORMAT_MACROS_H_
+#ifndef V8_BASE_FORMAT_MACROS_H_
+#define V8_BASE_FORMAT_MACROS_H_
// This file defines the format macros for some integer types.
@@ -94,4 +94,4 @@
#endif
-#endif // BASE_FORMAT_MACROS_H_
+#endif // V8_BASE_FORMAT_MACROS_H_
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index ad5349ac7e..e58fdba09f 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -119,16 +119,50 @@ DEFINE_CHECK_OP_IMPL(GT)
} // namespace base
} // namespace v8
+namespace {
+
+// FailureMessage is a stack-allocated object which has a special marker field
+// at the start and at the end. This makes it possible to retrieve the embedded
+// message from the stack.
+//
+class FailureMessage {
+ public:
+ explicit FailureMessage(const char* format, va_list arguments) {
+ memset(&message_, 0, arraysize(message_));
+ v8::base::OS::VSNPrintF(&message_[0], arraysize(message_), format,
+ arguments);
+ }
+
+ static const uintptr_t kStartMarker = 0xdecade10;
+ static const uintptr_t kEndMarker = 0xdecade11;
+ static const int kMessageBufferSize = 1024;
+
+ uintptr_t start_marker_ = kStartMarker;
+ char message_[kMessageBufferSize];
+ uintptr_t end_marker_ = kEndMarker;
+};
+
+} // namespace
+
void V8_Fatal(const char* file, int line, const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+  // Format the error message into a stack object for later retrieval by the
+ // crash processor.
+ FailureMessage message(format, arguments);
+ va_end(arguments);
+
fflush(stdout);
fflush(stderr);
v8::base::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file,
line);
- va_list arguments;
+
+ // Print the error message.
va_start(arguments, format);
v8::base::OS::VPrintError(format, arguments);
va_end(arguments);
- v8::base::OS::PrintError("\n#\n");
+ // Print the message object's address to force stack allocation.
+ v8::base::OS::PrintError("\n#\n#\n#\n#FailureMessage Object: %p", &message);
if (v8::base::g_print_stack_trace) v8::base::g_print_stack_trace();
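The new FailureMessage lives in V8_Fatal's own stack frame, bracketed by the kStartMarker/kEndMarker constants, so a crash processor scanning raw stack memory can locate the formatted text after the abort; printing the object's address keeps the compiler from optimizing the frame slot away. A reduced sketch of the idea (illustrative values, not V8's exact layout):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct StackMessage {
      explicit StackMessage(const char* text) {
        std::memset(buffer, 0, sizeof buffer);
        std::snprintf(buffer, sizeof buffer, "%s", text);
      }
      uintptr_t start_marker = 0xdecade10;  // scanner looks for this pair
      char buffer[1024];
      uintptr_t end_marker = 0xdecade11;
    };

    [[noreturn]] void FatalWithStackMessage(const char* text) {
      StackMessage message(text);  // stack-allocated on purpose
      std::fprintf(stderr, "FailureMessage object: %p\n",
                   static_cast<void*>(&message));
      std::abort();  // the aborted frame still holds the marked message
    }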
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 5275fdc6a6..a21bc5e423 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -44,11 +44,11 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
//
// We make sure CHECK et al. always evaluates their arguments, as
// doing CHECK(FunctionWithSideEffect()) is a common idiom.
-#define CHECK_WITH_MSG(condition, message) \
- do { \
- if (V8_UNLIKELY(!(condition))) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", message); \
- } \
+#define CHECK_WITH_MSG(condition, message) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ FATAL("Check failed: %s.", message); \
+ } \
} while (0)
#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
@@ -70,7 +70,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
typename ::v8::base::pass_value_or_ref<decltype(lhs)>::type, \
typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>( \
(lhs), (rhs), #lhs " " #op " " #rhs)) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
+ FATAL("Check failed: %s.", _msg->c_str()); \
delete _msg; \
} \
} while (0)
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index a265408d91..9de42131a4 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -338,4 +338,4 @@ bool is_inbounds(float_t v) {
(kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound));
}
-#endif // V8_BASE_MACROS_H_
+#endif // V8_BASE_MACROS_H_
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index a229745f84..ea32c403ac 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -125,7 +125,7 @@ class Optional {
constexpr Optional() {}
- explicit constexpr Optional(base::nullopt_t) {}
+ constexpr Optional(base::nullopt_t) {} // NOLINT(runtime/explicit)
Optional(const Optional& other) {
if (!other.storage_.is_null_) Init(other.value());
@@ -135,10 +135,12 @@ class Optional {
if (!other.storage_.is_null_) Init(std::move(other.value()));
}
- explicit constexpr Optional(const T& value) : storage_(value) {}
+ constexpr Optional(const T& value) // NOLINT(runtime/explicit)
+ : storage_(value) {}
// TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
- explicit Optional(T&& value) : storage_(std::move(value)) {}
+ Optional(T&& value) // NOLINT(runtime/explicit)
+ : storage_(std::move(value)) {}
// TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
template <class... Args>
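Dropping explicit here brings base::Optional in line with std::optional: a plain value or nullopt now converts implicitly, so call sites can return either directly. What the change enables, shown with std::optional as a stand-in:

    #include <optional>
    #include <string>

    std::optional<std::string> FindName(int id) {
      if (id == 0) return std::nullopt;  // implicit from nullopt_t
      return "found";                    // implicit from T
    }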
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index eabd53570f..0d4ec9a10d 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -139,7 +139,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// base will be nullptr.
if (base != nullptr) break;
}
- DCHECK_EQ(base, aligned_base);
+ DCHECK_IMPLIES(base, base == aligned_base);
return reinterpret_cast<void*>(base);
}
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 38a7070e85..bba3f1baba 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -131,5 +131,21 @@ void OS::SignalCodeMovingGC() {
UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217.
}
+int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
+ const auto kNanosPerMicrosecond = 1000ULL;
+ const auto kMicrosPerSecond = 1000000ULL;
+ const zx_time_t nanos_since_thread_started = zx_clock_get(ZX_CLOCK_THREAD);
+
+ // First convert to microseconds, rounding up.
+ const uint64_t micros_since_thread_started =
+ (nanos_since_thread_started + kNanosPerMicrosecond - 1ULL) /
+ kNanosPerMicrosecond;
+
+ *secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
+ *usecs =
+ static_cast<uint32_t>(micros_since_thread_started % kMicrosPerSecond);
+ return 0;
+}
+
} // namespace base
} // namespace v8
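The Fuchsia GetUserTime() converts thread-CPU nanoseconds to microseconds with the classic integer ceiling idiom, (n + d - 1) / d, so sub-microsecond runtimes do not round down to zero. The idiom in isolation:

    #include <cassert>
    #include <cstdint>

    uint64_t NanosToMicrosRoundedUp(uint64_t nanos) {
      const uint64_t kNanosPerMicro = 1000;
      return (nanos + kNanosPerMicro - 1) / kNanosPerMicro;  // rounds up
    }

    int main() {
      assert(NanosToMicrosRoundedUp(1) == 1);     // not truncated to 0
      assert(NanosToMicrosRoundedUp(1000) == 1);  // exact multiple unchanged
      assert(NanosToMicrosRoundedUp(1001) == 2);
    }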
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 5edbd7648b..f85f7fe942 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -18,7 +18,6 @@
#include <unistd.h>
#include <sys/mman.h>
-#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -55,6 +54,12 @@
#include <sys/prctl.h> // NOLINT, for prctl
#endif
+#if defined(V8_OS_FUCHSIA)
+#include <zircon/process.h>
+#else
+#include <sys/resource.h>
+#endif
+
#if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
#include <sys/syscall.h>
#endif
@@ -245,6 +250,10 @@ void* OS::GetRandomMmapAddr() {
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= uint64_t{0x3FFFFFFFF000};
#endif
+#elif V8_TARGET_ARCH_MIPS64
+ // We allocate code in 256 MB aligned segments because of optimizations using
+ // the J instruction, which requires that all code be within a single 256 MB
+ // segment.
+ raw_addr &= uint64_t{0x3FFFE0000000};
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
@@ -474,7 +483,7 @@ int OS::GetCurrentThreadId() {
#elif V8_OS_AIX
return static_cast<int>(thread_self());
#elif V8_OS_FUCHSIA
- return static_cast<int>(pthread_self());
+ return static_cast<int>(zx_thread_self());
#elif V8_OS_SOLARIS
return static_cast<int>(pthread_self());
#else
@@ -487,6 +496,7 @@ int OS::GetCurrentThreadId() {
// POSIX date/time support.
//
+#if !defined(V8_OS_FUCHSIA)
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
struct rusage usage;
@@ -495,7 +505,7 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
*usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
return 0;
}
-
+#endif
double OS::TimeCurrentMillis() {
return Time::Now().ToJsTime();
@@ -788,7 +798,7 @@ static void InitializeTlsBaseOffset() {
size_t buffer_size = kBufferSize;
int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ FATAL("V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
// XX is the major kernel version component.
@@ -822,8 +832,7 @@ static void CheckFastTls(Thread::LocalStorageKey key) {
Thread::SetThreadLocal(key, expected);
void* actual = Thread::GetExistingThreadLocal(key);
if (expected != actual) {
- V8_Fatal(__FILE__, __LINE__,
- "V8 failed to initialize fast TLS on current kernel");
+ FATAL("V8 failed to initialize fast TLS on current kernel");
}
Thread::SetThreadLocal(key, nullptr);
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 22580cc407..3f1a586840 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -829,15 +829,14 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// base will be nullptr.
if (base != nullptr) break;
}
- DCHECK_EQ(base, aligned_base);
+ DCHECK_IMPLIES(base, base == aligned_base);
return reinterpret_cast<void*>(base);
}
// static
bool OS::Free(void* address, const size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
- // TODO(bbudge) Add DCHECK_EQ(0, size % AllocatePageSize()) when callers
- // pass the correct size on Windows.
+ DCHECK_EQ(0, size % AllocatePageSize());
USE(size);
return VirtualFree(address, 0, MEM_RELEASE) != 0;
}
@@ -872,6 +871,10 @@ void OS::Sleep(TimeDelta interval) {
void OS::Abort() {
+ // Before aborting, make sure to flush output buffers.
+ fflush(stdout);
+ fflush(stderr);
+
if (g_hard_abort) {
V8_IMMEDIATE_CRASH();
}
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 1fcd7aecce..cf34af646c 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -143,41 +143,83 @@ TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
int TimeDelta::InDays() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}
-
int TimeDelta::InHours() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}
-
int TimeDelta::InMinutes() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int>::max();
+ }
return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}
-
double TimeDelta::InSecondsF() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}
-
int64_t TimeDelta::InSeconds() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
return delta_ / Time::kMicrosecondsPerSecond;
}
-
double TimeDelta::InMillisecondsF() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<double>::infinity();
+ }
return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}
-
int64_t TimeDelta::InMilliseconds() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
return delta_ / Time::kMicrosecondsPerMillisecond;
}
+int64_t TimeDelta::InMillisecondsRoundedUp() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
+ Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMicroseconds() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
+ return delta_;
+}
int64_t TimeDelta::InNanoseconds() const {
+ if (IsMax()) {
+ // Preserve max to prevent overflow.
+ return std::numeric_limits<int64_t>::max();
+ }
return delta_ * Time::kNanosecondsPerMicrosecond;
}
@@ -415,6 +457,15 @@ struct timeval Time::ToTimeval() const {
#endif // V8_OS_WIN
+// static
+TimeTicks TimeTicks::HighResolutionNow() {
+ // A DCHECK of TimeTicks::IsHighResolution() was removed from here, as it
+ // turns out this path is used in the wild for logs and counters.
+ //
+ // TODO(hpayer): We may eventually want to split TimedHistograms based on
+ // low-resolution clocks to avoid polluting metrics.
+ return TimeTicks::Now();
+}
Time Time::FromJsTime(double ms_since_epoch) {
// The epoch is a valid time, so this constructor doesn't interpret
@@ -447,165 +498,221 @@ std::ostream& operator<<(std::ostream& os, const Time& time) {
#if V8_OS_WIN
-class TickClock {
- public:
- virtual ~TickClock() {}
- virtual int64_t Now() = 0;
- virtual bool IsHighResolution() = 0;
+namespace {
+
+// We define a wrapper to adapt between the __stdcall and __cdecl call of the
+// mock function, and to avoid a static constructor. Assigning an import to a
+// function pointer directly would require setup code to fetch from the IAT.
+DWORD timeGetTimeWrapper() { return timeGetTime(); }
+
+DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
+
+// A structure holding the most significant bits of "last seen" and a
+// "rollover" counter.
+union LastTimeAndRolloversState {
+ // The state as a single 32-bit opaque value.
+ base::Atomic32 as_opaque_32;
+
+ // The state as usable values.
+ struct {
+ // The top 8-bits of the "last" time. This is enough to check for rollovers
+ // and the small bit-size means fewer CompareAndSwap operations to store
+ // changes in state, which in turn makes for fewer retries.
+ uint8_t last_8;
+ // A count of the number of detected rollovers. Using this as bits 47-32
+ // of the upper half of a 64-bit value results in a 48-bit tick counter.
+ // This extends the total rollover period from about 49 days to about 8800
+ // years while still allowing it to be stored with last_8 in a single
+ // 32-bit value.
+ uint16_t rollovers;
+ } as_values;
};
+base::Atomic32 g_last_time_and_rollovers = 0;
+static_assert(sizeof(LastTimeAndRolloversState) <=
+ sizeof(g_last_time_and_rollovers),
+ "LastTimeAndRolloversState does not fit in a single atomic word");
+
+// We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
+// because it returns the number of milliseconds since Windows has started,
+// which will roll over the 32-bit value every ~49 days. We try to track
+// rollover ourselves, which works if TimeTicks::Now() is called at least every
+// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
+TimeTicks RolloverProtectedNow() {
+ LastTimeAndRolloversState state;
+ DWORD now; // DWORD is always unsigned 32 bits.
+
+ while (true) {
+ // Fetch the "now" and "last" tick values, updating "last" with "now" and
+ // incrementing the "rollovers" counter if the tick-value has wrapped back
+ // around. Atomic operations ensure that both "last" and "rollovers" are
+ // always updated together.
+ int32_t original = base::Acquire_Load(&g_last_time_and_rollovers);
+ state.as_opaque_32 = original;
+ now = g_tick_function();
+ uint8_t now_8 = static_cast<uint8_t>(now >> 24);
+ if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
+ state.as_values.last_8 = now_8;
+
+ // If the state hasn't changed, exit the loop.
+ if (state.as_opaque_32 == original) break;
+
+ // Save the changed state. If the existing value is unchanged from the
+ // original, exit the loop.
+ int32_t check = base::Release_CompareAndSwap(&g_last_time_and_rollovers,
+ original, state.as_opaque_32);
+ if (check == original) break;
+
+ // Another thread has done something in between so retry from the top.
+ }
+ return TimeTicks() +
+ TimeDelta::FromMilliseconds(
+ now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
+}
-// Overview of time counters:
+// Discussion of tick counter options on Windows:
+//
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
-// expensive to retrieve. However, the CPU counter is unreliable and should not
-// be used in production. Its biggest issue is that it is per processor and it
-// is not synchronized between processors. Also, on some computers, the counters
-// will change frequency due to thermal and power changes, and stop in some
-// states.
+// expensive to retrieve. However, on older CPUs, two issues can affect its
+// reliability: First it is maintained per processor and not synchronized
+// between processors. Also, the counters will change frequency due to thermal
+// and power changes, and stop in some states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
-// resolution (100 nanoseconds) time stamp but is comparatively more expensive
-// to retrieve. What QueryPerformanceCounter actually does is up to the HAL.
-// (with some help from ACPI).
-// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
-// in the worst case, it gets the counter from the rollover interrupt on the
+// resolution (<1 microsecond) time stamp. On most hardware running today, it
+// auto-detects and uses the constant-rate RDTSC counter to provide extremely
+// efficient and reliable time stamps.
+//
+// On older CPUs where RDTSC is unreliable, it falls back to using more
+// expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
+// PM timer, and can involve system calls; and all this is up to the HAL (with
+// some help from ACPI). According to
+// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
+// worst case, it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In best cases, the HAL may conclude that the
// RDTSC counter runs at a constant frequency, then it uses that instead. On
// multiprocessor machines, it will try to verify the values returned from
// RDTSC on each processor are consistent with each other, and apply a handful
// of workarounds for known buggy hardware. In other words, QPC is supposed to
-// give consistent result on a multiprocessor computer, but it is unreliable in
-// reality due to bugs in BIOS or HAL on some, especially old computers.
-// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
-// it should be used with caution.
+// give consistent results on a multiprocessor computer, but for older CPUs it
+// can be unreliable due to bugs in the BIOS or HAL.
//
-// (3) System time. The system time provides a low-resolution (typically 10ms
-// to 55 milliseconds) time stamp but is comparatively less expensive to
-// retrieve and more reliable.
-class HighResolutionTickClock final : public TickClock {
- public:
- explicit HighResolutionTickClock(int64_t ticks_per_second)
- : ticks_per_second_(ticks_per_second) {
- DCHECK_LT(0, ticks_per_second);
+// (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
+// milliseconds) time stamp but is comparatively less expensive to retrieve and
+// more reliable. Time::EnableHighResolutionTimer() and
+// Time::ActivateHighResolutionTimer() can be called to alter the resolution of
+// this timer; and also other Windows applications can alter it, affecting this
+// one.
+
+TimeTicks InitialTimeTicksNowFunction();
+
+// See "threading notes" in InitializeNowFunctionPointer() for details on how
+// concurrent reads/writes to these globals has been made safe.
+using TimeTicksNowFunction = decltype(&TimeTicks::Now);
+TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
+int64_t g_qpc_ticks_per_second = 0;
+
+// As of January 2015, use of <atomic> is forbidden in Chromium code. This is
+// what std::atomic_thread_fence does on Windows on all Intel architectures when
+// the memory_order argument is anything but std::memory_order_seq_cst:
+#define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
+
+TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
+ // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
+ // InitializeTimeTicksNowFunctionPointer(), has happened by this point.
+ ATOMIC_THREAD_FENCE(memory_order_acquire);
+
+ DCHECK_GT(g_qpc_ticks_per_second, 0);
+
+ // If the QPC Value is below the overflow threshold, we proceed with
+ // simple multiply and divide.
+ if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
+ return TimeDelta::FromMicroseconds(
+ qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
}
- virtual ~HighResolutionTickClock() {}
-
- int64_t Now() override {
- uint64_t now = QPCNowRaw();
-
- // Intentionally calculate microseconds in a round about manner to avoid
- // overflow and precision issues. Think twice before simplifying!
- int64_t whole_seconds = now / ticks_per_second_;
- int64_t leftover_ticks = now % ticks_per_second_;
- int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
- ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
-
- // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
- // will never return 0.
- return ticks + 1;
- }
-
- bool IsHighResolution() override { return true; }
+ // Otherwise, calculate microseconds in a round about manner to avoid
+ // overflow and precision issues.
+ int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
+ int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
+ return TimeDelta::FromMicroseconds(
+ (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
+ ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
+ g_qpc_ticks_per_second));
+}
- private:
- int64_t ticks_per_second_;
-};
+TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }
+bool IsBuggyAthlon(const CPU& cpu) {
+ // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
+ return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
+}
-class RolloverProtectedTickClock final : public TickClock {
- public:
- RolloverProtectedTickClock() : rollover_(0) {}
- virtual ~RolloverProtectedTickClock() {}
-
- int64_t Now() override {
- // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
- // every ~49.7 days. We try to track rollover ourselves, which works if
- // TimeTicks::Now() is called at least every 24 days.
- // Note that we do not use GetTickCount() here, since timeGetTime() gives
- // more predictable delta values, as described here:
- // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for V8 wants fast timers, it
- // can use timeBeginPeriod() to increase the resolution.
- // We use a lock-free version because the sampler thread calls it
- // while having the rest of the world stopped, that could cause a deadlock.
- base::Atomic32 rollover = base::Acquire_Load(&rollover_);
- uint32_t now = static_cast<uint32_t>(timeGetTime());
- if ((now >> 31) != static_cast<uint32_t>(rollover & 1)) {
- base::Release_CompareAndSwap(&rollover_, rollover, rollover + 1);
- ++rollover;
- }
- uint64_t ms = (static_cast<uint64_t>(rollover) << 31) | now;
- return static_cast<int64_t>(ms * Time::kMicrosecondsPerMillisecond);
+void InitializeTimeTicksNowFunctionPointer() {
+ LARGE_INTEGER ticks_per_sec = {};
+ if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;
+
+ // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
+ // the low-resolution clock.
+ //
+ // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
+ // will still use the low-resolution clock. A CPU lacking a non-stop time
+ // counter will cause Windows to provide an alternate QPC implementation that
+ // works, but is expensive to use. Certain Athlon CPUs are known to make the
+ // QPC implementation unreliable.
+ //
+ // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
+ // ~72% of users fall within this category.
+ TimeTicksNowFunction now_function;
+ CPU cpu;
+ if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter() ||
+ IsBuggyAthlon(cpu)) {
+ now_function = &RolloverProtectedNow;
+ } else {
+ now_function = &QPCNow;
}
- bool IsHighResolution() override { return false; }
-
- private:
- base::Atomic32 rollover_;
-};
-
-
-static LazyStaticInstance<RolloverProtectedTickClock,
- DefaultConstructTrait<RolloverProtectedTickClock>,
- ThreadSafeInitOnceTrait>::type tick_clock =
- LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-struct CreateHighResTickClockTrait {
- static TickClock* Create() {
- // Check if the installed hardware supports a high-resolution performance
- // counter, and if not fallback to the low-resolution tick clock.
- LARGE_INTEGER ticks_per_second;
- if (!QueryPerformanceFrequency(&ticks_per_second)) {
- return tick_clock.Pointer();
- }
-
- // If QPC not reliable, fallback to low-resolution tick clock.
- if (IsQPCReliable()) {
- return tick_clock.Pointer();
- }
+ // Threading note 1: In an unlikely race condition, it's possible for two or
+ // more threads to enter InitializeTimeTicksNowFunctionPointer() in parallel.
+ // This is
+ // not a problem since all threads should end up writing out the same values
+ // to the global variables.
+ //
+ // Threading note 2: A release fence is placed here to ensure, from the
+ // perspective of other threads using the function pointers, that the
+ // assignment to |g_qpc_ticks_per_second| happens before the function pointers
+ // are changed.
+ g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
+ ATOMIC_THREAD_FENCE(memory_order_release);
+ g_time_ticks_now_function = now_function;
+}
- return new HighResolutionTickClock(ticks_per_second.QuadPart);
- }
-};
+TimeTicks InitialTimeTicksNowFunction() {
+ InitializeTimeTicksNowFunctionPointer();
+ return g_time_ticks_now_function();
+}
+#undef ATOMIC_THREAD_FENCE
-static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
- ThreadSafeInitOnceTrait>::type high_res_tick_clock =
- LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+} // namespace
// static
TimeTicks TimeTicks::Now() {
// Make sure we never return 0 here.
- TimeTicks ticks(tick_clock.Pointer()->Now());
+ TimeTicks ticks(g_time_ticks_now_function());
DCHECK(!ticks.IsNull());
return ticks;
}
// static
-TimeTicks TimeTicks::HighResolutionNow() {
- // Make sure we never return 0 here.
- TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
- DCHECK(!ticks.IsNull());
- return ticks;
-}
-
-
-// static
-bool TimeTicks::IsHighResolutionClockWorking() {
- return high_res_tick_clock.Pointer()->IsHighResolution();
+bool TimeTicks::IsHighResolution() {
+ if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
+ InitializeTimeTicksNowFunctionPointer();
+ return g_time_ticks_now_function == &QPCNow;
}
#else // V8_OS_WIN
TimeTicks TimeTicks::Now() {
- return HighResolutionNow();
-}
-
-
-TimeTicks TimeTicks::HighResolutionNow() {
int64_t ticks;
#if V8_OS_MACOSX
static struct mach_timebase_info info;
@@ -627,11 +734,8 @@ TimeTicks TimeTicks::HighResolutionNow() {
return TimeTicks(ticks + 1);
}
-
// static
-bool TimeTicks::IsHighResolutionClockWorking() {
- return true;
-}
+bool TimeTicks::IsHighResolution() { return true; }
#endif // V8_OS_WIN
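The Windows rewrite replaces the virtual TickClock hierarchy with a function pointer chosen once at first use: QPCNow when QueryPerformanceCounter is trustworthy, otherwise the timeGetTime()-based RolloverProtectedNow, which extends the wrapping 32-bit millisecond counter to 48 bits by counting rollovers observed in its top 8 bits. A simplified single-threaded version of that extension logic (no atomics, hypothetical helper):

    #include <cstdint>

    struct RolloverState {
      uint8_t last_top8 = 0;
      uint16_t rollovers = 0;
    };

    // Valid as long as it is called at least once per ~48.8 days, so a
    // wrap of the top 8 bits cannot be missed.
    uint64_t ExtendTicks(RolloverState* state, uint32_t now_ms) {
      const uint8_t top8 = static_cast<uint8_t>(now_ms >> 24);
      if (top8 < state->last_top8) ++state->rollovers;  // counter wrapped
      state->last_top8 = top8;
      return now_ms + (static_cast<uint64_t>(state->rollovers) << 32);
    }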
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index 25dee1c419..161092ad8b 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -5,6 +5,8 @@
#ifndef V8_BASE_PLATFORM_TIME_H_
#define V8_BASE_PLATFORM_TIME_H_
+#include <stdint.h>
+
#include <ctime>
#include <iosfwd>
#include <limits>
@@ -45,7 +47,7 @@ class TimeBase;
class V8_BASE_EXPORT TimeDelta final {
public:
- TimeDelta() : delta_(0) {}
+ constexpr TimeDelta() : delta_(0) {}
// Converts units of time to TimeDeltas.
static TimeDelta FromDays(int days);
@@ -58,6 +60,27 @@ class V8_BASE_EXPORT TimeDelta final {
}
static TimeDelta FromNanoseconds(int64_t nanoseconds);
+ // Returns the maximum time delta, which should be greater than any reasonable
+ // time delta we might compare it to. Adding or subtracting the maximum time
+ // delta to a time or another time delta has an undefined result.
+ static constexpr TimeDelta Max();
+
+ // Returns the minimum time delta, which should be less than any
+ // reasonable time delta we might compare it to. Adding or subtracting the
+ // minimum time delta to a time or another time delta has an undefined result.
+ static constexpr TimeDelta Min();
+
+ // Returns true if the time delta is zero.
+ constexpr bool IsZero() const { return delta_ == 0; }
+
+ // Returns true if the time delta is the maximum/minimum time delta.
+ constexpr bool IsMax() const {
+ return delta_ == std::numeric_limits<int64_t>::max();
+ }
+ constexpr bool IsMin() const {
+ return delta_ == std::numeric_limits<int64_t>::min();
+ }
+
// Returns the time delta in some unit. The F versions return a floating
// point value, the "regular" versions return a rounded-down value.
//
@@ -71,7 +94,7 @@ class V8_BASE_EXPORT TimeDelta final {
double InMillisecondsF() const;
int64_t InMilliseconds() const;
int64_t InMillisecondsRoundedUp() const;
- int64_t InMicroseconds() const { return delta_; }
+ int64_t InMicroseconds() const;
int64_t InNanoseconds() const;
// Converts to/from Mach time specs.
@@ -103,9 +126,7 @@ class V8_BASE_EXPORT TimeDelta final {
delta_ -= other.delta_;
return *this;
}
- TimeDelta operator-() const {
- return TimeDelta(-delta_);
- }
+ constexpr TimeDelta operator-() const { return TimeDelta(-delta_); }
double TimesOf(const TimeDelta& other) const {
return static_cast<double>(delta_) / static_cast<double>(other.delta_);
@@ -135,22 +156,22 @@ class V8_BASE_EXPORT TimeDelta final {
}
// Comparison operators.
- bool operator==(const TimeDelta& other) const {
+ constexpr bool operator==(const TimeDelta& other) const {
return delta_ == other.delta_;
}
- bool operator!=(const TimeDelta& other) const {
+ constexpr bool operator!=(const TimeDelta& other) const {
return delta_ != other.delta_;
}
- bool operator<(const TimeDelta& other) const {
+ constexpr bool operator<(const TimeDelta& other) const {
return delta_ < other.delta_;
}
- bool operator<=(const TimeDelta& other) const {
+ constexpr bool operator<=(const TimeDelta& other) const {
return delta_ <= other.delta_;
}
- bool operator>(const TimeDelta& other) const {
+ constexpr bool operator>(const TimeDelta& other) const {
return delta_ > other.delta_;
}
- bool operator>=(const TimeDelta& other) const {
+ constexpr bool operator>=(const TimeDelta& other) const {
return delta_ >= other.delta_;
}
@@ -159,12 +180,21 @@ class V8_BASE_EXPORT TimeDelta final {
// Constructs a delta given the duration in microseconds. This is private
// to avoid confusion by callers with an integer constructor. Use
// FromSeconds, FromMilliseconds, etc. instead.
- explicit TimeDelta(int64_t delta) : delta_(delta) {}
+ explicit constexpr TimeDelta(int64_t delta) : delta_(delta) {}
// Delta in microseconds.
int64_t delta_;
};
+// static
+constexpr TimeDelta TimeDelta::Max() {
+ return TimeDelta(std::numeric_limits<int64_t>::max());
+}
+
+// static
+constexpr TimeDelta TimeDelta::Min() {
+ return TimeDelta(std::numeric_limits<int64_t>::min());
+}
namespace time_internal {
@@ -177,33 +207,52 @@ namespace time_internal {
template<class TimeClass>
class TimeBase {
public:
- static const int64_t kHoursPerDay = 24;
- static const int64_t kMillisecondsPerSecond = 1000;
- static const int64_t kMillisecondsPerDay =
+ static constexpr int64_t kHoursPerDay = 24;
+ static constexpr int64_t kMillisecondsPerSecond = 1000;
+ static constexpr int64_t kMillisecondsPerDay =
kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
- static const int64_t kMicrosecondsPerMillisecond = 1000;
- static const int64_t kMicrosecondsPerSecond =
+ static constexpr int64_t kMicrosecondsPerMillisecond = 1000;
+ static constexpr int64_t kMicrosecondsPerSecond =
kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
- static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
- static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
- static const int64_t kMicrosecondsPerDay =
+ static constexpr int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static constexpr int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static constexpr int64_t kMicrosecondsPerDay =
kMicrosecondsPerHour * kHoursPerDay;
- static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
- static const int64_t kNanosecondsPerMicrosecond = 1000;
- static const int64_t kNanosecondsPerSecond =
+ static constexpr int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static constexpr int64_t kNanosecondsPerMicrosecond = 1000;
+ static constexpr int64_t kNanosecondsPerSecond =
kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+#if V8_OS_WIN
+ // To avoid overflow in QPC-to-microseconds calculations: since we multiply
+ // by kMicrosecondsPerSecond, the QPC value should not exceed
+ // (2^63 - 1) / 1E6. If it exceeds that threshold, we divide then multiply.
+ static constexpr int64_t kQPCOverflowThreshold = INT64_C(0x8637BD05AF7);
+#endif
+
// Returns true if this object has not been initialized.
//
// Warning: Be careful when writing code that performs math on time values,
// since it's possible to produce a valid "zero" result that should not be
// interpreted as a "null" value.
- bool IsNull() const {
- return us_ == 0;
+ constexpr bool IsNull() const { return us_ == 0; }
+
+ // Returns the maximum/minimum times, which should be greater/less than any
+ // reasonable time with which we might compare it.
+ static TimeClass Max() {
+ return TimeClass(std::numeric_limits<int64_t>::max());
+ }
+ static TimeClass Min() {
+ return TimeClass(std::numeric_limits<int64_t>::min());
}
- // Returns true if this object represents the maximum time.
- bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+ // Returns true if this object represents the maximum/minimum time.
+ constexpr bool IsMax() const {
+ return us_ == std::numeric_limits<int64_t>::max();
+ }
+ constexpr bool IsMin() const {
+ return us_ == std::numeric_limits<int64_t>::min();
+ }
// For serializing only. Use FromInternalValue() to reconstitute. Please don't
// use this and do arithmetic on it, as it is more error prone than using the
@@ -263,7 +312,7 @@ class TimeBase {
static TimeClass FromInternalValue(int64_t us) { return TimeClass(us); }
protected:
- explicit TimeBase(int64_t us) : us_(us) {}
+ explicit constexpr TimeBase(int64_t us) : us_(us) {}
// Time value in a microsecond timebase.
int64_t us_;
@@ -281,7 +330,7 @@ class TimeBase {
class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
public:
// Contains the nullptr time. Use Time::Now() to get the current time.
- Time() : TimeBase(0) {}
+ constexpr Time() : TimeBase(0) {}
// Returns the current time. Watch out, the system might adjust its clock
// in which case time will actually go backwards. We don't guarantee that
@@ -297,10 +346,6 @@ class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
// Returns the time for epoch in Unix-like system (Jan 1, 1970).
static Time UnixEpoch() { return Time(0); }
- // Returns the maximum time, which should be greater than any reasonable time
- // with which we might compare it.
- static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
-
// Converts to/from POSIX time specs.
static Time FromTimespec(struct timespec ts);
struct timespec ToTimespec() const;
@@ -320,7 +365,7 @@ class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
private:
friend class time_internal::TimeBase<Time>;
- explicit Time(int64_t us) : TimeBase(us) {}
+ explicit constexpr Time(int64_t us) : TimeBase(us) {}
};
V8_BASE_EXPORT std::ostream& operator<<(std::ostream&, const Time&);
@@ -343,30 +388,29 @@ inline Time operator+(const TimeDelta& delta, const Time& time) {
class V8_BASE_EXPORT TimeTicks final
: public time_internal::TimeBase<TimeTicks> {
public:
- TimeTicks() : TimeBase(0) {}
+ constexpr TimeTicks() : TimeBase(0) {}
- // Platform-dependent tick count representing "right now."
- // The resolution of this clock is ~1-15ms. Resolution varies depending
- // on hardware/operating system configuration.
+ // Platform-dependent tick count representing "right now." When
+ // IsHighResolution() returns false, the resolution of the clock could be as
+ // coarse as ~15.6ms. Otherwise, the resolution should be no worse than one
+ // microsecond.
// This method never returns a null TimeTicks.
static TimeTicks Now();
- // Returns a platform-dependent high-resolution tick count. Implementation
- // is hardware dependent and may or may not return sub-millisecond
- // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
- // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
- // This method never returns a null TimeTicks.
+ // This is equivalent to Now() but DCHECKs that IsHighResolution(). Useful for
+ // test frameworks that rely on high resolution clocks (in practice all
+ // platforms but low-end Windows devices have high resolution clocks).
static TimeTicks HighResolutionNow();
// Returns true if the high-resolution clock is working on this system.
- static bool IsHighResolutionClockWorking();
+ static bool IsHighResolution();
private:
friend class time_internal::TimeBase<TimeTicks>;
// Please use Now() to create a new object. This is for internal use
// and testing. Ticks are in microseconds.
- explicit TimeTicks(int64_t ticks) : TimeBase(ticks) {}
+ explicit constexpr TimeTicks(int64_t ticks) : TimeBase(ticks) {}
};
inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
@@ -381,7 +425,7 @@ inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
class V8_BASE_EXPORT ThreadTicks final
: public time_internal::TimeBase<ThreadTicks> {
public:
- ThreadTicks() : TimeBase(0) {}
+ constexpr ThreadTicks() : TimeBase(0) {}
// Returns true if ThreadTicks::Now() is supported on this system.
static bool IsSupported();
@@ -416,7 +460,7 @@ class V8_BASE_EXPORT ThreadTicks final
// Please use Now() or GetForThread() to create a new object. This is for
// internal use and testing. Ticks are in microseconds.
- explicit ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
+ explicit constexpr ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
#if V8_OS_WIN
// Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
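A minimal call-site sketch of what the constexpr constructors and predicates in this hunk enable, using only members visible above (the include path is an assumption about the in-tree layout):

  #include "src/base/platform/time.h"  // assumption: in-tree include path

  using v8::base::Time;
  using v8::base::TimeTicks;

  constexpr TimeTicks kUnset;        // legal now that the default ctor is constexpr
  void CheckDeadline(Time deadline) {
    // Max()/IsMax() (and Min()/IsMin()) pair up as open-ended sentinels.
    if (deadline.IsMax()) return;    // interpret "max" as "no deadline"
    // ... otherwise compare against Time::Now() ...
  }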
diff --git a/deps/v8/src/base/qnx-math.h b/deps/v8/src/base/qnx-math.h
index 6ff18f8d12..1503c164fa 100644
--- a/deps/v8/src/base/qnx-math.h
+++ b/deps/v8/src/base/qnx-math.h
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#ifndef V8_BASE_QNX_MATH_H_
-#define V8_QBASE_NX_MATH_H_
+#define V8_BASE_QNX_MATH_H_
#include <cmath>
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index 28ff780dd3..3d47ebe8f7 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -5,11 +5,13 @@
#include "src/base/sys-info.h"
#if V8_OS_POSIX
-#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
+#if !V8_OS_FUCHSIA
+#include <sys/resource.h>
+#endif
#endif
#if V8_OS_BSD
@@ -101,7 +103,7 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfVirtualMemory() {
-#if V8_OS_WIN
+#if V8_OS_WIN || V8_OS_FUCHSIA
return 0;
#elif V8_OS_POSIX
struct rlimit rlim;
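The sys-info.cc change is a portability guard: Fuchsia ships no <sys/resource.h>, so the include moves behind !V8_OS_FUCHSIA and AmountOfVirtualMemory() short-circuits there. A condensed sketch of the pattern follows; the getrlimit body is illustrative and the resource constant is an assumption, not necessarily what V8 queries:

  #include <cstdint>
  #if V8_OS_POSIX && !V8_OS_FUCHSIA
  #include <sys/resource.h>  // Fuchsia has no <sys/resource.h>
  #endif

  int64_t AmountOfVirtualMemory() {
  #if V8_OS_WIN || V8_OS_FUCHSIA
    return 0;  // no rlimit-style query on these targets
  #elif V8_OS_POSIX
    struct rlimit rlim;
    if (getrlimit(RLIMIT_DATA, &rlim) != 0) return 0;  // assumption: RLIMIT_DATA
    return rlim.rlim_cur == RLIM_INFINITY
               ? 0
               : static_cast<int64_t>(rlim.rlim_cur);
  #endif
  }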
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index 18850695cb..18b50fe70c 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BASE_TEMPLATE_UTILS_H
-#define V8_BASE_TEMPLATE_UTILS_H
+#ifndef V8_BASE_TEMPLATE_UTILS_H_
+#define V8_BASE_TEMPLATE_UTILS_H_
#include <array>
#include <memory>
@@ -131,4 +131,4 @@ constexpr auto fold(Func func, Ts&&... more) ->
} // namespace base
} // namespace v8
-#endif // V8_BASE_TEMPLATE_UTILS_H
+#endif // V8_BASE_TEMPLATE_UTILS_H_
diff --git a/deps/v8/src/base/v8-fallthrough.h b/deps/v8/src/base/v8-fallthrough.h
new file mode 100644
index 0000000000..f61238de06
--- /dev/null
+++ b/deps/v8/src/base/v8-fallthrough.h
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_V8_FALLTHROUGH_H_
+#define V8_BASE_V8_FALLTHROUGH_H_
+
+// When clang suggests inserting [[clang::fallthrough]], it first checks if
+// it knows of a macro expanding to it, and if so suggests inserting the
+// macro. This means that this macro must be used only in code internal
+// to v8, so that v8's user code doesn't end up getting suggestions
+// for V8_FALLTHROUGH instead of the user-specific fallthrough macro.
+// So do not include this header in any of v8's public headers -- only
+// use it in src/, not in include/.
+#if defined(__clang__)
+#define V8_FALLTHROUGH [[clang::fallthrough]] // NOLINT(whitespace/braces)
+#else
+#define V8_FALLTHROUGH
+#endif
+
+#endif // V8_BASE_V8_FALLTHROUGH_H_
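A short usage sketch for the new macro (hypothetical function; per the header comment above, uses belong in src/, never in public headers):

  #include "src/base/v8-fallthrough.h"

  int Bucket(int n) {
    switch (n) {
      case 0:
        return 0;
      case 1:
        V8_FALLTHROUGH;  // deliberate: silences clang's -Wimplicit-fallthrough
      case 2:
        return 1;
      default:
        return 2;
    }
  }

On non-clang compilers the macro expands to nothing, leaving an ordinary implicit fall-through.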
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h
index 71d69b20c2..ef87600753 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/bit-vector.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATAFLOW_H_
-#define V8_DATAFLOW_H_
+#ifndef V8_BIT_VECTOR_H_
+#define V8_BIT_VECTOR_H_
#include "src/allocation.h"
#include "src/zone/zone.h"
@@ -370,4 +370,4 @@ class GrowableBitVector BASE_EMBEDDED {
} // namespace internal
} // namespace v8
-#endif // V8_DATAFLOW_H_
+#endif // V8_BIT_VECTOR_H_
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 2bc833fe29..ff4beae7fd 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -369,15 +369,15 @@ V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
}
// Construct case.
-V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
- Isolate* isolate, Builtins::Name builtin_id, Handle<String> name,
- Handle<String> instance_class_name, int len) {
+V8_NOINLINE Handle<SharedFunctionInfo>
+SimpleCreateConstructorSharedFunctionInfo(Isolate* isolate,
+ Builtins::Name builtin_id,
+ Handle<String> name, int len) {
Handle<Code> code = isolate->builtins()->builtin_handle(builtin_id);
const bool kIsConstructor = true;
Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
name, code, kIsConstructor, kNormalFunction, builtin_id);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(*instance_class_name);
shared->set_internal_formal_parameter_count(len);
shared->set_length(len);
return shared;
@@ -389,9 +389,6 @@ V8_NOINLINE void InstallFunction(Handle<JSObject> target,
Handle<String> function_name,
PropertyAttributes attributes = DONT_ENUM) {
JSObject::AddProperty(target, property_name, function, attributes);
- if (target->IsJSGlobalObject()) {
- function->shared()->set_instance_class_name(*function_name);
- }
}
V8_NOINLINE void InstallFunction(Handle<JSObject> target,
@@ -1279,8 +1276,6 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
isolate(), global_constructor, factory()->the_hole_value(),
ApiNatives::GlobalProxyType);
}
- Handle<String> global_name = factory()->global_string();
- global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
global_proxy_function->initial_map()->set_has_hidden_prototype(true);
global_proxy_function->initial_map()->set_may_have_interesting_symbols(true);
@@ -1347,7 +1342,6 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<JSFunction> error_fun = InstallFunction(
global, name, JS_ERROR_TYPE, JSObject::kHeaderSize, 0,
factory->the_hole_value(), Builtins::kErrorConstructor, DONT_ENUM);
- error_fun->shared()->set_instance_class_name(*factory->Error_string());
error_fun->shared()->DontAdaptArguments();
error_fun->shared()->SetConstructStub(
*BUILTIN_CODE(isolate, ErrorConstructor));
@@ -1508,9 +1502,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
object_function, "keys", Builtins::kObjectKeys, 1, true);
native_context()->set_object_keys(*object_keys);
SimpleInstallFunction(object_function, factory->entries_string(),
- Builtins::kObjectEntries, 1, false);
+ Builtins::kObjectEntries, 1, true);
SimpleInstallFunction(object_function, factory->values_string(),
- Builtins::kObjectValues, 1, false);
+ Builtins::kObjectValues, 1, true);
SimpleInstallFunction(isolate->initial_object_prototype(),
"__defineGetter__", Builtins::kObjectDefineGetter, 2,
@@ -1611,50 +1605,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_async_iterator_value_unwrap_shared_fun(*info);
}
- { // --- A s y n c G e n e r a t o r ---
- Handle<JSFunction> await_caught =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncGeneratorAwaitCaught, 1, false);
- native_context()->set_async_generator_await_caught(*await_caught);
-
- Handle<JSFunction> await_uncaught =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncGeneratorAwaitUncaught, 1, false);
- native_context()->set_async_generator_await_uncaught(*await_uncaught);
-
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorAwaitResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_await_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorAwaitRejectClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_await_reject_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorYieldResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_yield_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_resolve_shared_fun(*info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnClosedResolveClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_closed_resolve_shared_fun(
- *info);
-
- info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncGeneratorReturnClosedRejectClosure,
- factory->empty_string(), 1);
- native_context()->set_async_generator_return_closed_reject_shared_fun(
- *info);
- }
-
{ // --- A r r a y ---
Handle<JSFunction> array_function = InstallFunction(
global, "Array", JS_ARRAY_TYPE, JSArray::kSize, 0,
@@ -1705,6 +1655,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
array_function, "isArray", Builtins::kArrayIsArray, 1, true);
native_context()->set_is_arraylike(*is_arraylike);
+ SimpleInstallFunction(array_function, "from", Builtins::kArrayFrom, 1,
+ false);
+ SimpleInstallFunction(array_function, "of", Builtins::kArrayOf, 0, false);
+
JSObject::AddProperty(proto, factory->constructor_string(), array_function,
DONT_ENUM);
@@ -1768,8 +1722,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize, 0,
array_iterator_prototype, Builtins::kIllegal);
array_iterator_function->shared()->set_native(false);
- array_iterator_function->shared()->set_instance_class_name(
- isolate->heap()->ArrayIterator_string());
native_context()->set_initial_array_iterator_prototype(
*array_iterator_prototype);
@@ -1791,6 +1743,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
V(FLOAT32_ARRAY, KEY_VALUE, float32_array, key_value) \
V(FLOAT64_ARRAY, KEY_VALUE, float64_array, key_value) \
V(UINT8_CLAMPED_ARRAY, KEY_VALUE, uint8_clamped_array, key_value) \
+ V(BIGUINT64_ARRAY, KEY_VALUE, biguint64_array, key_value) \
+ V(BIGINT64_ARRAY, KEY_VALUE, bigint64_array, key_value) \
V(FAST_SMI_ARRAY, KEY_VALUE, fast_smi_array, key_value) \
V(FAST_HOLEY_SMI_ARRAY, KEY_VALUE, fast_holey_smi_array, key_value) \
V(FAST_ARRAY, KEY_VALUE, fast_array, key_value) \
@@ -1807,6 +1761,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
V(FLOAT32_ARRAY, VALUE, float32_array, value) \
V(FLOAT64_ARRAY, VALUE, float64_array, value) \
V(UINT8_CLAMPED_ARRAY, VALUE, uint8_clamped_array, value) \
+ V(BIGUINT64_ARRAY, VALUE, biguint64_array, value) \
+ V(BIGINT64_ARRAY, VALUE, bigint64_array, value) \
V(FAST_SMI_ARRAY, VALUE, fast_smi_array, value) \
V(FAST_HOLEY_SMI_ARRAY, VALUE, fast_holey_smi_array, value) \
V(FAST_ARRAY, VALUE, fast_array, value) \
@@ -2102,9 +2058,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
false);
SimpleInstallFunction(prototype, "trimLeft",
- Builtins::kStringPrototypeTrimLeft, 0, false);
+ Builtins::kStringPrototypeTrimStart, 0, false);
SimpleInstallFunction(prototype, "trimRight",
- Builtins::kStringPrototypeTrimRight, 0, false);
+ Builtins::kStringPrototypeTrimEnd, 0, false);
#ifdef V8_INTL_SUPPORT
SimpleInstallFunction(prototype, "toLowerCase",
Builtins::kStringPrototypeToLowerCaseIntl, 0, true);
@@ -2355,16 +2311,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info = SimpleCreateConstructorSharedFunctionInfo(
isolate, Builtins::kPromiseGetCapabilitiesExecutor,
- factory->empty_string(), factory->Object_string(), 2);
+ factory->empty_string(), 2);
native_context()->set_promise_get_capabilities_executor_shared_fun(*info);
-
- // %new_promise_capability(C, debugEvent)
- Handle<JSFunction> new_promise_capability =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kNewPromiseCapability, 2, false);
- native_context()->set_new_promise_capability(*new_promise_capability);
}
{ // -- P r o m i s e
@@ -2376,7 +2326,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->Object_string());
shared->set_internal_formal_parameter_count(1);
shared->set_length(1);
@@ -2387,7 +2336,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(promise_fun, "race", Builtins::kPromiseRace, 1, true);
SimpleInstallFunction(promise_fun, "resolve",
- Builtins::kPromiseResolveWrapper, 1, true);
+ Builtins::kPromiseResolveTrampoline, 1, true);
SimpleInstallFunction(promise_fun, "reject", Builtins::kPromiseReject, 1,
true);
@@ -2395,6 +2344,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Setup %PromisePrototype%.
Handle<JSObject> prototype(
JSObject::cast(promise_fun->instance_prototype()));
+ native_context()->set_promise_prototype(*prototype);
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
@@ -2423,64 +2373,25 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Map> prototype_map(prototype->map());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
- // Store the initial Promise.prototype map. This is used in fast-path
- // checks. Do not alter the prototype after this point.
- native_context()->set_promise_prototype_map(*prototype_map);
-
- { // Internal: PromiseInternalConstructor
- // Also exposed as extrasUtils.createPromise.
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kPromiseInternalConstructor, 1, true);
- function->shared()->set_native(false);
- native_context()->set_promise_internal_constructor(*function);
- }
-
{ // Internal: IsPromise
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(), Builtins::kIsPromise, 1, false);
native_context()->set_is_promise(*function);
}
- { // Internal: ResolvePromise
- // Also exposed as extrasUtils.resolvePromise.
- Handle<JSFunction> function = SimpleCreateFunction(
- isolate, factory->empty_string(), Builtins::kResolvePromise, 2, true);
- function->shared()->set_native(false);
- native_context()->set_promise_resolve(*function);
- }
-
- { // Internal: PromiseHandle
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kPromiseHandleJS, 5, false);
- native_context()->set_promise_handle(*function);
- }
-
- { // Internal: PromiseHandleReject
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kPromiseHandleReject, 3, false);
- native_context()->set_promise_handle_reject(*function);
- }
-
- { // Internal: InternalPromiseReject
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kInternalPromiseReject, 3, true);
- function->shared()->set_native(false);
- native_context()->set_promise_internal_reject(*function);
- }
-
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kPromiseResolveClosure, factory->empty_string(),
- 1);
- native_context()->set_promise_resolve_shared_fun(*info);
+ isolate, Builtins::kPromiseCapabilityDefaultResolve,
+ factory->empty_string(), 1);
+ info->set_native(true);
+ native_context()->set_promise_capability_default_resolve_shared_fun(
+ *info);
info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kPromiseRejectClosure, factory->empty_string(), 1);
- native_context()->set_promise_reject_shared_fun(*info);
+ isolate, Builtins::kPromiseCapabilityDefaultReject,
+ factory->empty_string(), 1);
+ info->set_native(true);
+ native_context()->set_promise_capability_default_reject_shared_fun(*info);
}
{
@@ -2512,7 +2423,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->RegExp_string());
shared->set_internal_formal_parameter_count(2);
shared->set_length(2);
@@ -2962,7 +2872,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
#endif // V8_INTL_SUPPORT
{ // -- A r r a y B u f f e r
- Handle<String> name = factory->InternalizeUtf8String("ArrayBuffer");
+ Handle<String> name = factory->ArrayBuffer_string();
Handle<JSFunction> array_buffer_fun = CreateArrayBuffer(name, ARRAY_BUFFER);
JSObject::AddProperty(global, name, array_buffer_fun, DONT_ENUM);
InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
@@ -2978,7 +2888,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- S h a r e d A r r a y B u f f e r
- Handle<String> name = factory->InternalizeUtf8String("SharedArrayBuffer");
+ Handle<String> name = factory->SharedArrayBuffer_string();
Handle<JSFunction> shared_array_buffer_fun =
CreateArrayBuffer(name, SHARED_ARRAY_BUFFER);
InstallWithIntrinsicDefaultProto(isolate, shared_array_buffer_fun,
@@ -3025,6 +2935,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallSpeciesGetter(typed_array_fun);
native_context()->set_typed_array_function(*typed_array_fun);
+ SimpleInstallFunction(typed_array_fun, "of", Builtins::kTypedArrayOf, 0,
+ false);
+ SimpleInstallFunction(typed_array_fun, "from", Builtins::kTypedArrayFrom, 1,
+ false);
+
// Setup %TypedArrayPrototype%.
Handle<JSObject> prototype(
JSObject::cast(typed_array_fun->instance_prototype()));
@@ -3068,6 +2983,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeEvery, 1, false);
SimpleInstallFunction(prototype, "fill",
Builtins::kTypedArrayPrototypeFill, 1, false);
+ SimpleInstallFunction(prototype, "filter",
+ Builtins::kTypedArrayPrototypeFilter, 1, false);
SimpleInstallFunction(prototype, "find", Builtins::kTypedArrayPrototypeFind,
1, false);
SimpleInstallFunction(prototype, "findIndex",
@@ -3094,6 +3011,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeSlice, 2, false);
SimpleInstallFunction(prototype, "some", Builtins::kTypedArrayPrototypeSome,
1, false);
+ SimpleInstallFunction(prototype, "subarray",
+ Builtins::kTypedArrayPrototypeSubArray, 2, false);
}
{ // -- T y p e d A r r a y s
@@ -3106,29 +3025,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
-
- // %typed_array_construct_by_length
- Handle<JSFunction> construct_by_length = SimpleCreateFunction(
- isolate,
- factory->NewStringFromAsciiChecked("typedArrayConstructByLength"),
- Builtins::kTypedArrayConstructByLength, 3, false);
- native_context()->set_typed_array_construct_by_length(*construct_by_length);
-
- // %typed_array_construct_by_array_buffer
- Handle<JSFunction> construct_by_buffer = SimpleCreateFunction(
- isolate,
- factory->NewStringFromAsciiChecked("typedArrayConstructByArrayBuffer"),
- Builtins::kTypedArrayConstructByArrayBuffer, 5, false);
- native_context()->set_typed_array_construct_by_array_buffer(
- *construct_by_buffer);
-
- // %typed_array_construct_by_array_like
- Handle<JSFunction> construct_by_array_like = SimpleCreateFunction(
- isolate,
- factory->NewStringFromAsciiChecked("typedArrayConstructByArrayLike"),
- Builtins::kTypedArrayConstructByArrayLike, 4, false);
- native_context()->set_typed_array_construct_by_array_like(
- *construct_by_array_like);
}
{ // -- D a t a V i e w
@@ -3219,7 +3115,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(js_map_fun->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->Map_string());
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3263,6 +3158,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
BuiltinFunctionId::kMapSize);
SimpleInstallFunction(prototype, "values", Builtins::kMapPrototypeValues, 0,
true);
+
+ native_context()->set_initial_map_prototype_map(prototype->map());
+
InstallSpeciesGetter(js_map_fun);
}
@@ -3275,7 +3173,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(js_set_fun->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->Set_string());
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3314,6 +3211,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::AddProperty(prototype, factory->keys_string(), values, DONT_ENUM);
JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
DONT_ENUM);
+
+ native_context()->set_initial_set_prototype_map(prototype->map());
+
InstallSpeciesGetter(js_set_fun);
}
@@ -3369,7 +3269,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->WeakMap_string());
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3380,13 +3279,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kWeakMapPrototypeDelete, 1, true);
SimpleInstallFunction(prototype, "get", Builtins::kWeakMapGet, 1, true);
SimpleInstallFunction(prototype, "has", Builtins::kWeakMapHas, 1, true);
- SimpleInstallFunction(prototype, "set", Builtins::kWeakMapPrototypeSet, 2,
- true);
+ Handle<JSFunction> weakmap_set = SimpleInstallFunction(
+ prototype, "set", Builtins::kWeakMapPrototypeSet, 2, true);
+ native_context()->set_weakmap_set(*weakmap_set);
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("WeakMap"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ native_context()->set_initial_weakmap_prototype_map(prototype->map());
}
{ // -- W e a k S e t
@@ -3398,7 +3300,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
- shared->set_instance_class_name(isolate->heap()->WeakSet_string());
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3408,13 +3309,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "delete",
Builtins::kWeakSetPrototypeDelete, 1, true);
SimpleInstallFunction(prototype, "has", Builtins::kWeakSetHas, 1, true);
- SimpleInstallFunction(prototype, "add", Builtins::kWeakSetPrototypeAdd, 1,
- true);
+ Handle<JSFunction> weakset_add = SimpleInstallFunction(
+ prototype, "add", Builtins::kWeakSetPrototypeAdd, 1, true);
+ native_context()->set_weakset_add(*weakset_add);
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
factory->NewStringFromAsciiChecked("WeakSet"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ native_context()->set_initial_weakset_prototype_map(prototype->map());
}
{ // -- P r o x y
@@ -3541,16 +3445,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- sloppy arguments map
- // Make sure we can recognize argument objects at runtime.
- // This is done by introducing an anonymous function with
- // class_name equals 'Arguments'.
Handle<String> arguments_string = factory->Arguments_string();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
arguments_string, BUILTIN_CODE(isolate, Illegal),
isolate->initial_object_prototype(), JS_ARGUMENTS_TYPE,
JSSloppyArgumentsObject::kSize, 2, Builtins::kIllegal, MUTABLE);
Handle<JSFunction> function = factory->NewFunction(args);
- function->shared()->set_instance_class_name(*arguments_string);
Handle<Map> map(function->initial_map());
// Create the descriptor array for the arguments object.
@@ -3641,8 +3541,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
CreateFunction(isolate, factory->empty_string(),
JS_CONTEXT_EXTENSION_OBJECT_TYPE, JSObject::kHeaderSize,
0, factory->the_hole_value(), Builtins::kIllegal);
- Handle<String> name = factory->InternalizeUtf8String("context_extension");
- context_extension_fun->shared()->set_instance_class_name(*name);
native_context()->set_context_extension_function(*context_extension_fun);
}
@@ -3674,9 +3572,14 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
Handle<JSFunction> result = InstallFunction(
global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithEmbedderFields,
- 0, factory()->the_hole_value(), Builtins::kIllegal);
+ 0, factory()->the_hole_value(), Builtins::kTypedArrayConstructor);
result->initial_map()->set_elements_kind(elements_kind);
+ result->shared()->DontAdaptArguments();
+ result->shared()->set_length(3);
+ result->shared()->SetConstructStub(
+ *BUILTIN_CODE(isolate_, TypedArrayConstructor_ConstructStub));
+
CHECK(JSObject::SetPrototype(result, typed_array_function, false, kDontThrow)
.FromJust());
@@ -3757,25 +3660,15 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<Object> argv[],
NativesFlag natives_flag) {
SuppressDebug compiling_natives(isolate->debug());
- // During genesis, the boilerplate for stack overflow won't work until the
- // environment has been at least partially initialized. Add a stack check
- // before entering JS code to catch overflow early.
- StackLimitCheck check(isolate);
- if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
- isolate->StackOverflow();
- return false;
- }
Handle<Context> context(isolate->context());
-
Handle<String> script_name =
isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- source, script_name, 0, 0, ScriptOriginOptions(),
- MaybeHandle<Object>(), context, nullptr, nullptr,
- ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
- natives_flag, MaybeHandle<FixedArray>());
+ source, Compiler::ScriptDetails(script_name), ScriptOriginOptions(),
+ nullptr, nullptr, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, natives_flag);
Handle<SharedFunctionInfo> function_info;
if (!maybe_function_info.ToHandle(&function_info)) return false;
@@ -3838,11 +3731,9 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
factory->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- source, script_name, 0, 0, ScriptOriginOptions(),
- MaybeHandle<Object>(), context, extension, nullptr,
- ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE,
- MaybeHandle<FixedArray>());
+ source, Compiler::ScriptDetails(script_name), ScriptOriginOptions(),
+ extension, nullptr, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE);
if (!maybe_function_info.ToHandle(&function_info)) return false;
cache->Add(name, function_info);
}
@@ -4012,7 +3903,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{ // -- S e t I t e r a t o r
- Handle<String> name = factory->InternalizeUtf8String("Set Iterator");
+ Handle<String> name = factory->SetIterator_string();
// Setup %SetIteratorPrototype%.
Handle<JSObject> prototype =
@@ -4034,7 +3925,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
InstallFunction(container, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
JSSetIterator::kSize, 0, prototype, Builtins::kIllegal);
set_iterator_function->shared()->set_native(false);
- set_iterator_function->shared()->set_instance_class_name(*name);
Handle<Map> set_value_iterator_map(set_iterator_function->initial_map(),
isolate);
@@ -4048,7 +3938,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{ // -- M a p I t e r a t o r
- Handle<String> name = factory->InternalizeUtf8String("Map Iterator");
+ Handle<String> name = factory->MapIterator_string();
// Setup %MapIteratorPrototype%.
Handle<JSObject> prototype =
@@ -4070,7 +3960,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
InstallFunction(container, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
JSMapIterator::kSize, 0, prototype, Builtins::kIllegal);
map_iterator_function->shared()->set_native(false);
- map_iterator_function->shared()->set_instance_class_name(*name);
Handle<Map> map_key_iterator_map(map_iterator_function->initial_map(),
isolate);
@@ -4089,11 +3978,10 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
}
{ // -- S c r i p t
- Handle<String> name = factory->InternalizeUtf8String("Script");
+ Handle<String> name = factory->Script_string();
Handle<JSFunction> script_fun = InstallFunction(
container, name, JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kUnsupportedThrower, DONT_ENUM);
- script_fun->shared()->set_instance_class_name(*name);
native_context->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -4226,34 +4114,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
{
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionAwaitCaught, 2, false);
- native_context->set_async_function_await_caught(*function);
- }
-
- {
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncFunctionAwaitUncaught, 2, false);
- native_context->set_async_function_await_uncaught(*function);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncFunctionAwaitRejectClosure,
- factory->empty_string(), 1);
- native_context->set_async_function_await_reject_shared_fun(*info);
- }
-
- {
- Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
- isolate, Builtins::kAsyncFunctionAwaitResolveClosure,
- factory->empty_string(), 1);
- native_context->set_async_function_await_resolve_shared_fun(*info);
- }
-
- {
- Handle<JSFunction> function =
- SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kAsyncFunctionPromiseCreate, 0, false);
native_context->set_async_function_promise_create(*function);
}
@@ -4344,7 +4204,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_fields)
@@ -4354,6 +4213,7 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrict_constructor_return)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_catch_binding)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_subsume_json)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -4390,6 +4250,41 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
}
}
+void Genesis::InitializeGlobal_harmony_string_trimming() {
+ if (!FLAG_harmony_string_trimming) return;
+
+ Handle<JSGlobalObject> global(native_context()->global_object());
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ Handle<JSObject> string_prototype(
+ native_context()->initial_string_prototype());
+
+ {
+ Handle<String> trim_left_name = factory->InternalizeUtf8String("trimLeft");
+ Handle<String> trim_start_name =
+ factory->InternalizeUtf8String("trimStart");
+ Handle<JSFunction> trim_left_fun = Handle<JSFunction>::cast(
+ JSObject::GetProperty(string_prototype, trim_left_name)
+ .ToHandleChecked());
+ JSObject::AddProperty(string_prototype, trim_start_name, trim_left_fun,
+ DONT_ENUM);
+ trim_left_fun->shared()->set_name(*trim_start_name);
+ }
+
+ {
+ Handle<String> trim_right_name =
+ factory->InternalizeUtf8String("trimRight");
+ Handle<String> trim_end_name = factory->InternalizeUtf8String("trimEnd");
+ Handle<JSFunction> trim_right_fun = Handle<JSFunction>::cast(
+ JSObject::GetProperty(string_prototype, trim_right_name)
+ .ToHandleChecked());
+ JSObject::AddProperty(string_prototype, trim_end_name, trim_right_fun,
+ DONT_ENUM);
+ trim_right_fun->shared()->set_name(*trim_end_name);
+ }
+}
+
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
if (!FLAG_harmony_array_prototype_values) return;
Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -4423,7 +4318,6 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
// to prototype, so we update the saved map.
Handle<Map> prototype_map(prototype->map());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate());
- native_context()->set_promise_prototype_map(*prototype_map);
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
@@ -4456,10 +4350,19 @@ void Genesis::InitializeGlobal_harmony_promise_finally() {
}
void Genesis::InitializeGlobal_harmony_bigint() {
- if (!FLAG_harmony_bigint) return;
-
Factory* factory = isolate()->factory();
Handle<JSGlobalObject> global(native_context()->global_object());
+ if (!FLAG_harmony_bigint) {
+ // Typed arrays are installed by default; remove them if the flag is off.
+ CHECK(JSObject::DeleteProperty(
+ global, factory->InternalizeUtf8String("BigInt64Array"))
+ .ToChecked());
+ CHECK(JSObject::DeleteProperty(
+ global, factory->InternalizeUtf8String("BigUint64Array"))
+ .ToChecked());
+ return;
+ }
+
Handle<JSFunction> bigint_fun =
InstallFunction(global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kBigIntConstructor);
@@ -4474,9 +4377,6 @@ void Genesis::InitializeGlobal_harmony_bigint() {
Context::BIGINT_FUNCTION_INDEX);
// Install the properties of the BigInt constructor.
- // parseInt(string, radix)
- SimpleInstallFunction(bigint_fun, "parseInt", Builtins::kBigIntParseInt, 2,
- false);
// asUintN(bits, bigint)
SimpleInstallFunction(bigint_fun, "asUintN", Builtins::kBigIntAsUintN, 2,
false);
@@ -4503,6 +4403,20 @@ void Genesis::InitializeGlobal_harmony_bigint() {
JSObject::AddProperty(prototype, factory->to_string_tag_symbol(),
factory->BigInt_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Install 64-bit DataView accessors.
+ // TODO(jkummerow): Move these to the "DataView" section when dropping the
+ // FLAG_harmony_bigint.
+ Handle<JSObject> dataview_prototype(
+ JSObject::cast(native_context()->data_view_fun()->instance_prototype()));
+ SimpleInstallFunction(dataview_prototype, "getBigInt64",
+ Builtins::kDataViewPrototypeGetBigInt64, 1, false);
+ SimpleInstallFunction(dataview_prototype, "setBigInt64",
+ Builtins::kDataViewPrototypeSetBigInt64, 2, false);
+ SimpleInstallFunction(dataview_prototype, "getBigUint64",
+ Builtins::kDataViewPrototypeGetBigUint64, 1, false);
+ SimpleInstallFunction(dataview_prototype, "setBigUint64",
+ Builtins::kDataViewPrototypeSetBigUint64, 2, false);
}
#ifdef V8_INTL_SUPPORT
@@ -4554,7 +4468,6 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
array_buffer_fun->shared()->SetConstructStub(*code);
array_buffer_fun->shared()->DontAdaptArguments();
array_buffer_fun->shared()->set_length(1);
- array_buffer_fun->shared()->set_instance_class_name(*name);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory()->constructor_string(),
@@ -4650,10 +4563,30 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
InstallInternalArray(extras_utils, "InternalPackedArray", PACKED_ELEMENTS);
- InstallFunction(extras_utils, isolate()->promise_internal_constructor(),
+ // v8.createPromise(parent)
+ Handle<JSFunction> promise_internal_constructor =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kPromiseInternalConstructor, 1, true);
+ promise_internal_constructor->shared()->set_native(false);
+ InstallFunction(extras_utils, promise_internal_constructor,
factory()->NewStringFromAsciiChecked("createPromise"));
- InstallFunction(extras_utils, isolate()->promise_resolve(),
+
+ // v8.rejectPromise(promise, reason)
+ Handle<JSFunction> promise_internal_reject =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kPromiseInternalReject, 2, true);
+ promise_internal_reject->shared()->set_native(false);
+ InstallFunction(extras_utils, promise_internal_reject,
+ factory()->NewStringFromAsciiChecked("rejectPromise"));
+
+ // v8.resolvePromise(promise, resolution)
+ Handle<JSFunction> promise_internal_resolve =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kPromiseInternalResolve, 2, true);
+ promise_internal_resolve->shared()->set_native(false);
+ InstallFunction(extras_utils, promise_internal_resolve,
factory()->NewStringFromAsciiChecked("resolvePromise"));
+
InstallFunction(extras_utils, isolate()->is_promise(),
factory()->NewStringFromAsciiChecked("isPromise"));
@@ -4699,8 +4632,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
native_context()->set_fast_template_instantiations_cache(
*fast_template_instantiations_cache);
- auto slow_template_instantiations_cache =
- NumberDictionary::New(isolate(), ApiNatives::kInitialFunctionCacheSize);
+ auto slow_template_instantiations_cache = SimpleNumberDictionary::New(
+ isolate(), ApiNatives::kInitialFunctionCacheSize);
native_context()->set_slow_template_instantiations_cache(
*slow_template_instantiations_cache);
@@ -5272,6 +5205,11 @@ bool Genesis::ConfigureGlobalObjects(
native_context()->set_js_map_map(js_map_fun->initial_map());
native_context()->set_js_set_map(js_set_fun->initial_map());
+ Handle<JSFunction> js_array_constructor(native_context()->array_function());
+ Handle<JSObject> js_array_prototype(
+ JSObject::cast(js_array_constructor->instance_prototype()));
+ native_context()->set_initial_array_prototype_map(js_array_prototype->map());
+
return true;
}
@@ -5434,15 +5372,6 @@ Genesis::Genesis(
// on all function exits.
SaveContext saved_context(isolate);
- // During genesis, the boilerplate for stack overflow won't work until the
- // environment has been at least partially initialized. Add a stack check
- // before entering JS code to catch overflow early.
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- isolate->StackOverflow();
- return;
- }
-
// The deserializer needs to hook up references to the global proxy.
// Create an uninitialized global proxy now if we don't have one
// and initialize it later in CreateNewGlobals.
@@ -5571,15 +5500,6 @@ Genesis::Genesis(Isolate* isolate,
// on all function exits.
SaveContext saved_context(isolate);
- // During genesis, the boilerplate for stack overflow won't work until the
- // environment has been at least partially initialized. Add a stack check
- // before entering JS code to catch overflow early.
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) {
- isolate->StackOverflow();
- return;
- }
-
const int proxy_size = JSGlobalProxy::SizeWithEmbedderFields(
global_proxy_template->InternalFieldCount());
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 8afd0a0601..a554496dfd 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -25,7 +25,7 @@ class SourceCodeCache final BASE_EMBEDDED {
void Initialize(Isolate* isolate, bool create_heap_objects);
void Iterate(RootVisitor* v) {
- v->VisitRootPointer(Root::kExtensions,
+ v->VisitRootPointer(Root::kExtensions, nullptr,
bit_cast<Object**, FixedArray**>(&cache_));
}
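Judging from the call-site change above, VisitRootPointer gained a parameter between the root tag and the slot pointer; the nullptr passed here is presumably an optional description. A non-authoritative sketch of an overriding visitor under that assumed signature (other RootVisitor members omitted):

  class CountingVisitor : public RootVisitor {
   public:
    // Assumed signature: (Root, const char* description, Object** slot);
    // description may be nullptr, as at the Iterate() call site above.
    void VisitRootPointer(Root root, const char* description,
                          Object** p) override {
      ++count_;
    }

   private:
    int count_ = 0;
  };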
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 2b2b9c2b34..1ea0bb733b 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -156,13 +156,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -190,6 +183,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ pop(r0);
__ SmiUntag(r0, r0);
}
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
@@ -297,7 +291,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ tst(r4, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ b(ne, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -417,7 +411,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ tst(r4, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -559,9 +553,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(r3, r1);
__ Move(r1, r4);
- __ ldr(scratch, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(scratch);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -828,9 +823,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ add(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ add(r2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(r2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -844,10 +840,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -857,11 +856,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmp(bytecode, Operand(0x1));
- __ b(hi, &load_size);
+ __ b(hi, &process_bytecode);
__ b(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +868,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -878,8 +877,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ b(if_return, eq);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
}
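Restating the control flow of AdvanceBytecodeOffsetOrReturn as a C++ sketch; kBytecodeCount and IsReturnBytecode are hypothetical stand-ins for V8 internals (the real return check is the JUMP_IF_EQUAL chain over RETURN_BYTECODE_LIST):

  #include <cstdint>

  constexpr int kBytecodeCount = 256;  // placeholder for the real count
  bool IsReturnBytecode(uint8_t b);    // hypothetical predicate

  // Returns true when the caller should take the if_return path; otherwise
  // advances `offset` past the current (possibly prefixed) bytecode.
  bool AdvanceOrReturn(const uint8_t* bytecodes, int& offset,
                       const int* size_table_base) {
    const int* size_table = size_table_base;
    uint8_t bytecode = bytecodes[offset];
    // kWide == 0 and kExtraWide == 1 (per the STATIC_ASSERTs): a prefix
    // byte selects a scaled size table, and the real bytecode follows it.
    if (bytecode <= 1) {
      int scale = (bytecode == 0) ? 1 : 2;
      ++offset;
      bytecode = bytecodes[offset];
      size_table += scale * kBytecodeCount;
    }
    if (IsReturnBytecode(bytecode)) return true;  // bail out to if_return
    offset += size_table[bytecode];               // advance past the bytecode
    return false;
  }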
@@ -907,7 +914,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1008,11 +1015,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ Call(r4);
+ __ ldr(
+ kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1025,16 +1033,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ cmp(r1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ b(&do_return, eq);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r1, r2,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
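The trampoline's dispatch loop, condensed into a sketch (hypothetical types; the real handlers are machine code reached through the dispatch table, and they mostly dispatch to one another directly, returning here only for return bytecodes or after a builtin tail-call):

  #include <cstdint>

  using Handler = void (*)();  // hypothetical: real handlers carry full state

  void RunDispatchLoop(const uint8_t* bytecodes, Handler* dispatch_table,
                       const int* size_table) {
    int offset = 0;  // kInterpreterBytecodeOffsetRegister, untagged
    for (;;) {
      dispatch_table[bytecodes[offset]]();  // __ Call(kJavaScriptCallCodeStartRegister)
      // Either return, or advance to the next bytecode and dispatch again;
      // AdvanceOrReturn is the sketch shown after the previous hunk.
      if (AdvanceOrReturn(bytecodes, offset, size_table)) return;  // do_return
    }
  }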
@@ -1215,13 +1220,14 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
- __ ldr(scratch, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
- kPointerSizeLog2));
- __ Jump(scratch);
+ __ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
+ kPointerSizeLog2));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1237,14 +1243,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r1, r2);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r1, r2,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
__ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1262,7 +1274,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r2;
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1275,7 +1287,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1304,7 +1320,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1491,9 +1507,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular JS by re-calling the function
// which has been reset to the compile lazy builtin.
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
namespace {
@@ -1978,7 +1995,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ b(ne, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2449,9 +2466,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r0 : expected number of arguments
// r1 : function (passed through to callee)
// r3 : new target (passed through to callee)
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2464,9 +2482,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r4);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index dd92af89bb..54d2524d6e 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x2);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -180,6 +173,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(x0);
}
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
__ Br(x2);
}
@@ -332,7 +326,8 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kDerivedConstructorMask,
+ __ TestAndBranchIfAnySet(w4,
+ SharedFunctionInfo::IsDerivedConstructorBit::kMask,
&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -460,11 +455,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
- __ TestAndBranchIfAllClear(w4, SharedFunctionInfo::kClassConstructorMask,
- &use_receiver);
+ __ TestAndBranchIfAllClear(
+ w4, SharedFunctionInfo::IsClassConstructorBit::kMask, &use_receiver);
} else {
- __ TestAndBranchIfAnySet(w4, SharedFunctionInfo::kClassConstructorMask,
- &use_receiver);
+ __ TestAndBranchIfAnySet(
+ w4, SharedFunctionInfo::IsClassConstructorBit::kMask, &use_receiver);
__ CallRuntime(
Runtime::kIncrementUseCounterConstructorReturnNonUndefinedPrimitive);
__ B(&use_receiver);
@@ -552,7 +547,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex);
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
@@ -617,9 +612,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(x3, x1);
__ Move(x1, x4);
- __ Ldr(x5, FieldMemOperand(x1, JSFunction::kCodeOffset));
- __ Add(x5, x5, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(x5);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(x2);
}
__ Bind(&prepare_step_in_if_stepping);
@@ -663,7 +659,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be overflowed
// here, which will cause scratch to become negative.
- __ Sub(scratch, masm->StackPointer(), scratch);
+ __ Sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
__ B(le, stack_overflow);
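A toy version of the check this hunk touches, to make the signed-comparison subtlety explicit (names assumed, not V8's):

#include <cstdint>

// space_left = sp - limit can go negative when the stack has already
// overflowed; the signed "le" comparison then still routes to the overflow
// path, which an unsigned compare would miss.
bool ArgsWouldOverflow(std::intptr_t sp, std::intptr_t real_stack_limit,
                       std::intptr_t num_args, int kPointerSizeLog2 = 3) {
  std::intptr_t space_left = sp - real_stack_limit;
  return space_left <= (num_args << kPointerSizeLog2);
}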
@@ -745,7 +741,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Poke the result into the stack.
__ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
// Loop if we've not reached the end of copy marker.
- __ Cmp(__ StackPointer(), scratch);
+ __ Cmp(sp, scratch);
__ B(lt, &loop);
__ Bind(&done);
@@ -920,9 +916,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Add(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Add(x2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(x2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -936,10 +933,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -949,11 +949,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ Cmp(bytecode, Operand(0x1));
- __ B(hi, &load_size);
+ __ B(hi, &process_bytecode);
__ B(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -961,7 +961,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ B(&load_size);
+ __ B(&process_bytecode);
__ Bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -970,8 +970,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ Bind(&load_size);
+ __ Bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ B(if_return, eq);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ Add(bytecode_offset, bytecode_offset, scratch1);
}
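RETURN_BYTECODE_LIST is an X-macro: it applies JUMP_IF_EQUAL to every bytecode that terminates dispatch, expanding into one compare-and-branch per entry. A standalone sketch of the technique with a hypothetical two-entry list:

#include <iostream>

enum class Bytecode { kWide, kExtraWide, kAdd, kReturn, kSuspendGenerator };

// Hypothetical list; the real one names every return-like bytecode.
#define RETURN_BYTECODE_LIST(V) V(Return) V(SuspendGenerator)

bool IsReturnBytecode(Bytecode b) {
#define CHECK_BYTECODE(NAME) \
  if (b == Bytecode::k##NAME) return true;
  RETURN_BYTECODE_LIST(CHECK_BYTECODE)
#undef CHECK_BYTECODE
  return false;
}

int main() {
  std::cout << IsReturnBytecode(Bytecode::kReturn)        // 1
            << IsReturnBytecode(Bytecode::kAdd) << "\n";  // 0
}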
@@ -998,7 +1006,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
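The kFeedbackVectorOffset to kFeedbackCellOffset change here (repeated in later hunks) adds one indirection: the closure now points at a FeedbackCell whose value slot holds the vector. A toy stand-in for the two Ldr instructions, with illustrative layouts only:

// Toy shapes; real object layouts differ.
struct FeedbackVector { int invocation_count = 0; };
struct FeedbackCell { FeedbackVector* value; };
struct JSFunction { FeedbackCell* feedback_cell; };

// Mirrors the two loads: closure -> cell, then cell's value slot -> vector.
FeedbackVector* LoadFeedbackVector(const JSFunction* closure) {
  return closure->feedback_cell->value;
}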
@@ -1009,7 +1017,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
- __ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
@@ -1022,7 +1030,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Bind(&bytecode_array_loaded);
// Increment invocation count for the function.
- __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
__ Add(w10, w10, Operand(1));
@@ -1060,7 +1068,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
- __ Sub(x10, __ StackPointer(), Operand(x11));
+ __ Sub(x10, sp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -1101,11 +1109,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
- __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Call(ip0);
+ __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1118,16 +1127,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ B(&do_return, eq);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, x1, x2,
+ &do_return);
__ B(&do_dispatch);
__ bind(&do_return);
@@ -1336,11 +1342,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
- __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Jump(ip0);
+ __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
+ __ Ldr(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
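A plain-C++ picture of the dispatch sequence above — read the bytecode byte, scale it to a table index, jump through the slot — with illustrative names, not V8's:

#include <cstddef>
#include <cstdint>

using Handler = void (*)();

// Ldrb reads the byte; the Mov with LSL by log2(pointer size) is the array
// indexing; Call/Jump is the indirect transfer through the loaded slot.
void DispatchOnce(const std::uint8_t* bytecode_array,
                  std::size_t bytecode_offset, Handler* dispatch_table) {
  std::uint8_t bytecode = bytecode_array[bytecode_offset];
  Handler target = dispatch_table[bytecode];
  target();
}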
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1356,14 +1363,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, x1, x2);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, x1, x2,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
__ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1381,7 +1394,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = x2;
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1394,7 +1407,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1423,7 +1440,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1613,7 +1630,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
- __ Ldr(w4, UntagSmiMemOperand(__ StackPointer(), 3 * kPointerSize));
+ __ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@@ -1646,7 +1663,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kPointerSize;
// Set up frame pointer.
- __ Add(fp, __ StackPointer(), frame_size);
+ __ Add(fp, sp, frame_size);
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1682,7 +1699,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
// Restore fp, lr.
- __ Mov(__ StackPointer(), fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
// Call builtin.
@@ -2090,8 +2107,7 @@ void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(x11, x1); // x1: function
__ SmiTag(x11, x0); // x0: number of arguments.
__ Push(x11, padreg);
- __ Add(fp, __ StackPointer(),
- ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
}
void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -2101,7 +2117,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Get the number of arguments passed (as a smi), tear down the frame and
// then drop the parameters and the receiver.
__ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Mov(__ StackPointer(), fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
// Drop actual parameters and receiver.
@@ -2194,7 +2210,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here, which will cause x10 to become negative.
- __ Sub(x10, masm->StackPointer(), x10);
+ __ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@@ -2341,7 +2357,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::kClassConstructorMask,
+ __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2467,7 +2483,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
// Make x10 the space we have left. The stack might already be overflowed
// here, which will cause x10 to become negative.
- __ Sub(x10, masm->StackPointer(), x10);
+ __ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
@@ -2539,8 +2555,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
__ Tbz(bound_argc, 0, &done);
// Store receiver.
- __ Add(scratch, __ StackPointer(),
- Operand(total_argc, LSL, kPointerSizeLog2));
+ __ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
__ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
__ Tbnz(total_argc, 0, &done);
// Store padding.
@@ -2825,7 +2840,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
- Register code_entry = x10;
Label dont_adapt_arguments, stack_overflow;
@@ -2854,7 +2868,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kPointerSize);
- __ Mov(copy_to, __ StackPointer());
+ __ Mov(copy_to, sp);
// Preparing the expected arguments is done in four steps, the order of
// which is chosen so we can use LDP/STP and avoid conditional branches as
@@ -2918,8 +2932,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ RecordComment("-- Store receiver --");
__ Add(copy_from, fp, 2 * kPointerSize);
__ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
- __ Str(scratch1,
- MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2));
+ __ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
@@ -2927,9 +2940,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// x0 : expected number of arguments
// x1 : function (passed through to callee)
// x3 : new target (passed through to callee)
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(x2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2941,9 +2955,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point without adapting the arguments.
__ RecordComment("-- Call without adapting args --");
__ Bind(&dont_adapt_arguments);
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
- __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(x2);
__ Bind(&stack_overflow);
__ RecordComment("-- Stack overflow --");
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index d50e045069..971fb7c678 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -98,18 +98,13 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
if (!raw_call_data->IsUndefined(isolate)) {
DCHECK(raw_call_data->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
- v8::FunctionCallback callback =
- v8::ToCData<v8::FunctionCallback>(callback_obj);
Object* data_obj = call_data->data();
- LOG(isolate, ApiObjectAccess("call", JSObject::cast(*js_receiver)));
FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
*new_target, &args[0] - 1,
args.length() - 1);
-
- Handle<Object> result = custom.Call(callback);
+ Handle<Object> result = custom.Call(call_data);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.is_null()) {
@@ -154,7 +149,7 @@ class RelocatableArguments : public BuiltinArguments, public Relocatable {
virtual inline void IterateInstance(RootVisitor* v) {
if (length() == 0) return;
- v->VisitRootPointers(Root::kRelocatable, lowest_address(),
+ v->VisitRootPointers(Root::kRelocatable, nullptr, lowest_address(),
highest_address() + 1);
}
@@ -256,12 +251,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
Object* handler =
constructor->shared()->get_api_func_data()->instance_call_handler();
DCHECK(!handler->IsUndefined(isolate));
- // TODO(ishell): remove this debugging code.
- CHECK(handler->IsCallHandlerInfo());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
- Object* callback_obj = call_data->callback();
- v8::FunctionCallback callback =
- v8::ToCData<v8::FunctionCallback>(callback_obj);
// Get the data for the call and perform the callback.
Object* result;
@@ -272,7 +262,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
obj, new_target, &args[0] - 1,
args.length() - 1);
- Handle<Object> result_handle = custom.Call(callback);
+ Handle<Object> result_handle = custom.Call(call_data);
if (result_handle.is_null()) {
result = isolate->heap()->undefined_value();
} else {
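Both hunks in this file make the same move: callers stop extracting the raw v8::FunctionCallback themselves and hand the whole CallHandlerInfo to Call(), centralizing the extraction (the per-call ApiObjectAccess log line is dropped along the way). A toy sketch of the after shape, with stand-in types rather than V8's:

#include <functional>
#include <iostream>

// Stand-ins only.
struct CallHandlerInfo {
  std::function<int()> callback;
};

struct FunctionCallbackArguments {
  // Before: Call(v8::FunctionCallback) — every caller did the extraction.
  // After: one entry point that owns pulling the callback out.
  int Call(const CallHandlerInfo& call_data) { return call_data.callback(); }
};

int main() {
  FunctionCallbackArguments custom;
  std::cout << custom.Call(CallHandlerInfo{[] { return 42; }}) << "\n";
}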
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 7db8b971d7..2bf5e1c343 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -44,7 +44,7 @@ ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
Node* formal_parameter_count =
LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
MachineType::Int32());
- formal_parameter_count = Word32ToParameter(formal_parameter_count, mode);
+ formal_parameter_count = Int32ToParameter(formal_parameter_count, mode);
argument_count.Bind(formal_parameter_count);
Node* marker_or_function = LoadBufferObject(
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 027baa2873..52a6222882 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -2,53 +2,50 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-string-gen.h"
+#include "src/builtins/builtins-typedarray-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/factory-inl.h"
#include "src/frame-constants.h"
+#include "src/builtins/builtins-array-gen.h"
+
namespace v8 {
namespace internal {
-class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
- public:
- explicit ArrayBuiltinCodeStubAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state),
- k_(this, MachineRepresentation::kTagged),
- a_(this, MachineRepresentation::kTagged),
- to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
- fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
-
- typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
- BuiltinResultGenerator;
-
- typedef std::function<Node*(ArrayBuiltinCodeStubAssembler* masm,
- Node* k_value, Node* k)>
- CallResultProcessor;
-
- typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
- PostLoopAction;
+using Node = compiler::Node;
- enum class MissingPropertyMode { kSkip, kUseUndefined };
+ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
+ compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state),
+ k_(this, MachineRepresentation::kTagged),
+ a_(this, MachineRepresentation::kTagged),
+ to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
+ fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
- void FindResultGenerator() { a_.Bind(UndefinedConstant()); }
+void ArrayBuiltinsAssembler::FindResultGenerator() {
+ a_.Bind(UndefinedConstant());
+}
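The rest of this file applies one refactor over and over: ArrayBuiltinCodeStubAssembler becomes ArrayBuiltinsAssembler, its class definition moves to builtins-array-gen.h (now included above), and each inline member becomes an out-of-line definition so other assemblers — such as the typed-array one used later — can call into it. A toy illustration of the header/source split, with stand-in members only:

#include <iostream>

// What would now live in the header: declarations only.
class ArrayBuiltinsAssembler {
 public:
  explicit ArrayBuiltinsAssembler(int state) : state_(state) {}
  void FindResultGenerator();  // defined out of line below
 private:
  int state_;
};

// What stays in the .cc file: the definitions.
void ArrayBuiltinsAssembler::FindResultGenerator() {
  std::cout << "generator for state " << state_ << "\n";
}

int main() { ArrayBuiltinsAssembler(7).FindResultGenerator(); }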
- Node* FindProcessor(Node* k_value, Node* k) {
- Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
- this_arg(), k_value, k, o());
- Label false_continue(this), return_true(this);
- BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
- BIND(&return_true);
- ReturnFromBuiltin(k_value);
- BIND(&false_continue);
- return a();
+Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
+ Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
+ this_arg(), k_value, k, o());
+ Label false_continue(this), return_true(this);
+ BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
+ BIND(&return_true);
+ ReturnFromBuiltin(k_value);
+ BIND(&false_continue);
+ return a();
}
- void FindIndexResultGenerator() { a_.Bind(SmiConstant(-1)); }
+ void ArrayBuiltinsAssembler::FindIndexResultGenerator() {
+ a_.Bind(SmiConstant(-1));
+ }
- Node* FindIndexProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FindIndexProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label false_continue(this), return_true(this);
@@ -59,17 +56,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void ForEachResultGenerator() { a_.Bind(UndefinedConstant()); }
+ void ArrayBuiltinsAssembler::ForEachResultGenerator() {
+ a_.Bind(UndefinedConstant());
+ }
- Node* ForEachProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::ForEachProcessor(Node* k_value, Node* k) {
CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
k_value, k, o());
return a();
}
- void SomeResultGenerator() { a_.Bind(FalseConstant()); }
+ void ArrayBuiltinsAssembler::SomeResultGenerator() {
+ a_.Bind(FalseConstant());
+ }
- Node* SomeProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::SomeProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label false_continue(this), return_true(this);
@@ -80,9 +81,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void EveryResultGenerator() { a_.Bind(TrueConstant()); }
+ void ArrayBuiltinsAssembler::EveryResultGenerator() {
+ a_.Bind(TrueConstant());
+ }
- Node* EveryProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::EveryProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
this_arg(), k_value, k, o());
Label true_continue(this), return_false(this);
@@ -93,9 +96,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void ReduceResultGenerator() { return a_.Bind(this_arg()); }
+ void ArrayBuiltinsAssembler::ReduceResultGenerator() {
+ return a_.Bind(this_arg());
+ }
- Node* ReduceProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::ReduceProcessor(Node* k_value, Node* k) {
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this, {&result}), initial(this);
GotoIf(WordEqual(a(), TheHoleConstant()), &initial);
@@ -111,21 +116,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return result.value();
}
- void ReducePostLoopAction() {
+ void ArrayBuiltinsAssembler::ReducePostLoopAction() {
Label ok(this);
GotoIf(WordNotEqual(a(), TheHoleConstant()), &ok);
ThrowTypeError(context(), MessageTemplate::kReduceNoInitial);
BIND(&ok);
}
- void FilterResultGenerator() {
+ void ArrayBuiltinsAssembler::FilterResultGenerator() {
// 7. Let A be ArraySpeciesCreate(O, 0).
// This version of ArraySpeciesCreate will create with the correct
// ElementsKind in the fast case.
- ArraySpeciesCreate();
+ GenerateArraySpeciesCreate();
}
- Node* FilterProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FilterProcessor(Node* k_value, Node* k) {
// ii. Let selected be ToBoolean(? Call(callbackfn, T, kValue, k, O)).
Node* selected = CallJS(CodeFactory::Call(isolate()), context(),
callbackfn(), this_arg(), k_value, k, o());
@@ -191,11 +196,19 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- void MapResultGenerator() { ArraySpeciesCreate(len_); }
+ void ArrayBuiltinsAssembler::MapResultGenerator() {
+ GenerateArraySpeciesCreate(len_);
+ }
- void TypedArrayMapResultGenerator() {
+ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
// 6. Let A be ? TypedArraySpeciesCreate(O, len).
- Node* a = TypedArraySpeciesCreateByLength(context(), o(), len_);
+ TNode<JSTypedArray> original_array = CAST(o());
+ TNode<Smi> length = CAST(len_);
+ const char* method_name = "%TypedArray%.prototype.map";
+
+ TypedArrayBuiltinsAssembler typedarray_asm(state());
+ TNode<JSTypedArray> a = typedarray_asm.SpeciesCreateByLength(
+ context(), original_array, length, method_name);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
CSA_ASSERT(this,
@@ -206,7 +219,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
a_.Bind(a);
}
- Node* SpecCompliantMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::SpecCompliantMapProcessor(Node* k_value,
+ Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// SpecCompliantMapProcessor.
// ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
@@ -218,7 +232,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- Node* FastMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::FastMapProcessor(Node* k_value, Node* k) {
// i. Let kValue be ? Get(O, Pk). Performed by the caller of
// FastMapProcessor.
// ii. Let mapped_value be ? Call(callbackfn, T, kValue, k, O).
@@ -312,7 +326,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
- Node* TypedArrayMapProcessor(Node* k_value, Node* k) {
+ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) {
// 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(),
callbackfn(), this_arg(), k_value, k, o());
@@ -325,11 +339,21 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Branch(fast_typed_array_target_, &fast, &slow);
BIND(&fast);
- // #sec-integerindexedelementset 3. Let numValue be ? ToNumber(value).
- Node* num_value = ToNumber(context(), mapped_value);
+ // #sec-integerindexedelementset
+ // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
+ // numValue be ? ToBigInt(v).
+ // 6. Otherwise, let numValue be ? ToNumber(value).
+ Node* num_value;
+ if (source_elements_kind_ == BIGINT64_ELEMENTS ||
+ source_elements_kind_ == BIGUINT64_ELEMENTS) {
+ num_value = ToBigInt(context(), mapped_value);
+ } else {
+ num_value = ToNumber_Inline(context(), mapped_value);
+ }
// The only way this can bail out is because of a detached buffer.
EmitElementStore(a(), k, num_value, false, source_elements_kind_,
- KeyedAccessStoreMode::STANDARD_STORE, &detached);
+ KeyedAccessStoreMode::STANDARD_STORE, &detached,
+ context());
Goto(&done);
BIND(&slow);
@@ -339,28 +363,16 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&detached);
// tc39.github.io/ecma262/#sec-integerindexedelementset
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // 8. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
BIND(&done);
return a();
}
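The branch added above implements IntegerIndexedElementSet steps 5-6: BigInt64/BigUint64 arrays coerce the mapped value with ToBigInt, every other kind with ToNumber. A toy scalar version of that dispatch, with the coercions faked:

#include <cstdint>
#include <variant>

enum ElementsKind { INT32_ELEMENTS, BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS };

// Fake coercions standing in for the spec operations.
std::int64_t ToBigInt(double v) { return static_cast<std::int64_t>(v); }
double ToNumber(double v) { return v; }

std::variant<std::int64_t, double> CoerceForStore(ElementsKind kind, double v) {
  if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS)
    return ToBigInt(v);  // step 5
  return ToNumber(v);    // step 6
}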
- void NullPostLoopAction() {}
+ void ArrayBuiltinsAssembler::NullPostLoopAction() {}
- protected:
- Node* context() { return context_; }
- Node* receiver() { return receiver_; }
- Node* new_target() { return new_target_; }
- Node* argc() { return argc_; }
- Node* o() { return o_; }
- Node* len() { return len_; }
- Node* callbackfn() { return callbackfn_; }
- Node* this_arg() { return this_arg_; }
- Node* k() { return k_.value(); }
- Node* a() { return a_.value(); }
-
- void ReturnFromBuiltin(Node* value) {
+ void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
if (argc_ == nullptr) {
Return(value);
} else {
@@ -370,9 +382,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void InitIteratingArrayBuiltinBody(Node* context, Node* receiver,
- Node* callbackfn, Node* this_arg,
- Node* new_target, Node* argc) {
+ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* new_target, TNode<IntPtrT> argc) {
context_ = context;
receiver_ = receiver;
new_target_ = new_target;
@@ -381,12 +393,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
argc_ = argc;
}
- void GenerateIteratingArrayBuiltinBody(
+ void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
const Callable& slow_case_continuation,
- MissingPropertyMode missing_property_mode,
- ForEachDirection direction = ForEachDirection::kForward) {
+ MissingPropertyMode missing_property_mode, ForEachDirection direction) {
Label non_array(this), array_changes(this, {&k_, &a_, &to_});
// TODO(danno): Seriously? Do we really need to throw the exact error
@@ -398,7 +409,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O)
- o_ = CallBuiltin(Builtins::kToObject, context(), receiver());
+ o_ = ToObject(context(), receiver());
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -453,15 +464,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
ReturnFromBuiltin(result);
}
- void InitIteratingArrayBuiltinLoopContinuation(Node* context, Node* receiver,
- Node* callbackfn,
- Node* this_arg, Node* a,
- Node* o, Node* initial_k,
- Node* len, Node* to) {
+ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinLoopContinuation(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to) {
context_ = context;
this_arg_ = this_arg;
callbackfn_ = callbackfn;
- argc_ = nullptr;
a_.Bind(a);
k_.Bind(initial_k);
o_ = o;
@@ -469,10 +477,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
to_.Bind(to);
}
- void GenerateIteratingTypedArrayBuiltinBody(
+ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
- ForEachDirection direction = ForEachDirection::kForward) {
+ ForEachDirection direction) {
name_ = name;
// ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
@@ -525,6 +533,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&distinguish_types);
+ generator(this);
+
if (direction == ForEachDirection::kForward) {
k_.Bind(SmiConstant(0));
} else {
@@ -539,7 +549,6 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Label done(this);
source_elements_kind_ = ElementsKindForInstanceType(
static_cast<InstanceType>(instance_types[i]));
- generator(this);
// TODO(tebbi): Silently cancelling the loop on buffer detachment is a
// spec violation. Should go to &throw_detached and throw a TypeError
// instead.
@@ -552,10 +561,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void GenerateIteratingArrayBuiltinLoopContinuation(
+ void ArrayBuiltinsAssembler::GenerateIteratingArrayBuiltinLoopContinuation(
const CallResultProcessor& processor, const PostLoopAction& action,
- MissingPropertyMode missing_property_mode,
- ForEachDirection direction = ForEachDirection::kForward) {
+ MissingPropertyMode missing_property_mode, ForEachDirection direction) {
Label loop(this, {&k_, &a_, &to_});
Label after_loop(this);
Goto(&loop);
@@ -613,8 +621,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Return(a_.value());
}
- private:
- static ElementsKind ElementsKindForInstanceType(InstanceType type) {
+ ElementsKind ArrayBuiltinsAssembler::ElementsKindForInstanceType(
+ InstanceType type) {
switch (type) {
#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
case FIXED_##TYPE##_ARRAY_TYPE: \
@@ -628,9 +636,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void VisitAllTypedArrayElements(Node* array_buffer,
- const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction) {
+ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
+ Node* array_buffer, const CallResultProcessor& processor, Label* detached,
+ ForEachDirection direction) {
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
@@ -660,11 +668,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
advance_mode);
}
- void VisitAllFastElementsOneKind(ElementsKind kind,
- const CallResultProcessor& processor,
- Label* array_changed, ParameterMode mode,
- ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
+ void ArrayBuiltinsAssembler::VisitAllFastElementsOneKind(
+ ElementsKind kind, const CallResultProcessor& processor,
+ Label* array_changed, ParameterMode mode, ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Comment("begin VisitAllFastElementsOneKind");
VARIABLE(original_map, MachineRepresentation::kTagged);
original_map.Bind(LoadMap(o()));
@@ -735,10 +742,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Comment("end VisitAllFastElementsOneKind");
}
- void HandleFastElements(const CallResultProcessor& processor,
- const PostLoopAction& action, Label* slow,
- ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
+ void ArrayBuiltinsAssembler::HandleFastElements(
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ Label* slow, ForEachDirection direction,
+ MissingPropertyMode missing_property_mode) {
Label switch_on_elements_kind(this), fast_elements(this),
maybe_double_elements(this), fast_double_elements(this);
@@ -788,7 +795,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
// This version is specialized to create a zero length array
// of the elements kind of the input array.
- void ArraySpeciesCreate() {
+ void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate() {
Label runtime(this, Label::kDeferred), done(this);
TNode<Smi> len = SmiConstant(0);
@@ -810,9 +817,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// Respect the ElementsKind of the input array.
TNode<Int32T> elements_kind = LoadMapElementsKind(original_map);
GotoIfNot(IsFastElementsKind(elements_kind), &runtime);
- TNode<Context> native_context = CAST(LoadNativeContext(context()));
+ TNode<Context> native_context = LoadNativeContext(context());
TNode<Map> array_map =
- CAST(LoadJSArrayElementsMap(elements_kind, native_context));
+ LoadJSArrayElementsMap(elements_kind, native_context);
TNode<JSArray> array =
CAST(AllocateJSArray(GetInitialFastElementsKind(), array_map, len, len,
nullptr, CodeStubAssembler::SMI_PARAMETERS));
@@ -834,7 +841,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void ArraySpeciesCreate(SloppyTNode<Smi> len) {
+ void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(
+ SloppyTNode<Smi> len) {
Label runtime(this, Label::kDeferred), done(this);
Node* const original_map = LoadMap(o());
@@ -860,9 +868,9 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
// element in the input array (maybe the callback deletes an element).
const ElementsKind elements_kind =
GetHoleyElementsKind(GetInitialFastElementsKind());
- TNode<Context> native_context = CAST(LoadNativeContext(context()));
+ TNode<Context> native_context = LoadNativeContext(context());
TNode<Map> array_map =
- CAST(LoadJSArrayElementsMap(elements_kind, native_context));
+ LoadJSArrayElementsMap(elements_kind, native_context);
a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, len, nullptr,
CodeStubAssembler::SMI_PARAMETERS));
@@ -881,30 +889,14 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&done);
}
- Node* callbackfn_ = nullptr;
- Node* o_ = nullptr;
- Node* this_arg_ = nullptr;
- Node* len_ = nullptr;
- Node* context_ = nullptr;
- Node* receiver_ = nullptr;
- Node* new_target_ = nullptr;
- Node* argc_ = nullptr;
- Node* fast_typed_array_target_ = nullptr;
- const char* name_ = nullptr;
- Variable k_;
- Variable a_;
- Variable to_;
- Label fully_spec_compliant_;
- ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
-};
-
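From here on, the TF_BUILTIN bodies are converted from untyped Node* to CSA's typed wrappers: TNode<T> carries a static type, CAST asserts-and-converts, and UncheckedCast converts without a check. A toy analogue of the mechanism (not the real compiler::TNode):

#include <cassert>

struct Node { int type_tag; };  // toy: tag 1 means "Context" here

template <class T>
struct TNode { Node* node; };

struct Context {};

// CAST analogue: checked (like CSA_ASSERT in debug builds), then typed.
TNode<Context> CastToContext(Node* n) {
  assert(n->type_tag == 1 && "wrong node type");
  return TNode<Context>{n};
}

// UncheckedCast analogue: the caller vouches for the type, no runtime check.
TNode<Context> UncheckedCastToContext(Node* n) { return TNode<Context>{n}; }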
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Label runtime(this, Label::kDeferred);
Label fast(this);
@@ -920,16 +912,18 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&fast);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(receiver)));
- Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
+ TNode<JSArray> array_receiver = CAST(receiver);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ Node* length =
+ LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
- EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
+ EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
// 3) Check that the elements backing store isn't copy-on-write.
- Node* elements = LoadElements(receiver);
+ Node* elements = LoadElements(array_receiver);
GotoIf(WordEqual(LoadMap(elements),
LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
&runtime);
@@ -945,10 +939,10 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
capacity),
&runtime);
- StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
+ StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
- Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
+ Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
GotoIf(Int32LessThanOrEqual(elements_kind,
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
&fast_elements);
@@ -1008,12 +1002,14 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSArray> array_receiver;
Node* kind = nullptr;
Label fast(this);
@@ -1021,13 +1017,14 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&fast);
{
+ array_receiver = CAST(receiver);
arg_index = IntPtrConstant(0);
- kind = EnsureArrayPushable(receiver, &runtime);
+ kind = EnsureArrayPushable(array_receiver, &runtime);
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
- Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, receiver, &args,
- &arg_index, &smi_transition);
+ Node* new_length = BuildAppendJSArray(PACKED_SMI_ELEMENTS, array_receiver,
+ &args, &arg_index, &smi_transition);
args.PopAndReturn(new_length);
}
@@ -1037,17 +1034,17 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// the most generic implementation for the rest of the array.
BIND(&smi_transition);
{
- Node* arg = args.AtIndex(arg_index);
+ Node* arg = args.AtIndex(arg_index.value());
GotoIf(TaggedIsSmi(arg), &default_label);
- Node* length = LoadJSArrayLength(receiver);
+ Node* length = LoadJSArrayLength(array_receiver);
// TODO(danno): Use the KeyedStoreGeneric stub here when possible,
// calling into the runtime to do the elements transition is overkill.
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
SmiConstant(LanguageMode::kStrict));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* bit_field2 = LoadMapBitField2(map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
@@ -1065,16 +1062,16 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&object_push);
{
- Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, receiver, &args,
- &arg_index, &default_label);
+ Node* new_length = BuildAppendJSArray(PACKED_ELEMENTS, array_receiver,
+ &args, &arg_index, &default_label);
args.PopAndReturn(new_length);
}
BIND(&double_push);
{
Node* new_length =
- BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, receiver, &args, &arg_index,
- &double_transition);
+ BuildAppendJSArray(PACKED_DOUBLE_ELEMENTS, array_receiver, &args,
+ &arg_index, &double_transition);
args.PopAndReturn(new_length);
}
@@ -1084,17 +1081,17 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// on the most generic implementation for the rest of the array.
BIND(&double_transition);
{
- Node* arg = args.AtIndex(arg_index);
+ Node* arg = args.AtIndex(arg_index.value());
GotoIfNumber(arg, &default_label);
- Node* length = LoadJSArrayLength(receiver);
+ Node* length = LoadJSArrayLength(array_receiver);
// TODO(danno): Use the KeyedStoreGeneric stub here when possible,
// calling into the runtime to do the elements transition is overkill.
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
SmiConstant(LanguageMode::kStrict));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* bit_field2 = LoadMapBitField2(map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
@@ -1107,13 +1104,13 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&default_label);
{
args.ForEach(
- [this, receiver, context](Node* arg) {
- Node* length = LoadJSArrayLength(receiver);
- CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
- SmiConstant(LanguageMode::kStrict));
+ [this, array_receiver, context](Node* arg) {
+ Node* length = LoadJSArrayLength(array_receiver);
+ CallRuntime(Runtime::kSetProperty, context, array_receiver, length,
+ arg, SmiConstant(LanguageMode::kStrict));
},
- arg_index);
- args.PopAndReturn(LoadJSArrayLength(receiver));
+ arg_index.value());
+ args.PopAndReturn(LoadJSArrayLength(array_receiver));
}
BIND(&runtime);
@@ -1131,8 +1128,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* HandleFastSlice(Node* context, Node* array, Node* from, Node* count,
- Label* slow) {
+ Node* HandleFastSlice(TNode<Context> context, Node* array, Node* from,
+ Node* count, Label* slow) {
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this);
@@ -1262,7 +1259,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
return result.value();
}
- void CopyOneElement(Node* context, Node* o, Node* a, Node* p_k, Variable& n) {
+ void CopyOneElement(TNode<Context> context, Node* o, Node* a, Node* p_k,
+ Variable& n) {
// b. Let kPresent be HasProperty(O, Pk).
// c. ReturnIfAbrupt(kPresent).
TNode<Oddball> k_present = HasProperty(o, p_k, context, kHasProperty);
@@ -1291,9 +1289,9 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
Label slow(this, Label::kDeferred), fast_elements_kind(this);
CodeStubArguments args(this, argc);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
- VARIABLE(o, MachineRepresentation::kTagged);
+ TVARIABLE(JSReceiver, o);
VARIABLE(len, MachineRepresentation::kTagged);
Label length_done(this), generic_length(this), check_arguments_length(this),
load_arguments_length(this);
@@ -1301,8 +1299,9 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
GotoIf(TaggedIsSmi(receiver), &generic_length);
GotoIfNot(IsJSArray(receiver), &check_arguments_length);
- o.Bind(receiver);
- len.Bind(LoadJSArrayLength(receiver));
+ TNode<JSArray> array_receiver = CAST(receiver);
+ o = array_receiver;
+ len.Bind(LoadJSArrayLength(array_receiver));
// Check for the array clone case. There can be no arguments to slice, the
// array prototype chain must be intact and have no elements, the array has to
@@ -1318,7 +1317,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&check_arguments_length);
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(array_receiver);
Node* native_context = LoadNativeContext(context);
GotoIfContextElementEqual(map, native_context,
Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX,
@@ -1337,16 +1336,16 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&load_arguments_length);
Node* arguments_length =
- LoadObjectField(receiver, JSArgumentsObject::kLengthOffset);
+ LoadObjectField(array_receiver, JSArgumentsObject::kLengthOffset);
GotoIf(TaggedIsNotSmi(arguments_length), &generic_length);
- o.Bind(receiver);
+ o = CAST(receiver);
len.Bind(arguments_length);
Goto(&length_done);
BIND(&generic_length);
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O).
- o.Bind(CallBuiltin(Builtins::kToObject, context, receiver));
+ o = ToObject(context, receiver);
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -1359,7 +1358,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
// 5. Let relativeStart be ToInteger(start).
// 6. ReturnIfAbrupt(relativeStart).
- TNode<Object> arg0 = CAST(args.GetOptionalArgumentValue(0, SmiConstant(0)));
+ TNode<Object> arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0));
Node* relative_start = ToInteger_Inline(context, arg0);
// 7. If relativeStart < 0, let k be max((len + relativeStart),0);
@@ -1378,8 +1377,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
// 8. If end is undefined, let relativeEnd be len;
// else let relativeEnd be ToInteger(end).
// 9. ReturnIfAbrupt(relativeEnd).
- TNode<Object> end =
- CAST(args.GetOptionalArgumentValue(1, UndefinedConstant()));
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
Label end_undefined(this), end_done(this);
VARIABLE(relative_end, MachineRepresentation::kTagged);
GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined);
@@ -1460,12 +1458,13 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
}
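Steps 5-13 of Array.prototype.slice, handled in the hunks above, reduce to clamping relativeStart and relativeEnd into [0, len]. A toy integer version of that clamping:

#include <algorithm>
#include <cstdint>

// relative < 0 counts from the end; both branches clamp into [0, len].
std::int64_t ClampRelativeIndex(std::int64_t relative, std::int64_t len) {
  return relative < 0 ? std::max<std::int64_t>(len + relative, 0)
                      : std::min(relative, len);
}
// ClampRelativeIndex(-2, 5) == 3; ClampRelativeIndex(10, 5) == 5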
TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
- Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Label runtime(this, Label::kDeferred);
Label fast(this);
@@ -1482,17 +1481,19 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
BIND(&fast);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(receiver)));
- Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
+ TNode<JSArray> array_receiver = CAST(receiver);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ Node* length =
+ LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements_tagged(this),
fast_elements_smi(this);
GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
- EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
+ EnsureArrayLengthWritable(LoadMap(array_receiver), &runtime);
// 3) Check that the elements backing store isn't copy-on-write.
- Node* elements = LoadElements(receiver);
+ Node* elements = LoadElements(array_receiver);
GotoIf(WordEqual(LoadMap(elements),
LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
&runtime);
@@ -1514,10 +1515,10 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
IntPtrConstant(JSArray::kMaxCopyElements)),
&runtime);
- StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
+ StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
- Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
+ Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
GotoIf(
Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
&fast_elements_smi);
@@ -1616,9 +1617,9 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
}
}
-TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinCodeStubAssembler) {
+TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
ParameterMode mode = OptimalParameterMode();
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* array = Parameter(Descriptor::kSource);
Node* begin = TaggedToParameter(Parameter(Descriptor::kBegin), mode);
Node* count = TaggedToParameter(Parameter(Descriptor::kCount), mode);
@@ -1629,8 +1630,8 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinCodeStubAssembler) {
Return(ExtractFastJSArray(context, array, begin, count, mode));
}
-TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
+TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* array = Parameter(Descriptor::kSource);
CSA_ASSERT(this, IsJSArray(array));
@@ -1640,9 +1641,9 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) {
Return(CloneFastJSArray(context, array, mode));
}
-TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1656,16 +1657,16 @@ TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
// Continuation that is called after an eager deoptimization from TF (ex. the
// array changes during iteration).
-TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1678,9 +1679,9 @@ TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
// Continuation that is called after a lazy deoptimization from TF (ex. the
// callback function is no longer callable).
-TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1695,9 +1696,9 @@ TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
// right after the callback and its return value must be handled before
// iteration continues.
TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1720,13 +1721,13 @@ TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
}
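
The deopt continuations above exist so TurboFan can bail out of an inlined Array.prototype.find mid-loop and resume in a builtin without changing observable behavior. As a rough TypeScript sketch of the loop they re-enter (names and signature are illustrative, not the actual V8 builtins): the length is captured once up front, and holes are read back as undefined, matching MissingPropertyMode::kUseUndefined.

    // Illustrative sketch only; initialK models the saved loop position a
    // deopt continuation resumes from.
    function findLoop<T>(
      o: { length: number; [k: number]: T },
      predicate: (v: T | undefined, k: number, o: unknown) => unknown,
      thisArg: unknown,
      initialK: number,
      len: number // captured before the loop; later mutations don't change it
    ): T | undefined {
      for (let k = initialK; k < len; k++) {
        const value = o[k]; // holes come back as undefined (kUseUndefined)
        if (predicate.call(thisArg, value, k, o)) return value;
      }
      return undefined;
    }

For example, [1, 2, 3].find((x, i, a) => { a.length = 1; return false; }) still visits indices 1 and 2 (seeing undefined), which is exactly the behavior a continuation resuming at initial_k must preserve.
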
// ES #sec-array.prototype.find
-TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1734,17 +1735,16 @@ TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.find",
- &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.find", &ArrayBuiltinsAssembler::FindResultGenerator,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayFindLoopContinuation),
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
-TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1758,15 +1758,14 @@ TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
-TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1777,10 +1776,9 @@ TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1792,9 +1790,9 @@ TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation,
}
TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1817,13 +1815,13 @@ TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
}
// ES #sec-array.prototype.findindex
-TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1832,22 +1830,393 @@ TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.findIndex",
- &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(),
Builtins::kArrayFindIndexLoopContinuation),
MissingPropertyMode::kUseUndefined, ForEachDirection::kForward);
}
+class ArrayPopulatorAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayPopulatorAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<Object> ConstructArrayLike(TNode<Context> context,
+ TNode<Object> receiver) {
+ TVARIABLE(Object, array);
+ Label is_constructor(this), is_not_constructor(this), done(this);
+ GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
+ Branch(IsConstructor(receiver), &is_constructor, &is_not_constructor);
+
+ BIND(&is_constructor);
+ {
+ array = CAST(
+ ConstructJS(CodeFactory::Construct(isolate()), context, receiver));
+ Goto(&done);
+ }
+
+ BIND(&is_not_constructor);
+ {
+ Label allocate_js_array(this);
+
+ TNode<Map> array_map = CAST(LoadContextElement(
+ context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
+
+ array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map,
+ SmiConstant(0), SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return array.value();
+ }
+
+ TNode<Object> ConstructArrayLike(TNode<Context> context,
+ TNode<Object> receiver,
+ TNode<Number> length) {
+ TVARIABLE(Object, array);
+ Label is_constructor(this), is_not_constructor(this), done(this);
+ CSA_ASSERT(this, IsNumberNormalized(length));
+ GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
+ Branch(IsConstructor(receiver), &is_constructor, &is_not_constructor);
+
+ BIND(&is_constructor);
+ {
+ array = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+ receiver, length));
+ Goto(&done);
+ }
+
+ BIND(&is_not_constructor);
+ {
+ Label allocate_js_array(this);
+
+ Label next(this), runtime(this, Label::kDeferred);
+ TNode<Smi> limit = SmiConstant(JSArray::kInitialMaxFastElementArray);
+ CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual,
+ length, SmiConstant(0), ok, not_ok);
+ });
+ // This check also transitively covers the case where length is too big
+ // to be representable by a SMI and so is not usable with
+ // AllocateJSArray.
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
+ limit, &runtime, &next);
+
+ BIND(&runtime);
+ {
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<JSFunction> array_function = CAST(
+ LoadContextElement(native_context, Context::ARRAY_FUNCTION_INDEX));
+ array = CallRuntime(Runtime::kNewArray, context, array_function, length,
+ array_function, UndefinedConstant());
+ Goto(&done);
+ }
+
+ BIND(&next);
+ CSA_ASSERT(this, TaggedIsSmi(length));
+
+ TNode<Map> array_map = CAST(LoadContextElement(
+ context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX));
+
+ // TODO(delphick): Consider using
+ // AllocateUninitializedJSArrayWithElements to avoid initializing an
+ // array and then writing over it.
+ array = CAST(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length,
+ SmiConstant(0), nullptr,
+ ParameterMode::SMI_PARAMETERS));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return array.value();
+ }
+
+ void GenerateSetLength(TNode<Context> context, TNode<Object> array,
+ TNode<Number> length) {
+ Label fast(this), runtime(this), done(this);
+ // TODO(delphick): We should be able to skip the fast set altogether if the
+ // length already equals the expected length, which is now always the case on
+ // the fast path.
+ // Only set the length in this stub if
+ // 1) the array has fast elements,
+ // 2) the length is writable,
+ // 3) the new length is equal to the old length.
+
+ // 1) Check that the array has fast elements.
+ // TODO(delphick): Consider changing this since it does an unnecessary
+ // check for SMIs.
+ // TODO(delphick): We could also hoist this to after the array construction
+ // and copy the args into the array in the same way as the Array constructor.
+ BranchIfFastJSArray(array, context, &fast, &runtime);
+
+ BIND(&fast);
+ {
+ TNode<JSArray> fast_array = CAST(array);
+
+ TNode<Smi> length_smi = CAST(length);
+ TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
+ CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
+
+ // 2) Ensure that the length is writable.
+ // TODO(delphick): This check may be redundant due to the
+ // BranchIfFastJSArray above.
+ EnsureArrayLengthWritable(LoadMap(fast_array), &runtime);
+
+ // 3) If the created array's length does not match the required length,
+ // then use the runtime to set the property as that will insert holes
+ // into excess elements or shrink the backing store as appropriate.
+ GotoIf(SmiNotEqual(length_smi, old_length), &runtime);
+
+ StoreObjectFieldNoWriteBarrier(fast_array, JSArray::kLengthOffset,
+ length_smi);
+
+ Goto(&done);
+ }
+
+ BIND(&runtime);
+ {
+ CallRuntime(Runtime::kSetProperty, context, static_cast<Node*>(array),
+ CodeStubAssembler::LengthStringConstant(), length,
+ SmiConstant(LanguageMode::kStrict));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ }
+};
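
ConstructArrayLike above implements the step shared by Array.from and Array.of: construct through the receiver when it is a constructor, otherwise fall back to a plain array. A hedged TypeScript approximation (isConstructor is a stand-in for the CSA IsConstructor check, not a real JS API):

    // Sketch of the construct-or-allocate decision; not the CSA code itself.
    declare function isConstructor(x: unknown): boolean; // assumed helper

    function constructArrayLike(receiver: unknown, length?: number): unknown[] {
      if (isConstructor(receiver)) {
        const C = receiver as new (n?: number) => unknown[];
        // Construct(C) or Construct(C, [length]), as in the two overloads.
        return length === undefined ? new C() : new C(length);
      }
      // Non-constructor receiver: allocate an ordinary array instead.
      return length === undefined ? [] : new Array(length);
    }

GenerateSetLength then only writes the length inline when the result is a fast JSArray whose length is writable and already equal; every other case goes through Runtime::kSetProperty.
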
+
+// ES #sec-array.from
+TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+
+ TNode<Object> map_function = args.GetOptionalArgumentValue(1);
+
+ // If map_function is not undefined, ensure that it's callable, else throw.
+ {
+ Label no_error(this), error(this);
+ GotoIf(IsUndefined(map_function), &no_error);
+ GotoIf(TaggedIsSmi(map_function), &error);
+ Branch(IsCallable(map_function), &no_error, &error);
+
+ BIND(&error);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, map_function);
+
+ BIND(&no_error);
+ }
+
+ Label iterable(this), not_iterable(this), finished(this), if_exception(this);
+
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(2);
+ TNode<Object> items = args.GetOptionalArgumentValue(0);
+ // The spec doesn't require ToObject to be called directly on the iterable
+ // branch, but it happens as part of GetMethod, which is in the spec.
+ TNode<JSReceiver> array_like = ToObject(context, items);
+
+ TVARIABLE(Object, array);
+ TVARIABLE(Number, length);
+
+ // Determine whether items[Symbol.iterator] is defined:
+ IteratorBuiltinsAssembler iterator_assembler(state());
+ Node* iterator_method =
+ iterator_assembler.GetIteratorMethod(context, array_like);
+ Branch(IsNullOrUndefined(iterator_method), &not_iterable, &iterable);
+
+ BIND(&iterable);
+ {
+ TVARIABLE(Number, index, SmiConstant(0));
+ TVARIABLE(Object, var_exception);
+ Label loop(this, &index), loop_done(this),
+ on_exception(this, Label::kDeferred),
+ index_overflow(this, Label::kDeferred);
+
+ // Check that the method is callable.
+ {
+ Label get_method_not_callable(this, Label::kDeferred), next(this);
+ GotoIf(TaggedIsSmi(iterator_method), &get_method_not_callable);
+ GotoIfNot(IsCallable(iterator_method), &get_method_not_callable);
+ Goto(&next);
+
+ BIND(&get_method_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable,
+ iterator_method);
+
+ BIND(&next);
+ }
+
+ // Construct the output array with empty length.
+ array = ConstructArrayLike(context, args.GetReceiver());
+
+ // Actually get the iterator and throw if the iterator method does not yield
+ // one.
+ IteratorRecord iterator_record =
+ iterator_assembler.GetIterator(context, items, iterator_method);
+
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Object> fast_iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ Goto(&loop);
+
+ BIND(&loop);
+ {
+ // Loop while iterator is not done.
+ TNode<Object> next = CAST(iterator_assembler.IteratorStep(
+ context, iterator_record, &loop_done, fast_iterator_result_map));
+ TVARIABLE(Object, value,
+ CAST(iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map)));
+
+ // If a map_function is supplied, call it (using this_arg as the
+ // receiver) on the value returned from the iterator. Exceptions are
+ // caught so the iterator can be closed.
+ {
+ Label next(this);
+ GotoIf(IsUndefined(map_function), &next);
+
+ CSA_ASSERT(this, IsCallable(map_function));
+ Node* v = CallJS(CodeFactory::Call(isolate()), context, map_function,
+ this_arg, value.value(), index.value());
+ GotoIfException(v, &on_exception, &var_exception);
+ value = CAST(v);
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // Store the result in the output object (catching any exceptions so the
+ // iterator can be closed).
+ Node* define_status =
+ CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
+ index.value(), value.value());
+ GotoIfException(define_status, &on_exception, &var_exception);
+
+ index = NumberInc(index.value());
+
+ // The spec requires that we throw an exception if index reaches 2^53-1,
+ // but an empty loop would take >100 days to do this many iterations. To
+ // actually run for that long would require an iterator that never sets
+ // done to true and a target array which somehow never runs out of memory,
+ // e.g. a proxy that discards the values. Ignoring this case just means
+ // we would repeatedly call CreateDataProperty with index = 2^53.
+ CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) {
+ BranchIfNumberRelationalComparison(Operation::kLessThan, index.value(),
+ NumberConstant(kMaxSafeInteger), ok,
+ not_ok);
+ });
+ Goto(&loop);
+ }
+
+ BIND(&loop_done);
+ {
+ length = index;
+ Goto(&finished);
+ }
+
+ BIND(&on_exception);
+ {
+ // Close the iterator, rethrowing either the passed exception or
+ // exceptions thrown during the close.
+ iterator_assembler.IteratorCloseOnException(context, iterator_record,
+ &var_exception);
+ }
+ }
+
+ // Since there's no iterator, items cannot be a Fast JS Array.
+ BIND(&not_iterable);
+ {
+ CSA_ASSERT(this, Word32BinaryNot(IsFastJSArray(array_like, context)));
+
+ // Treat array_like as an array and try to get its length.
+ length = CAST(ToLength_Inline(
+ context, GetProperty(context, array_like, factory()->length_string())));
+
+ // Construct an array using the receiver as the constructor, with the same
+ // length as the input array.
+ array = ConstructArrayLike(context, args.GetReceiver(), length.value());
+
+ TVARIABLE(Number, index, SmiConstant(0));
+
+ GotoIf(SmiEqual(length.value(), SmiConstant(0)), &finished);
+
+ // Loop from 0 to length-1.
+ {
+ Label loop(this, &index);
+ Goto(&loop);
+ BIND(&loop);
+ TVARIABLE(Object, value);
+
+ value = GetProperty(context, array_like, index.value());
+
+ // If a map_function is supplied, call it (using this_arg as the
+ // receiver) on the value retrieved from the array.
+ {
+ Label next(this);
+ GotoIf(IsUndefined(map_function), &next);
+
+ CSA_ASSERT(this, IsCallable(map_function));
+ value = CAST(CallJS(CodeFactory::Call(isolate()), context, map_function,
+ this_arg, value.value(), index.value()));
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // Store the result in the output object.
+ CallRuntime(Runtime::kCreateDataProperty, context, array.value(),
+ index.value(), value.value());
+ index = NumberInc(index.value());
+ BranchIfNumberRelationalComparison(Operation::kLessThan, index.value(),
+ length.value(), &loop, &finished);
+ }
+ }
+
+ BIND(&finished);
+
+ // Finally set the length on the output and return it.
+ GenerateSetLength(context, array.value(), length.value());
+ args.PopAndReturn(array.value());
+}
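
For orientation, the observable algorithm this builtin implements looks roughly like the following TypeScript; it is a simplified sketch (iterator closing and exact ToLength details elided), not the engine's code path:

    function arrayFrom<T, U>(
      items: Iterable<T> | ArrayLike<T>,
      mapFn?: (v: T, k: number) => U,
      thisArg?: unknown
    ): unknown[] {
      if (mapFn !== undefined && typeof mapFn !== "function") {
        throw new TypeError("map function is not callable");
      }
      const out: unknown[] = []; // the builtin constructs via the receiver instead
      const iterMethod = (items as any)[Symbol.iterator];
      if (iterMethod != null) {
        let k = 0;
        for (const v of items as Iterable<T>) { // IteratorStep / IteratorValue
          out[k] = mapFn ? mapFn.call(thisArg, v, k) : v; // CreateDataProperty
          k++;
        }
      } else {
        const o = Object(items); // ToObject on the array-like branch
        const len = Math.min(Math.max(Math.trunc(Number(o.length)) || 0, 0),
                             Number.MAX_SAFE_INTEGER); // ToLength
        for (let k = 0; k < len; k++) {
          const v = o[k];
          out[k] = mapFn ? mapFn.call(thisArg, v, k) : v;
        }
      }
      return out; // the builtin finishes with GenerateSetLength on the result
    }
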
+
+// ES #sec-array.of
+TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<Smi> length = SmiFromInt32(argc);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ CodeStubArguments args(this, length, nullptr, ParameterMode::SMI_PARAMETERS);
+
+ TNode<Object> array = ConstructArrayLike(context, args.GetReceiver(), length);
+
+ // TODO(delphick): Avoid using CreateDataProperty on the fast path.
+ BuildFastLoop(SmiConstant(0), length,
+ [=](Node* index) {
+ CallRuntime(
+ Runtime::kCreateDataProperty, context,
+ static_cast<Node*>(array), index,
+ args.AtIndex(index, ParameterMode::SMI_PARAMETERS));
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ GenerateSetLength(context, array, length);
+ args.PopAndReturn(array);
+}
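
Array.of is the degenerate case: every argument becomes an element and the length is set at the end. A small sketch (the typeof check merely approximates IsConstructor):

    function arrayOf<T>(this: unknown, ...args: T[]): T[] {
      // Approximates ConstructArrayLike(context, receiver, length).
      const out: T[] =
        typeof this === "function"
          ? new (this as new (n: number) => T[])(args.length)
          : new Array<T>(args.length);
      for (let i = 0; i < args.length; i++) {
        out[i] = args[i]; // Runtime::kCreateDataProperty per argument
      }
      out.length = args.length; // GenerateSetLength
      return out;
    }

Because it constructs through the receiver, subclasses work: arrayOf.call(class MyArr extends Array {}, 1, 2) yields a MyArr of length 2.
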
+
// ES #sec-get-%typedarray%.prototype.find
-TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1856,19 +2225,19 @@ TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.find",
- &ArrayBuiltinCodeStubAssembler::FindResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::FindResultGenerator,
+ &ArrayBuiltinsAssembler::FindProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
// ES #sec-get-%typedarray%.prototype.findIndex
-TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1877,14 +2246,14 @@ TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.findIndex",
- &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FindIndexProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::FindIndexResultGenerator,
+ &ArrayBuiltinsAssembler::FindIndexProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -1898,15 +2267,13 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
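
Note the kSkip here versus kUseUndefined in the find family above: forEach consults HasProperty and skips holes, while find visits them as undefined. The difference is observable:

    const sparse = [, 1]; // index 0 is a hole

    const forEachVisited: number[] = [];
    sparse.forEach((_, i) => forEachVisited.push(i));
    // kSkip: forEachVisited === [1]

    const findVisited: number[] = [];
    sparse.find((_, i) => { findVisited.push(i); return false; });
    // kUseUndefined: findVisited === [0, 1], with undefined seen at index 0
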
-TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1917,10 +2284,9 @@ TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -1931,13 +2297,13 @@ TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation,
initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayForEach, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1946,20 +2312,20 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.forEach",
- &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ &ArrayBuiltinsAssembler::ForEachResultGenerator,
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayForEachLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -1968,14 +2334,14 @@ TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.forEach",
- &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ForEachProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::ForEachResultGenerator,
+ &ArrayBuiltinsAssembler::ForEachProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2002,9 +2368,9 @@ TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
}
}
-TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2015,9 +2381,9 @@ TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
len, UndefinedConstant()));
}
-TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2031,18 +2397,17 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2050,21 +2415,20 @@ TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.some",
- &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.some", &ArrayBuiltinsAssembler::SomeResultGenerator,
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2073,14 +2437,14 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.some",
- &ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
- &ArrayBuiltinCodeStubAssembler::SomeProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::SomeResultGenerator,
+ &ArrayBuiltinsAssembler::SomeProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2107,10 +2471,9 @@ TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
{ Return(FalseConstant()); }
}
-TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2121,9 +2484,9 @@ TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation,
len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2137,18 +2500,17 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2156,21 +2518,20 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.every",
- &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.every", &ArrayBuiltinsAssembler::EveryResultGenerator,
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2179,14 +2540,14 @@ TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.every",
- &ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
- &ArrayBuiltinCodeStubAssembler::EveryProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::EveryResultGenerator,
+ &ArrayBuiltinsAssembler::EveryProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
-TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
@@ -2200,15 +2561,30 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
initial_k, len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReducePreLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
+ // Simulate starting the loop at 0, but ensure that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if none is found.
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ TheHoleConstant(), receiver, SmiConstant(0), len,
+ UndefinedConstant()));
+}
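
Entering the continuation with the hole as accumulator reproduces reduce's behavior when no initialValue is passed: scan forward for the first present element, throwing if none exists. In TypeScript terms (a sketch, with `found` modeling "the accumulator is no longer the hole"):

    function reduceNoInitial<T>(
      o: ArrayLike<T>,
      cb: (acc: T, v: T, k: number, o: ArrayLike<T>) => T
    ): T {
      const len = o.length;
      let k = 0;
      let acc: T | undefined;
      let found = false; // "accumulator is still the hole"
      for (; k < len && !found; k++) {
        if (k in (o as object)) { // MissingPropertyMode::kSkip
          acc = o[k];
          found = true;
        }
      }
      if (!found) {
        throw new TypeError("Reduce of empty array with no initial value");
      }
      for (; k < len; k++) {
        if (k in (o as object)) acc = cb(acc as T, o[k], k, o);
      }
      return acc as T;
    }
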
+
+TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2220,10 +2596,9 @@ TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation,
accumulator, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
@@ -2235,13 +2610,13 @@ TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation,
result, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2249,21 +2624,20 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.reduce",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ "Array.prototype.reduce", &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2272,14 +2646,14 @@ TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.reduce",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction);
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction);
}
-TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
@@ -2293,15 +2667,31 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
initial_k, len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- MissingPropertyMode::kSkip, ForEachDirection::kReverse);
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction, MissingPropertyMode::kSkip,
+ ForEachDirection::kReverse);
}
-TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightPreLoopEagerDeoptContinuation,
+ ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ Node* callbackfn = Parameter(Descriptor::kCallbackFn);
+ Node* len = Parameter(Descriptor::kLength);
+
+ Callable stub(Builtins::CallableFor(
+ isolate(), Builtins::kArrayReduceRightLoopContinuation));
+ // Simulate starting the loop at 0, but ensure that the accumulator is
+ // the hole. The continuation stub will search for the initial non-hole
+ // element, rightly throwing an exception if none is found.
+ Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
+ TheHoleConstant(), receiver, SmiConstant(0), len,
+ UndefinedConstant()));
+}
+
+TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
@@ -2313,10 +2703,9 @@ TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation,
accumulator, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
Node* len = Parameter(Descriptor::kLength);
@@ -2328,13 +2717,13 @@ TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation,
result, receiver, initial_k, len, UndefinedConstant()));
}
-TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2343,21 +2732,21 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingArrayBuiltinBody(
"Array.prototype.reduceRight",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
Builtins::CallableFor(isolate(),
Builtins::kArrayReduceRightLoopContinuation),
MissingPropertyMode::kSkip, ForEachDirection::kReverse);
}
-TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@@ -2366,15 +2755,15 @@ TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.reduceRight",
- &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
- &ArrayBuiltinCodeStubAssembler::ReduceProcessor,
- &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
+ &ArrayBuiltinsAssembler::ReduceResultGenerator,
+ &ArrayBuiltinsAssembler::ReduceProcessor,
+ &ArrayBuiltinsAssembler::ReducePostLoopAction,
ForEachDirection::kReverse);
}
-TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2388,15 +2777,13 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::FilterProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::FilterProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2409,10 +2796,9 @@ TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation,
to));
}
-TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
- ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2451,13 +2837,13 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation,
to.value()));
}
-TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayFilter, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2465,17 +2851,16 @@ TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.filter",
- &ArrayBuiltinCodeStubAssembler::FilterResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FilterProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.filter", &ArrayBuiltinsAssembler::FilterResultGenerator,
+ &ArrayBuiltinsAssembler::FilterProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2489,14 +2874,13 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::SpecCompliantMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- MissingPropertyMode::kSkip);
+ &ArrayBuiltinsAssembler::SpecCompliantMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction, MissingPropertyMode::kSkip);
}
-TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2508,9 +2892,9 @@ TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
UndefinedConstant()));
}
-TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
@@ -2533,13 +2917,13 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) {
UndefinedConstant()));
}
-TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2547,20 +2931,20 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
new_target, argc);
GenerateIteratingArrayBuiltinBody(
- "Array.prototype.map", &ArrayBuiltinCodeStubAssembler::MapResultGenerator,
- &ArrayBuiltinCodeStubAssembler::FastMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
+ "Array.prototype.map", &ArrayBuiltinsAssembler::MapResultGenerator,
+ &ArrayBuiltinsAssembler::FastMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction,
Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation),
MissingPropertyMode::kSkip);
}
-TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
- Node* argc =
+TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@@ -2569,9 +2953,9 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.map",
- &ArrayBuiltinCodeStubAssembler::TypedArrayMapResultGenerator,
- &ArrayBuiltinCodeStubAssembler::TypedArrayMapProcessor,
- &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
+ &ArrayBuiltinsAssembler::TypedArrayMapResultGenerator,
+ &ArrayBuiltinsAssembler::TypedArrayMapProcessor,
+ &ArrayBuiltinsAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
@@ -2620,7 +3004,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
TNode<Object> receiver = args.GetReceiver();
TNode<Object> search_element =
args.GetOptionalArgumentValue(kSearchElementArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* intptr_zero = IntPtrConstant(0);
@@ -2999,7 +3383,8 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- void Generate_ArrayPrototypeIterationMethod(Node* context, Node* receiver,
+ void Generate_ArrayPrototypeIterationMethod(TNode<Context> context,
+ TNode<Object> receiver,
IterationKind iteration_kind) {
VARIABLE(var_array, MachineRepresentation::kTagged);
VARIABLE(var_map, MachineRepresentation::kTagged);
@@ -3009,15 +3394,17 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
Label create_array_iterator(this);
GotoIf(TaggedIsSmi(receiver), &if_isnotobject);
- var_array.Bind(receiver);
- var_map.Bind(LoadMap(receiver));
+
+ TNode<HeapObject> object_receiver = CAST(receiver);
+ var_array.Bind(object_receiver);
+ var_map.Bind(LoadMap(object_receiver));
var_type.Bind(LoadMapInstanceType(var_map.value()));
Branch(IsJSReceiverInstanceType(var_type.value()), &create_array_iterator,
&if_isnotobject);
BIND(&if_isnotobject);
{
- Node* result = CallBuiltin(Builtins::kToObject, context, receiver);
+ TNode<JSReceiver> result = ToObject(context, receiver);
var_array.Bind(result);
var_map.Bind(LoadMap(result));
var_type.Bind(LoadMapInstanceType(var_map.value()));
@@ -3031,31 +3418,30 @@ class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
};
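
The ToObject call here preserves the existing behavior of the iteration methods: primitive receivers are coerced rather than rejected, so only null and undefined throw. For instance:

    // values/keys/entries coerce the receiver with ToObject first:
    const iter = Array.prototype.values.call("ab") as IterableIterator<string>;
    console.log([...iter]); // ["a", "b"]

    // Only null/undefined fail, and the TypeError comes from ToObject:
    // Array.prototype.values.call(null); // TypeError
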
TF_BUILTIN(ArrayPrototypeValues, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kValues);
}
TF_BUILTIN(ArrayPrototypeEntries, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kEntries);
}
TF_BUILTIN(ArrayPrototypeKeys, ArrayPrototypeIterationAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Generate_ArrayPrototypeIterationMethod(context, receiver,
IterationKind::kKeys);
}
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
- Handle<String> operation = factory()->NewStringFromAsciiChecked(
- "Array Iterator.prototype.next", TENURED);
+ const char* method_name = "Array Iterator.prototype.next";
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* iterator = Parameter(Descriptor::kReceiver);
VARIABLE(var_value, MachineRepresentation::kTagged);
@@ -3300,6 +3686,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
@@ -3309,19 +3697,23 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE,
};
Label uint8_values(this), int8_values(this), uint16_values(this),
int16_values(this), uint32_values(this), int32_values(this),
- float32_values(this), float64_values(this);
+ float32_values(this), float64_values(this), biguint64_values(this),
+ bigint64_values(this);
Label* kInstanceTypeHandlers[] = {
- &allocate_key_result, &uint8_values, &uint8_values,
- &int8_values, &uint16_values, &int16_values,
- &uint32_values, &int32_values, &float32_values,
- &float64_values, &uint8_values, &uint8_values,
- &int8_values, &uint16_values, &int16_values,
- &uint32_values, &int32_values, &float32_values,
- &float64_values,
+ &allocate_key_result, &uint8_values, &uint8_values,
+ &int8_values, &uint16_values, &int16_values,
+ &uint32_values, &int32_values, &float32_values,
+ &float64_values, &biguint64_values, &bigint64_values,
+ &uint8_values, &uint8_values, &int8_values,
+ &uint16_values, &int16_values, &uint32_values,
+ &int32_values, &float32_values, &float64_values,
+ &biguint64_values, &bigint64_values,
};
var_done.Bind(FalseConstant());
@@ -3330,59 +3722,62 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&uint8_values);
{
- Node* value_uint8 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_uint8));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int8_values);
{
- Node* value_int8 = LoadFixedTypedArrayElement(
- data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_int8));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&uint16_values);
{
- Node* value_uint16 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_uint16));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int16_values);
{
- Node* value_int16 = LoadFixedTypedArrayElement(
- data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(SmiFromWord32(value_int16));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&uint32_values);
{
- Node* value_uint32 = LoadFixedTypedArrayElement(
- data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(ChangeUint32ToTagged(value_uint32));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&int32_values);
{
- Node* value_int32 = LoadFixedTypedArrayElement(
- data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(ChangeInt32ToTagged(value_int32));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&float32_values);
{
- Node* value_float32 = LoadFixedTypedArrayElement(
- data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(
- AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value_float32)));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
BIND(&float64_values);
{
- Node* value_float64 = LoadFixedTypedArrayElement(
- data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS);
- var_value.Bind(AllocateHeapNumberWithValue(value_float64));
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS));
+ Goto(&allocate_entry_if_needed);
+ }
+ BIND(&biguint64_values);
+ {
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, BIGUINT64_ELEMENTS, SMI_PARAMETERS));
+ Goto(&allocate_entry_if_needed);
+ }
+ BIND(&bigint64_values);
+ {
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, BIGINT64_ELEMENTS, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
}
@@ -3447,14 +3842,12 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&throw_bad_receiver);
{
// The {receiver} is not a valid JSArrayIterator.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- HeapConstant(operation), iterator);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), iterator);
}
BIND(&if_isdetached);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation,
- HeapConstant(operation));
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
} // namespace internal
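Note on the hunk above: the per-type load-and-box sequences (SmiFromWord32, ChangeUint32ToTagged, AllocateHeapNumberWithValue, ...) collapse into one LoadFixedTypedArrayElementAsTagged helper, and the dispatch tables gain the two BigInt element kinds. A minimal standalone C++ sketch of the idea — a single helper owning both the kind-specific load and the boxing step, so each BIND block shrinks to one call (types and names here are illustrative, not V8's):

#include <cstddef>
#include <cstdint>
#include <variant>

// Illustrative stand-ins for V8's element kinds and tagged values.
enum class ElementsKind { kUint8, kInt32, kFloat64, kBigInt64 };
using Tagged = std::variant<int32_t, double, int64_t>;  // Smi / HeapNumber / BigInt

// One helper performs the kind-specific load *and* the tagging, so every
// call site (one per iterator label) becomes a single line.
Tagged LoadElementAsTagged(const void* data, size_t index, ElementsKind kind) {
  switch (kind) {
    case ElementsKind::kUint8:
      return int32_t{static_cast<const uint8_t*>(data)[index]};  // always fits a Smi
    case ElementsKind::kInt32:
      return static_cast<const int32_t*>(data)[index];           // Smi or HeapNumber
    case ElementsKind::kFloat64:
      return static_cast<const double*>(data)[index];            // HeapNumber
    case ElementsKind::kBigInt64:
      return static_cast<const int64_t*>(data)[index];           // BigInt
  }
  return int32_t{0};  // unreachable
}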
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
new file mode 100644
index 0000000000..67ac51480c
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -0,0 +1,156 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
+#define V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class ArrayBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayBuiltinsAssembler(compiler::CodeAssemblerState* state);
+
+ typedef std::function<void(ArrayBuiltinsAssembler* masm)>
+ BuiltinResultGenerator;
+
+ typedef std::function<Node*(ArrayBuiltinsAssembler* masm, Node* k_value,
+ Node* k)>
+ CallResultProcessor;
+
+ typedef std::function<void(ArrayBuiltinsAssembler* masm)> PostLoopAction;
+
+ enum class MissingPropertyMode { kSkip, kUseUndefined };
+
+ void FindResultGenerator();
+
+ Node* FindProcessor(Node* k_value, Node* k);
+
+ void FindIndexResultGenerator();
+
+ Node* FindIndexProcessor(Node* k_value, Node* k);
+
+ void ForEachResultGenerator();
+
+ Node* ForEachProcessor(Node* k_value, Node* k);
+
+ void SomeResultGenerator();
+
+ Node* SomeProcessor(Node* k_value, Node* k);
+
+ void EveryResultGenerator();
+
+ Node* EveryProcessor(Node* k_value, Node* k);
+
+ void ReduceResultGenerator();
+
+ Node* ReduceProcessor(Node* k_value, Node* k);
+
+ void ReducePostLoopAction();
+
+ void FilterResultGenerator();
+
+ Node* FilterProcessor(Node* k_value, Node* k);
+
+ void MapResultGenerator();
+
+ void TypedArrayMapResultGenerator();
+
+ Node* SpecCompliantMapProcessor(Node* k_value, Node* k);
+
+ Node* FastMapProcessor(Node* k_value, Node* k);
+
+ // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
+ Node* TypedArrayMapProcessor(Node* k_value, Node* k);
+
+ void NullPostLoopAction();
+
+ protected:
+ TNode<Context> context() { return context_; }
+ TNode<Object> receiver() { return receiver_; }
+ Node* new_target() { return new_target_; }
+ TNode<IntPtrT> argc() { return argc_; }
+ Node* o() { return o_; }
+ Node* len() { return len_; }
+ Node* callbackfn() { return callbackfn_; }
+ Node* this_arg() { return this_arg_; }
+ Node* k() { return k_.value(); }
+ Node* a() { return a_.value(); }
+
+ void ReturnFromBuiltin(Node* value);
+
+ void InitIteratingArrayBuiltinBody(TNode<Context> context,
+ TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* new_target,
+ TNode<IntPtrT> argc);
+
+ void GenerateIteratingArrayBuiltinBody(
+ const char* name, const BuiltinResultGenerator& generator,
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ const Callable& slow_case_continuation,
+ MissingPropertyMode missing_property_mode,
+ ForEachDirection direction = ForEachDirection::kForward);
+ void InitIteratingArrayBuiltinLoopContinuation(
+ TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
+ Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to);
+
+ void GenerateIteratingTypedArrayBuiltinBody(
+ const char* name, const BuiltinResultGenerator& generator,
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ ForEachDirection direction = ForEachDirection::kForward);
+
+ void GenerateIteratingArrayBuiltinLoopContinuation(
+ const CallResultProcessor& processor, const PostLoopAction& action,
+ MissingPropertyMode missing_property_mode,
+ ForEachDirection direction = ForEachDirection::kForward);
+
+ private:
+ static ElementsKind ElementsKindForInstanceType(InstanceType type);
+
+ void VisitAllTypedArrayElements(Node* array_buffer,
+ const CallResultProcessor& processor,
+ Label* detached, ForEachDirection direction);
+
+ void VisitAllFastElementsOneKind(ElementsKind kind,
+ const CallResultProcessor& processor,
+ Label* array_changed, ParameterMode mode,
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode);
+
+ void HandleFastElements(const CallResultProcessor& processor,
+ const PostLoopAction& action, Label* slow,
+ ForEachDirection direction,
+ MissingPropertyMode missing_property_mode);
+
+ // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+ // This version is specialized to create a zero-length array
+ // with the elements kind of the input array.
+ void GenerateArraySpeciesCreate();
+
+ // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+ void GenerateArraySpeciesCreate(SloppyTNode<Smi> len);
+
+ Node* callbackfn_ = nullptr;
+ Node* o_ = nullptr;
+ Node* this_arg_ = nullptr;
+ Node* len_ = nullptr;
+ TNode<Context> context_;
+ TNode<Object> receiver_;
+ Node* new_target_ = nullptr;
+ TNode<IntPtrT> argc_;
+ Node* fast_typed_array_target_ = nullptr;
+ const char* name_ = nullptr;
+ Variable k_;
+ Variable a_;
+ Variable to_;
+ Label fully_spec_compliant_;
+ ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
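The new header centralizes the iterating-builtin machinery: each builtin (map, filter, forEach, ...) supplies a result generator, a per-element processor, and a post-loop action, and shared code drives the loop. A rough standalone C++ analogue of that callback triple (heavily simplified; the real builtins operate on CSA nodes, not values):

#include <functional>
#include <iostream>
#include <vector>

// Simplified mirrors of BuiltinResultGenerator / CallResultProcessor /
// PostLoopAction, operating on plain ints instead of CSA Nodes.
struct ArrayBuiltinDriver {
  using ResultGenerator = std::function<void(ArrayBuiltinDriver*)>;
  using ElementProcessor =
      std::function<void(ArrayBuiltinDriver*, int value, size_t k)>;
  using PostLoopAction = std::function<void(ArrayBuiltinDriver*)>;

  std::vector<int> input;
  std::vector<int> accumulator;  // plays the role of the builtin's a()

  // GenerateIteratingArrayBuiltinBody, minus all fast/slow-path dispatch.
  void Run(const ResultGenerator& generate, const ElementProcessor& process,
           const PostLoopAction& post) {
    generate(this);
    for (size_t k = 0; k < input.size(); ++k) process(this, input[k], k);
    post(this);
  }
};

int main() {
  ArrayBuiltinDriver d{{1, 2, 3}, {}};
  // "Map": the generator allocates the result, the processor fills it in,
  // and the post-loop action is a no-op (NullPostLoopAction).
  d.Run([](ArrayBuiltinDriver* m) { m->accumulator.reserve(m->input.size()); },
        [](ArrayBuiltinDriver* m, int v, size_t) {
          m->accumulator.push_back(v * 2);
        },
        [](ArrayBuiltinDriver*) {});
  for (int v : d.accumulator) std::cout << v << ' ';  // 2 4 6
}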
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 70ee2326f5..f400e824f0 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -10,6 +10,7 @@
#include "src/contexts.h"
#include "src/counters.h"
#include "src/elements.h"
+#include "src/global-handles.h"
#include "src/isolate.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
@@ -251,7 +252,7 @@ BUILTIN(ArraySlice) {
JSArray* array = JSArray::cast(*receiver);
if (V8_UNLIKELY(!array->HasFastElements() ||
!IsJSArrayFastElementMovingAllowed(isolate, array) ||
- !isolate->IsArraySpeciesLookupChainIntact() ||
+ !isolate->IsSpeciesLookupChainIntact() ||
// If this is a subclass of Array, then call out to JS
!array->HasArrayPrototype(isolate))) {
AllowHeapAllocation allow_allocation;
@@ -316,7 +317,7 @@ BUILTIN(ArraySplice) {
// If this is a subclass of Array, then call out to JS.
!Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
// If anything with @@species has been messed with, call out to JS.
- !isolate->IsArraySpeciesLookupChainIntact())) {
+ !isolate->IsSpeciesLookupChainIntact())) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -1186,7 +1187,7 @@ BUILTIN(ArrayConcat) {
// Avoid a real species read to avoid extra lookups to the array constructor
if (V8_LIKELY(receiver->IsJSArray() &&
Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
- isolate->IsArraySpeciesLookupChainIntact())) {
+ isolate->IsSpeciesLookupChainIntact())) {
if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
return *result_array;
}
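The rename reflects that the protector now covers @@species lookups generally rather than Array alone; the guard itself is unchanged — take the C++ fast path only while the species machinery is provably untouched, else call out to JS. A hedged sketch of that protector pattern (hypothetical names, not V8's types):

#include <functional>

// A protector cell: starts intact and is invalidated the first time user
// code tampers with anything on the @@species lookup chain.
struct SpeciesProtector {
  bool intact = true;
  void Invalidate() { intact = false; }
};

// Mirrors the guard in ArraySlice/ArraySplice/ArrayConcat: fast path only
// while the protector holds, otherwise fall back to the generic JS path.
int SliceFast(const SpeciesProtector& protector, int length,
              const std::function<int(int)>& generic_js_fallback) {
  if (!protector.intact) return generic_js_fallback(length);  // slow, fully spec-compliant
  return length;  // fast C++ path: @@species cannot have been redefined
}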
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 0d0e34ee0d..0db53c687e 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -21,37 +21,18 @@ class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
Node* const awaited, Node* const outer_promise,
const bool is_predicted_as_caught);
- void AsyncFunctionAwaitResumeClosure(
- Node* const context, Node* const sent_value,
- JSGeneratorObject::ResumeMode resume_mode);
+ void AsyncFunctionAwaitResume(Node* const context, Node* const argument,
+ Node* const generator,
+ JSGeneratorObject::ResumeMode resume_mode);
};
-namespace {
-
-// Describe fields of Context associated with AsyncFunctionAwait resume
-// closures.
-// TODO(jgruber): Refactor to reuse code for upcoming async-generators.
-class AwaitContext {
- public:
- enum Fields { kGeneratorSlot = Context::MIN_CONTEXT_SLOTS, kLength };
-};
-
-} // anonymous namespace
-
-void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
- Node* context, Node* sent_value,
+void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResume(
+ Node* const context, Node* const argument, Node* const generator,
JSGeneratorObject::ResumeMode resume_mode) {
+ CSA_ASSERT(this, IsJSGeneratorObject(generator));
DCHECK(resume_mode == JSGeneratorObject::kNext ||
resume_mode == JSGeneratorObject::kThrow);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
-
- // Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
- // unnecessary runtime checks removed.
- // TODO(jgruber): Refactor to reuse code from builtins-generator.cc.
-
// Ensure that the generator is neither closed nor running.
CSA_SLOW_ASSERT(
this,
@@ -66,31 +47,23 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
// Resume the {receiver} using our trampoline.
Callable callable = CodeFactory::ResumeGenerator(isolate());
- CallStub(callable, context, sent_value, generator);
-
- // The resulting Promise is a throwaway, so it doesn't matter what it
- // resolves to. What is important is that we don't end up keeping the
- // whole chain of intermediate Promises alive by returning the return value
- // of ResumeGenerator, as that would create a memory leak.
+ TailCallStub(callable, context, argument, generator);
}
-TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
- Node* const sentError = Parameter(Descriptor::kSentError);
+TF_BUILTIN(AsyncFunctionAwaitFulfill, AsyncFunctionBuiltinsAssembler) {
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const generator = Parameter(Descriptor::kGenerator);
Node* const context = Parameter(Descriptor::kContext);
-
- AsyncFunctionAwaitResumeClosure(context, sentError,
- JSGeneratorObject::kThrow);
- Return(UndefinedConstant());
+ AsyncFunctionAwaitResume(context, argument, generator,
+ JSGeneratorObject::kNext);
}
-TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
- Node* const sentValue = Parameter(Descriptor::kSentValue);
+TF_BUILTIN(AsyncFunctionAwaitReject, AsyncFunctionBuiltinsAssembler) {
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const generator = Parameter(Descriptor::kGenerator);
Node* const context = Parameter(Descriptor::kContext);
-
- AsyncFunctionAwaitResumeClosure(context, sentValue, JSGeneratorObject::kNext);
- Return(UndefinedConstant());
+ AsyncFunctionAwaitResume(context, argument, generator,
+ JSGeneratorObject::kThrow);
}
// ES#abstract-ops-async-function-await
@@ -105,25 +78,12 @@ TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
Node* const context, Node* const generator, Node* const awaited,
Node* const outer_promise, const bool is_predicted_as_caught) {
- CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
- // TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
- // the awaited promise if it is already a promise. Reuse is non-spec-compliant,
- // but as part of our old behavior it gives us a couple of percent
- // performance boost.
- // TODO(jgruber): Use a faster specialized version of
- // InternalPerformPromiseThen.
-
- Await(context, generator, awaited, outer_promise, AwaitContext::kLength,
- init_closure_context, Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
- Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN,
- is_predicted_as_caught);
+ CSA_SLOW_ASSERT(this, IsJSGeneratorObject(generator));
+ CSA_SLOW_ASSERT(this, IsJSPromise(outer_promise));
+
+ Await(context, generator, awaited, outer_promise,
+ Builtins::kAsyncFunctionAwaitFulfill,
+ Builtins::kAsyncFunctionAwaitReject, is_predicted_as_caught);
// Return outer promise to avoid adding a load of the outer promise before
// suspending in BytecodeGenerator.
@@ -133,30 +93,28 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
+ Node* const value = Parameter(Descriptor::kValue);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
static const bool kIsPredictedAsCaught = true;
- AsyncFunctionAwait(context, generator, awaited, outer_promise,
+ AsyncFunctionAwait(context, generator, value, outer_promise,
kIsPredictedAsCaught);
}
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 3);
Node* const generator = Parameter(Descriptor::kGenerator);
- Node* const awaited = Parameter(Descriptor::kAwaited);
+ Node* const value = Parameter(Descriptor::kValue);
Node* const outer_promise = Parameter(Descriptor::kOuterPromise);
Node* const context = Parameter(Descriptor::kContext);
static const bool kIsPredictedAsCaught = false;
- AsyncFunctionAwait(context, generator, awaited, outer_promise,
+ AsyncFunctionAwait(context, generator, value, outer_promise,
kIsPredictedAsCaught);
}
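The await resume path above no longer materializes per-await closures that capture the generator in a context slot; the fulfill/reject handlers are now shared builtins that receive the generator as an explicit argument. A small C++ illustration of that difference (hypothetical types, simplified semantics):

#include <functional>

struct Generator { int state = 0; };  // stands in for suspended async state
enum class ResumeMode { kNext, kThrow };

void ResumeGenerator(Generator* g, int value, ResumeMode mode) {
  g->state = (mode == ResumeMode::kNext) ? value : -value;
}

// Old shape: one heap-allocated closure per await, capturing the generator.
std::function<void(int)> MakeResumeClosure(Generator* g, ResumeMode mode) {
  return [g, mode](int value) { ResumeGenerator(g, value, mode); };
}

// New shape: shared handlers; the generator travels as a parameter, so no
// closure (and no closure context) is allocated per await.
void AwaitFulfill(Generator* g, int v) { ResumeGenerator(g, v, ResumeMode::kNext); }
void AwaitReject(Generator* g, int e) { ResumeGenerator(g, e, ResumeMode::kThrow); }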
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 0cdcb57a3f..7958afba00 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -13,6 +13,58 @@ namespace internal {
using compiler::Node;
+void AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
+ Node* outer_promise,
+ Builtins::Name fulfill_builtin,
+ Builtins::Name reject_builtin,
+ Node* is_predicted_as_caught) {
+ CSA_SLOW_ASSERT(this, Word32Or(IsJSAsyncGeneratorObject(generator),
+ IsJSGeneratorObject(generator)));
+ CSA_SLOW_ASSERT(this, IsJSPromise(outer_promise));
+ CSA_SLOW_ASSERT(this, IsBoolean(is_predicted_as_caught));
+
+ Node* const native_context = LoadNativeContext(context);
+
+ // TODO(bmeurer): This could be optimized and folded into a single allocation.
+ Node* const promise = AllocateAndInitJSPromise(native_context);
+ Node* const promise_reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* const fulfill_handler =
+ HeapConstant(Builtins::CallableFor(isolate(), fulfill_builtin).code());
+ Node* const reject_handler =
+ HeapConstant(Builtins::CallableFor(isolate(), reject_builtin).code());
+ Node* const reaction = AllocatePromiseReaction(
+ promise_reactions, generator, fulfill_handler, reject_handler);
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction);
+ PromiseSetHasHandler(promise);
+
+ // Perform ! Call(promiseCapability.[[Resolve]], undefined, « value »).
+ CallBuiltin(Builtins::kResolvePromise, native_context, promise, value);
+
+ // When debugging, we need to link from the {generator} to the
+ // {outer_promise} of the async function/generator.
+ Label done(this);
+ GotoIfNot(IsDebugActive(), &done);
+ CallRuntime(Runtime::kSetProperty, native_context, generator,
+ LoadRoot(Heap::kgenerator_outer_promise_symbolRootIndex),
+ outer_promise, SmiConstant(LanguageMode::kStrict));
+ GotoIf(IsFalse(is_predicted_as_caught), &done);
+ GotoIf(TaggedIsSmi(value), &done);
+ GotoIfNot(IsJSPromise(value), &done);
+ PromiseSetHandledHint(value);
+ Goto(&done);
+ BIND(&done);
+}
+
+void AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
+ Node* outer_promise,
+ Builtins::Name fulfill_builtin,
+ Builtins::Name reject_builtin,
+ bool is_predicted_as_caught) {
+ return Await(context, generator, value, outer_promise, fulfill_builtin,
+ reject_builtin, BooleanConstant(is_predicted_as_caught));
+}
+
namespace {
// Describe fields of Context associated with the AsyncIterator unwrap closure.
class ValueUnwrapContext {
@@ -22,165 +74,6 @@ class ValueUnwrapContext {
} // namespace
-Node* AsyncBuiltinsAssembler::Await(
- Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length, const ContextInitializer& init_closure_context,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught) {
- DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
-
- Node* const native_context = LoadNativeContext(context);
-
- static const int kWrappedPromiseOffset = FixedArray::SizeFor(context_length);
- static const int kThrowawayPromiseOffset =
- kWrappedPromiseOffset + JSPromise::kSizeWithEmbedderFields;
- static const int kResolveClosureOffset =
- kThrowawayPromiseOffset + JSPromise::kSizeWithEmbedderFields;
- static const int kRejectClosureOffset =
- kResolveClosureOffset + JSFunction::kSizeWithoutPrototype;
- static const int kTotalSize =
- kRejectClosureOffset + JSFunction::kSizeWithoutPrototype;
-
- Node* const base = AllocateInNewSpace(kTotalSize);
- Node* const closure_context = base;
- {
- // Initialize closure context
- InitializeFunctionContext(native_context, closure_context, context_length);
- init_closure_context(closure_context);
- }
-
- // Let promiseCapability be ! NewPromiseCapability(%Promise%).
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
- Node* const promise_map =
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- // Assert that the JSPromise map's instance size is
- // JSPromise::kSizeWithEmbedderFields.
- CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(promise_map),
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
- kPointerSize)));
- Node* const wrapped_value = InnerAllocate(base, kWrappedPromiseOffset);
- {
- // Initialize Promise
- StoreMapNoWriteBarrier(wrapped_value, promise_map);
- InitializeJSObjectFromMap(
- wrapped_value, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
- PromiseInit(wrapped_value);
- }
-
- Node* const throwaway = InnerAllocate(base, kThrowawayPromiseOffset);
- {
- // Initialize throwawayPromise
- StoreMapNoWriteBarrier(throwaway, promise_map);
- InitializeJSObjectFromMap(
- throwaway, promise_map,
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields));
- PromiseInit(throwaway);
- }
-
- Node* const on_resolve = InnerAllocate(base, kResolveClosureOffset);
- {
- // Initialize resolve handler
- InitializeNativeClosure(closure_context, native_context, on_resolve,
- on_resolve_context_index);
- }
-
- Node* const on_reject = InnerAllocate(base, kRejectClosureOffset);
- {
- // Initialize reject handler
- InitializeNativeClosure(closure_context, native_context, on_reject,
- on_reject_context_index);
- }
-
- {
- // Add PromiseHooks if needed
- Label next(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &next);
- CallRuntime(Runtime::kPromiseHookInit, context, wrapped_value,
- outer_promise);
- CallRuntime(Runtime::kPromiseHookInit, context, throwaway, wrapped_value);
- Goto(&next);
- BIND(&next);
- }
-
- // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
- CallBuiltin(Builtins::kResolveNativePromise, context, wrapped_value, value);
-
- // The Promise will be thrown away and not handled, but it shouldn't trigger
- // unhandled reject events as its work is done
- PromiseSetHasHandler(throwaway);
-
- Label do_perform_promise_then(this);
- GotoIfNot(IsDebugActive(), &do_perform_promise_then);
- {
- Label common(this);
- GotoIf(TaggedIsSmi(value), &common);
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
- {
- // Mark the reject handler callback as a forwarding edge, rather
- // than a meaningful catch handler
- Node* const key =
- HeapConstant(factory()->promise_forwarding_handler_symbol());
- CallRuntime(Runtime::kSetProperty, context, on_reject, key,
- TrueConstant(), SmiConstant(LanguageMode::kStrict));
-
- GotoIf(IsFalse(is_predicted_as_caught), &common);
- PromiseSetHandledHint(value);
- }
-
- Goto(&common);
- BIND(&common);
- // Mark the dependency on the outer Promise in case the throwaway Promise is
- // found on the Promise stack
- CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
-
- Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
- SmiConstant(LanguageMode::kStrict));
- }
-
- Goto(&do_perform_promise_then);
- BIND(&do_perform_promise_then);
-
- CallBuiltin(Builtins::kPerformNativePromiseThen, context, wrapped_value,
- on_resolve, on_reject, throwaway);
-
- return wrapped_value;
-}
-
-void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context,
- Node* native_context,
- Node* function,
- Node* context_index) {
- Node* const function_map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- // Ensure that we don't have to initialize prototype_or_initial_map field of
- // JSFunction.
- CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map),
- IntPtrConstant(JSFunction::kSizeWithoutPrototype /
- kPointerSize)));
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- StoreMapNoWriteBarrier(function, function_map);
- StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(function, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(function, JSFunction::kFeedbackVectorOffset,
- Heap::kUndefinedCellRootIndex);
-
- Node* shared_info = LoadContextElement(native_context, context_index);
- CSA_ASSERT(this, IsSharedFunctionInfo(shared_info));
- StoreObjectFieldNoWriteBarrier(
- function, JSFunction::kSharedFunctionInfoOffset, shared_info);
- StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
-
- Node* const code =
- LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset);
- StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
-}
-
Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
Node* done) {
Node* const map = LoadContextElement(
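The rewritten Await above replaces the old bundle (closure context, wrapped promise, throwaway promise, and two JSFunction closures carved out of one AllocateInNewSpace block) with a single promise whose reaction points straight at the fulfill/reject builtin code objects. A toy C++ model of that reaction record (field names are illustrative only):

#include <memory>

struct Code { const char* name; };  // stands in for a builtin's code object

// Roughly JSPromise::kReactionsOrResultOffset + AllocatePromiseReaction:
// the reaction carries the generator as its handler payload, so no closures
// are needed to route the settlement back to the right frame.
struct PromiseReaction {
  std::shared_ptr<PromiseReaction> next;  // previously attached reactions
  void* generator;                        // forwarded to the handlers
  const Code* fulfill_handler;
  const Code* reject_handler;
};

struct Promise {
  std::shared_ptr<PromiseReaction> reactions;  // or the result, once settled
  bool has_handler = false;
};

void Await(Promise* promise, void* generator, const Code* on_fulfill,
           const Code* on_reject) {
  promise->reactions = std::make_shared<PromiseReaction>(
      PromiseReaction{promise->reactions, generator, on_fulfill, on_reject});
  promise->has_handler = true;  // PromiseSetHasHandler
}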
diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h
index 212b0b618b..70f68a498b 100644
--- a/deps/v8/src/builtins/builtins-async-gen.h
+++ b/deps/v8/src/builtins/builtins-async-gen.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_ASYNC_H_
-#define V8_BUILTINS_BUILTINS_ASYNC_H_
+#ifndef V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
+#define V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
#include "src/builtins/builtins-promise-gen.h"
@@ -16,51 +16,26 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
: PromiseBuiltinsAssembler(state) {}
protected:
- typedef std::function<void(Node*)> ContextInitializer;
-
- // Perform steps to resume the generator after `value` is resolved.
- // `on_reject_context_index` is an index into the Native Context, which should
- // point to a SharedFunctionInfo instance used to create the closure. The
- // value following the reject index should be a similar value for the resolve
- // closure. Returns the Promise-wrapped `value`.
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- Node* on_resolve_context_index, Node* on_reject_context_index,
- Node* is_predicted_as_caught);
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- int on_resolve_context_index, int on_reject_context_index,
- Node* is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise, context_length,
- init_closure_context, IntPtrConstant(on_resolve_context_index),
- IntPtrConstant(on_reject_context_index),
- is_predicted_as_caught);
- }
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
- int context_length,
- const ContextInitializer& init_closure_context,
- int on_resolve_context_index, int on_reject_context_index,
- bool is_predicted_as_caught) {
- return Await(context, generator, value, outer_promise, context_length,
- init_closure_context, on_resolve_context_index,
- on_reject_context_index,
- BooleanConstant(is_predicted_as_caught));
- }
+ void Await(Node* context, Node* generator, Node* value, Node* outer_promise,
+ Builtins::Name fulfill_builtin, Builtins::Name reject_builtin,
+ Node* is_predicted_as_caught);
+ void Await(Node* context, Node* generator, Node* value, Node* outer_promise,
+ Builtins::Name fulfill_builtin, Builtins::Name reject_builtin,
+ bool is_predicted_as_caught);
// Return a new built-in function object as defined in
// Async Iterator Value Unwrap Functions
Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
private:
- void InitializeNativeClosure(Node* context, Node* native_context,
- Node* function, Node* context_index);
Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
Node* done);
+ Node* AllocateAwaitPromiseJobTask(Node* generator, Node* fulfill_handler,
+ Node* reject_handler, Node* promise,
+ Node* context);
};
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_ASYNC_H_
+#endif // V8_BUILTINS_BUILTINS_ASYNC_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 70726a5f9d..b78747aaa9 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -68,24 +68,24 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
return IsGeneratorStateNotExecuting(LoadGeneratorState(generator));
}
- inline Node* LoadGeneratorAwaitedPromise(Node* const generator) {
- return LoadObjectField(generator,
- JSAsyncGeneratorObject::kAwaitedPromiseOffset);
+ inline Node* IsGeneratorAwaiting(Node* const generator) {
+ Node* is_generator_awaiting =
+ LoadObjectField(generator, JSAsyncGeneratorObject::kIsAwaitingOffset);
+ return SmiEqual(is_generator_awaiting, SmiConstant(1));
}
- inline Node* IsGeneratorNotSuspendedForAwait(Node* const generator) {
- return IsUndefined(LoadGeneratorAwaitedPromise(generator));
- }
-
- inline Node* IsGeneratorSuspendedForAwait(Node* const generator) {
- return HasInstanceType(LoadGeneratorAwaitedPromise(generator),
- JS_PROMISE_TYPE);
+ inline void SetGeneratorAwaiting(Node* const generator) {
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ StoreObjectFieldNoWriteBarrier(
+ generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(1));
+ CSA_ASSERT(this, IsGeneratorAwaiting(generator));
}
- inline void ClearAwaitedPromise(Node* const generator) {
- StoreObjectFieldRoot(generator,
- JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- Heap::kUndefinedValueRootIndex);
+ inline void SetGeneratorNotAwaiting(Node* const generator) {
+ CSA_ASSERT(this, IsGeneratorAwaiting(generator));
+ StoreObjectFieldNoWriteBarrier(
+ generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
}
inline void CloseGenerator(Node* const generator) {
@@ -140,8 +140,8 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
// for AsyncGenerators.
template <typename Descriptor>
void AsyncGeneratorAwait(bool is_catchable);
- void AsyncGeneratorAwaitResumeClosure(
- Node* context, Node* value,
+ void AsyncGeneratorAwaitResume(
+ Node* context, Node* generator, Node* argument,
JSAsyncGeneratorObject::ResumeMode resume_mode);
};
@@ -193,7 +193,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue(
MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
StringConstant(method_name), generator);
- CallBuiltin(Builtins::kRejectNativePromise, context, promise, error,
+ CallBuiltin(Builtins::kRejectPromise, context, promise, error,
TrueConstant());
args->PopAndReturn(promise);
}
@@ -219,21 +219,12 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest(
return request;
}
-void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
- Node* context, Node* value,
+void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResume(
+ Node* context, Node* generator, Node* argument,
JSAsyncGeneratorObject::ResumeMode resume_mode) {
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
-#if defined(DEBUG) && defined(ENABLE_SLOW_DCHECKS)
- Node* const awaited_promise = LoadGeneratorAwaitedPromise(generator);
- CSA_SLOW_ASSERT(this, HasInstanceType(awaited_promise, JS_PROMISE_TYPE));
- CSA_SLOW_ASSERT(this, Word32NotEqual(PromiseStatus(awaited_promise),
- Int32Constant(v8::Promise::kPending)));
-#endif
-
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
CSA_SLOW_ASSERT(this, IsGeneratorSuspended(generator));
@@ -242,40 +233,30 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
JSGeneratorObject::kResumeModeOffset,
SmiConstant(resume_mode));
- CallStub(CodeFactory::ResumeGenerator(isolate()), context, value, generator);
+ CallStub(CodeFactory::ResumeGenerator(isolate()), context, argument,
+ generator);
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- Node* generator = Parameter(Descriptor::kGenerator);
- Node* value = Parameter(Descriptor::kAwaited);
- Node* context = Parameter(Descriptor::kContext);
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const context = Parameter(Descriptor::kContext);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
CSA_ASSERT(this, IsNotUndefined(request));
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
Node* outer_promise =
LoadObjectField(request, AsyncGeneratorRequest::kPromiseOffset);
- const int resolve_index = Context::ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN;
- const int reject_index = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
-
- Node* promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, resolve_index, reject_index, is_catchable);
-
- CSA_SLOW_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ SetGeneratorAwaiting(generator);
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorAwaitFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_catchable);
Return(UndefinedConstant());
}
@@ -386,18 +367,20 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
"[AsyncGenerator].prototype.throw");
}
-TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorAwaitResumeClosure(context, value,
- JSAsyncGeneratorObject::kNext);
+TF_BUILTIN(AsyncGeneratorAwaitFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const context = Parameter(Descriptor::kContext);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSAsyncGeneratorObject::kNext);
}
-TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) {
- Node* value = Parameter(Descriptor::kValue);
- Node* context = Parameter(Descriptor::kContext);
- AsyncGeneratorAwaitResumeClosure(context, value,
- JSAsyncGeneratorObject::kThrow);
+TF_BUILTIN(AsyncGeneratorAwaitReject, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
+ Node* const context = Parameter(Descriptor::kContext);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSAsyncGeneratorObject::kThrow);
}
TF_BUILTIN(AsyncGeneratorAwaitUncaught, AsyncGeneratorBuiltinsAssembler) {
@@ -435,7 +418,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
CSA_ASSERT(this, IsGeneratorNotExecuting(generator));
// Stop resuming if suspended for Await.
- ReturnIf(IsGeneratorSuspendedForAwait(generator), UndefinedConstant());
+ ReturnIf(IsGeneratorAwaiting(generator), UndefinedConstant());
// Stop resuming if request queue is empty.
ReturnIf(IsUndefined(var_next.value()), UndefinedConstant());
@@ -452,10 +435,9 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
&settle_promise);
CloseGenerator(generator);
var_state.Bind(SmiConstant(JSGeneratorObject::kGeneratorClosed));
-
Goto(&settle_promise);
- BIND(&settle_promise);
+ BIND(&settle_promise);
Node* next_value = LoadValueFromAsyncGeneratorRequest(next);
Branch(SmiEqual(resume_type, SmiConstant(JSGeneratorObject::kReturn)),
&if_return, &if_throw);
@@ -511,7 +493,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator));
- CSA_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
+ CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
// If this assertion fails, the `value` component was not Awaited as it should
// have been, per https://github.com/tc39/proposal-async-iteration/pull/102/.
@@ -537,7 +519,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
}
// Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
- CallBuiltin(Builtins::kResolveNativePromise, context, promise, iter_result);
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
// Per spec, AsyncGeneratorResolve() returns undefined. However, for the
// benefit of %TraceExit(), return the Promise.
@@ -553,7 +535,7 @@ TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator);
Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
- Return(CallBuiltin(Builtins::kRejectNativePromise, context, promise, value,
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, value,
TrueConstant()));
}
@@ -566,34 +548,23 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request);
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
-
- const int on_resolve = Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN;
- const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN;
-
- Node* const promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, on_resolve, on_reject, is_caught);
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ SetGeneratorAwaiting(generator);
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorYieldFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_caught);
Return(UndefinedConstant());
}
-TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorYieldFulfill, AsyncGeneratorBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// Per proposal-async-iteration/#sec-asyncgeneratoryield step 9
// Return ! AsyncGeneratorResolve(_F_.[[Generator]], _value_, *false*).
- CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, value,
+ CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, argument,
FalseConstant());
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
@@ -619,42 +590,33 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
Node* const generator = Parameter(Descriptor::kGenerator);
Node* const value = Parameter(Descriptor::kValue);
Node* const is_caught = Parameter(Descriptor::kIsCaught);
+ Node* const context = Parameter(Descriptor::kContext);
Node* const req = LoadFirstAsyncGeneratorRequestFromQueue(generator);
+ Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req);
CSA_ASSERT(this, IsNotUndefined(req));
- Label perform_await(this);
- VARIABLE(var_on_resolve, MachineType::PointerRepresentation(),
- IntPtrConstant(
- Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN));
- VARIABLE(
- var_on_reject, MachineType::PointerRepresentation(),
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN));
-
+ Label if_closed(this, Label::kDeferred), if_not_closed(this), done(this);
Node* const state = LoadGeneratorState(generator);
- GotoIf(IsGeneratorStateClosed(state), &perform_await);
- var_on_resolve.Bind(
- IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN));
- var_on_reject.Bind(
- IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN));
- Goto(&perform_await);
+ SetGeneratorAwaiting(generator);
+ Branch(IsGeneratorStateClosed(state), &if_closed, &if_not_closed);
- BIND(&perform_await);
+ BIND(&if_closed);
+ {
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorReturnClosedFulfill,
+ Builtins::kAsyncGeneratorReturnClosedReject, is_caught);
+ Goto(&done);
+ }
- ContextInitializer init_closure_context = [&](Node* context) {
- StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
- generator);
- };
+ BIND(&if_not_closed);
+ {
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorReturnFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_caught);
+ Goto(&done);
+ }
- Node* const context = Parameter(Descriptor::kContext);
- Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req);
- Node* const promise =
- Await(context, generator, value, outer_promise, AwaitContext::kLength,
- init_closure_context, var_on_resolve.value(), var_on_reject.value(),
- is_caught);
-
- CSA_SLOW_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
- StoreObjectField(generator, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
- promise);
+ BIND(&done);
Return(UndefinedConstant());
}
@@ -662,49 +624,44 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// Resume the generator with "return" resume_mode, and finally perform
// AsyncGeneratorResumeNext. Per
// proposal-async-iteration/#sec-asyncgeneratoryield step 8.e
-TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- AsyncGeneratorAwaitResumeClosure(context, value, JSGeneratorObject::kReturn);
+ AsyncGeneratorAwaitResume(context, generator, argument,
+ JSGeneratorObject::kReturn);
}
// On-resolve closure for Await in AsyncGeneratorReturn
// Perform AsyncGeneratorResolve({awaited_value}, true) and finally perform
// AsyncGeneratorResumeNext.
-TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnClosedFulfill, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// https://tc39.github.io/proposal-async-iteration/
// #async-generator-resume-next-return-processor-fulfilled step 2:
// Return ! AsyncGeneratorResolve(_F_.[[Generator]], _value_, *true*).
- CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, value,
+ CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, argument,
TrueConstant());
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
-TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
- AsyncGeneratorBuiltinsAssembler) {
+TF_BUILTIN(AsyncGeneratorReturnClosedReject, AsyncGeneratorBuiltinsAssembler) {
+ Node* const generator = Parameter(Descriptor::kGenerator);
+ Node* const argument = Parameter(Descriptor::kArgument);
Node* const context = Parameter(Descriptor::kContext);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const generator =
- LoadContextElement(context, AwaitContext::kGeneratorSlot);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspendedForAwait(generator));
- ClearAwaitedPromise(generator);
+ SetGeneratorNotAwaiting(generator);
// https://tc39.github.io/proposal-async-iteration/
// #async-generator-resume-next-return-processor-rejected step 2:
// Return ! AsyncGeneratorReject(_F_.[[Generator]], _reason_).
- CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator, value);
+ CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator, argument);
TailCallBuiltin(Builtins::kAsyncGeneratorResumeNext, context, generator);
}
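Tracking "suspended for await" above moves from storing the awaited promise itself to a simple Smi flag that is asserted to toggle strictly 0 -> 1 -> 0. A compact C++ analogue of SetGeneratorAwaiting / SetGeneratorNotAwaiting (names mirrored, semantics simplified):

#include <cassert>

struct AsyncGenerator {
  int is_awaiting = 0;  // JSAsyncGeneratorObject::kIsAwaitingOffset, as a Smi
};

void SetGeneratorAwaiting(AsyncGenerator* g) {
  assert(g->is_awaiting == 0);  // must not already be awaiting
  g->is_awaiting = 1;
}

void SetGeneratorNotAwaiting(AsyncGenerator* g) {
  assert(g->is_awaiting == 1);  // must currently be awaiting
  g->is_awaiting = 0;
}

// AsyncGeneratorResumeNext stops early while an await is in flight:
bool ShouldStopResuming(const AsyncGenerator& g) { return g.is_awaiting != 0; }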
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index f232b32700..58691bd00e 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -28,13 +28,29 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
typedef std::function<void(Node* const context, Node* const promise,
Label* if_exception)>
UndefinedMethodHandler;
+ typedef std::function<Node*(Node*)> SyncIteratorNodeGenerator;
void Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
- Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+ const SyncIteratorNodeGenerator& get_method,
+ const UndefinedMethodHandler& if_method_undefined,
const char* operation_name,
Label::Type reject_label_type = Label::kDeferred,
Node* const initial_exception_value = nullptr);
+ void Generate_AsyncFromSyncIteratorMethod(
+ Node* const context, Node* const iterator, Node* const sent_value,
+ Handle<String> name, const UndefinedMethodHandler& if_method_undefined,
+ const char* operation_name,
+ Label::Type reject_label_type = Label::kDeferred,
+ Node* const initial_exception_value = nullptr) {
+ auto get_method = [=](Node* const sync_iterator) {
+ return GetProperty(context, sync_iterator, name);
+ };
+ return Generate_AsyncFromSyncIteratorMethod(
+ context, iterator, sent_value, get_method, if_method_undefined,
+ operation_name, reject_label_type, initial_exception_value);
+ }
+
// Load "value" and "done" from an iterator result object. If an exception
// is thrown at any point, jumps to the `if_exception` label with the exception
// stored in `var_exception`.
@@ -79,7 +95,8 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
- Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+ const SyncIteratorNodeGenerator& get_method,
+ const UndefinedMethodHandler& if_method_undefined,
const char* operation_name, Label::Type reject_label_type,
Node* const initial_exception_value) {
Node* const native_context = LoadNativeContext(context);
@@ -96,7 +113,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
Node* const sync_iterator =
LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
- Node* const method = GetProperty(context, sync_iterator, method_name);
+ Node* const method = get_method(sync_iterator);
if (if_method_undefined) {
Label if_isnotundefined(this);
@@ -119,7 +136,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
// Perform ! Call(valueWrapperCapability.[[Resolve]], undefined, «
// throwValue »).
- CallBuiltin(Builtins::kResolveNativePromise, context, wrapper, value);
+ CallBuiltin(Builtins::kResolvePromise, context, wrapper, value);
// Let onFulfilled be a new built-in function object as defined in
// Async Iterator Value Unwrap Functions.
@@ -128,13 +145,13 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
// Perform ! PerformPromiseThen(valueWrapperCapability.[[Promise]],
// onFulfilled, undefined, promiseCapability).
- Return(CallBuiltin(Builtins::kPerformNativePromiseThen, context, wrapper,
+ Return(CallBuiltin(Builtins::kPerformPromiseThen, context, wrapper,
on_fulfilled, UndefinedConstant(), promise));
BIND(&reject_promise);
{
Node* const exception = var_exception.value();
- CallBuiltin(Builtins::kRejectNativePromise, context, promise, exception,
+ CallBuiltin(Builtins::kRejectPromise, context, promise, exception,
TrueConstant());
Return(promise);
}
@@ -211,6 +228,7 @@ std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
BIND(&done);
return std::make_pair(var_value.value(), var_done.value());
}
+
} // namespace
// https://tc39.github.io/proposal-async-iteration/
@@ -220,9 +238,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
+ auto get_method = [=](Node* const unused) {
+ return LoadObjectField(iterator, JSAsyncFromSyncIterator::kNextOffset);
+ };
Generate_AsyncFromSyncIteratorMethod(
- context, iterator, value, factory()->next_string(),
- UndefinedMethodHandler(), "[Async-from-Sync Iterator].prototype.next");
+ context, iterator, value, get_method, UndefinedMethodHandler(),
+ "[Async-from-Sync Iterator].prototype.next");
}
// https://tc39.github.io/proposal-async-iteration/
@@ -243,7 +264,7 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
// IfAbruptRejectPromise(nextDone, promiseCapability).
// Return promiseCapability.[[Promise]].
- PromiseFulfill(context, promise, iter_result, v8::Promise::kFulfilled);
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
Return(promise);
};
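Method lookup here becomes pluggable: callers hand Generate_AsyncFromSyncIteratorMethod a small function that produces the method node, which lets prototype.next read the cached method from a fixed field while return/throw keep the generic property load. A C++ sketch of the same inversion (illustrative types, not V8's):

#include <functional>
#include <map>
#include <string>

struct SyncIterator {
  std::string cached_next = "<next fn>";          // fixed-offset field
  std::map<std::string, std::string> properties;  // generic property lookup
};

using MethodGenerator = std::function<std::string(const SyncIterator&)>;

std::string DriveIteratorMethod(const SyncIterator& it,
                                const MethodGenerator& get_method) {
  return get_method(it);  // the driver no longer cares how the method is found
}

int main() {
  SyncIterator it;
  it.properties["return"] = "<return fn>";
  // prototype.next: load from the known offset, skipping a property walk.
  DriveIteratorMethod(it, [](const SyncIterator& i) { return i.cached_next; });
  // prototype.return: fall back to a generic GetProperty-style lookup.
  DriveIteratorMethod(it, [](const SyncIterator& i) {
    auto found = i.properties.find("return");
    return found == i.properties.end() ? std::string("undefined") : found->second;
  });
}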
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 6d9bb6e797..fdbd3937d4 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -36,33 +36,6 @@ BUILTIN(BigIntConstructor_ConstructStub) {
isolate->factory()->BigInt_string()));
}
-BUILTIN(BigIntParseInt) {
- HandleScope scope(isolate);
- Handle<Object> string = args.atOrUndefined(isolate, 1);
- Handle<Object> radix = args.atOrUndefined(isolate, 2);
-
- // Convert {string} to a String and flatten it.
- // Fast path: avoid back-and-forth conversion for Smi inputs.
- if (string->IsSmi() && radix->IsUndefined(isolate)) {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, string));
- }
- Handle<String> subject;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, subject,
- Object::ToString(isolate, string));
- subject = String::Flatten(subject);
-
- // Convert {radix} to Int32.
- if (!radix->IsNumber()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix, Object::ToNumber(radix));
- }
- int radix32 = DoubleToInt32(radix->Number());
- if (radix32 != 0 && (radix32 < 2 || radix32 > 36)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewSyntaxError(MessageTemplate::kToRadixFormatRange));
- }
- RETURN_RESULT_OR_FAILURE(isolate, BigIntParseInt(isolate, subject, radix32));
-}
-
BUILTIN(BigIntAsUintN) {
HandleScope scope(isolate);
Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
@@ -97,14 +70,6 @@ BUILTIN(BigIntAsIntN) {
return *BigInt::AsIntN(bits->Number(), bigint);
}
-BUILTIN(BigIntPrototypeToLocaleString) {
- HandleScope scope(isolate);
-
- // TODO(jkummerow): Implement.
-
- UNIMPLEMENTED();
-}
-
namespace {
MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
@@ -127,18 +92,14 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
BigInt);
}
-} // namespace
-
-BUILTIN(BigIntPrototypeToString) {
- HandleScope scope(isolate);
+Object* BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
+ Isolate* isolate, const char* builtin_name) {
// 1. Let x be ? thisBigIntValue(this value).
Handle<BigInt> x;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, x,
- ThisBigIntValue(isolate, args.receiver(), "BigInt.prototype.toString"));
+ isolate, x, ThisBigIntValue(isolate, receiver, builtin_name));
// 2. If radix is not present, let radixNumber be 10.
// 3. Else if radix is undefined, let radixNumber be 10.
- Handle<Object> radix = args.atOrUndefined(isolate, 1);
int radix_number;
if (radix->IsUndefined(isolate)) {
radix_number = 10;
@@ -158,6 +119,22 @@ BUILTIN(BigIntPrototypeToString) {
RETURN_RESULT_OR_FAILURE(isolate, BigInt::ToString(x, radix_number));
}
+} // namespace
+
+BUILTIN(BigIntPrototypeToLocaleString) {
+ HandleScope scope(isolate);
+ Handle<Object> radix = isolate->factory()->undefined_value();
+ return BigIntToStringImpl(args.receiver(), radix, isolate,
+ "BigInt.prototype.toLocaleString");
+}
+
+BUILTIN(BigIntPrototypeToString) {
+ HandleScope scope(isolate);
+ Handle<Object> radix = args.atOrUndefined(isolate, 1);
+ return BigIntToStringImpl(args.receiver(), radix, isolate,
+ "BigInt.prototype.toString");
+}
+
BUILTIN(BigIntPrototypeValueOf) {
HandleScope scope(isolate);
RETURN_RESULT_OR_FAILURE(
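toString and toLocaleString now funnel through one helper that defaults an absent or undefined radix to 10 and range-checks it against [2, 36]. A standalone C++ sketch of that shared radix handling, using optional/exception in place of V8's handles and RangeError (assumed simplification):

#include <cstdint>
#include <optional>
#include <stdexcept>
#include <string>

// Shared core, as in BigIntToStringImpl: the two callers differ only in how
// they obtain the radix argument.
std::string BigIntToStringImpl(int64_t x, std::optional<double> radix) {
  int radix_number = 10;                      // absent/undefined radix -> 10
  if (radix.has_value()) {
    radix_number = static_cast<int>(*radix);  // truncate, as DoubleToInt32 does
    if (radix_number < 2 || radix_number > 36)
      throw std::range_error("toString() radix must be between 2 and 36");
  }
  std::string digits, sign = x < 0 ? "-" : "";
  uint64_t v = x < 0 ? -static_cast<uint64_t>(x) : static_cast<uint64_t>(x);
  do {
    digits.insert(digits.begin(),
                  "0123456789abcdefghijklmnopqrstuvwxyz"[v % radix_number]);
    v /= radix_number;
  } while (v != 0);
  return sign + digits;
}

std::string ToString(int64_t x, std::optional<double> radix) {
  return BigIntToStringImpl(x, radix);         // BigInt.prototype.toString
}
std::string ToLocaleString(int64_t x) {
  return BigIntToStringImpl(x, std::nullopt);  // radix forced to undefined
}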
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index d4a7153d74..7443202c98 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -168,7 +168,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
Node* elements_length = LoadFixedArrayBaseLength(elements);
GotoIfNot(WordEqual(length, elements_length), &if_runtime);
var_elements.Bind(elements);
- var_length.Bind(SmiToWord32(length));
+ var_length.Bind(SmiToInt32(length));
Goto(&if_done);
}
@@ -289,12 +289,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
&if_runtime);
// Check that the map of the initial array iterator hasn't changed.
- Node* native_context = LoadNativeContext(context);
- Node* arr_it_proto_map = LoadMap(CAST(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)));
- Node* initial_map = LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX);
- GotoIfNot(WordEqual(arr_it_proto_map, initial_map), &if_runtime);
+ TNode<Context> native_context = LoadNativeContext(context);
+ GotoIfNot(HasInitialArrayIteratorPrototypeMap(native_context), &if_runtime);
Node* kind = LoadMapElementsKind(spread_map);
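The SmiToWord32 -> SmiToInt32 rename here (and SmiFromWord -> SmiFromIntPtr later in this commit) makes the payload width explicit in the name. A rough sketch of what the conversion does, assuming a 64-bit host and the full-word Smi layout where the 31/32-bit payload sits in the upper half of a tagged word with a clear low tag bit; this illustrates the tagging scheme only and is not V8 code:

```cpp
#include <cassert>
#include <cstdint>

// Assumption: 64-bit full-word Smis (payload in the upper 32 bits, tag bit 0).
// Pointer-compressed and 32-bit builds use a different layout.
constexpr intptr_t SmiTag(int32_t value) {
  return static_cast<intptr_t>(
      static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32);
}
constexpr int32_t SmiToInt32(intptr_t tagged) {
  return static_cast<int32_t>(static_cast<uint64_t>(tagged) >> 32);
}

int main() {
  assert(SmiToInt32(SmiTag(-7)) == -7);
  assert((SmiTag(42) & 1) == 0);  // the clear low bit marks "not a pointer"
}
```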
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 392040c995..563703707c 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -26,31 +26,32 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
virtual ~BaseCollectionsAssembler() {}
protected:
- enum Variant { kMap, kSet };
+ enum Variant { kMap, kSet, kWeakMap, kWeakSet };
// Adds an entry to a collection. For Maps, properly handles extracting the
// key and value from the entry (see LoadKeyValue()).
- TNode<Object> AddConstructorEntry(Variant variant, TNode<Context> context,
- TNode<Object> collection,
- TNode<Object> add_function,
- TNode<Object> key_value,
- Label* if_exception = nullptr,
- TVariable<Object>* var_exception = nullptr);
+ void AddConstructorEntry(Variant variant, TNode<Context> context,
+ TNode<Object> collection, TNode<Object> add_function,
+ TNode<Object> key_value,
+ Label* if_may_have_side_effects = nullptr,
+ Label* if_exception = nullptr,
+ TVariable<Object>* var_exception = nullptr);
// Adds constructor entries to a collection. Choosing a fast path when
// possible.
void AddConstructorEntries(Variant variant, TNode<Context> context,
TNode<Context> native_context,
TNode<Object> collection,
- TNode<Object> initial_entries,
- TNode<BoolT> is_fast_jsarray);
+ TNode<Object> initial_entries);
// Fast path for adding constructor entries. Assumes the entries are a fast
// JS array (see CodeStubAssembler::BranchIfFastJSArray()).
void AddConstructorEntriesFromFastJSArray(Variant variant,
TNode<Context> context,
+ TNode<Context> native_context,
TNode<Object> collection,
- TNode<JSArray> fast_jsarray);
+ TNode<JSArray> fast_jsarray,
+ Label* if_may_have_side_effects);
// Adds constructor entries to a collection using the iterator protocol.
void AddConstructorEntriesFromIterable(Variant variant,
@@ -61,8 +62,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Constructs a collection instance. Choosing a fast path when possible.
TNode<Object> AllocateJSCollection(TNode<Context> context,
- TNode<Context> native_context,
- int constructor_function_index,
+ TNode<JSFunction> constructor,
TNode<Object> new_target);
// Fast path for constructing a collection instance if the constructor
@@ -72,7 +72,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Fallback for constructing a collection instance if the constructor function
// has been modified.
TNode<Object> AllocateJSCollectionSlow(TNode<Context> context,
- TNode<HeapObject> constructor,
+ TNode<JSFunction> constructor,
TNode<Object> new_target);
// Allocates the backing store for a collection.
@@ -81,15 +81,26 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Main entry point for a collection constructor builtin.
void GenerateConstructor(Variant variant,
- const int constructor_function_index,
- Handle<String> constructor_function_name,
- int collection_tableoffset);
+ Handle<String> constructor_function_name);
// Retrieves the collection function that adds an entry. `set` for Maps and
// `add` for Sets.
TNode<Object> GetAddFunction(Variant variant, TNode<Context> context,
TNode<Object> collection);
+ // Retrieves the collection constructor function.
+ TNode<JSFunction> GetConstructor(Variant variant,
+ TNode<Context> native_context);
+
+ // Retrieves the initial collection function that adds an entry. Should only
+ // be called when it is certain that a collection prototype's map hasn't been
+ // changed.
+ TNode<JSFunction> GetInitialAddFunction(Variant variant,
+ TNode<Context> native_context);
+
+ // Retrieves the offset to access the backing table from the collection.
+ int GetTableOffset(Variant variant);
+
// Estimates the number of entries the collection will have after adding the
// entries passed in the constructor. AllocateTable() can use this to avoid
// the time of growing/rehashing when adding the constructor entries.
@@ -98,6 +109,11 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
void GotoIfNotJSReceiver(Node* const obj, Label* if_not_receiver);
+ // Determines whether the collection's prototype has been modified.
+ TNode<BoolT> HasInitialCollectionPrototype(Variant variant,
+ TNode<Context> native_context,
+ TNode<Object> collection);
+
// Loads an element from a fixed array. If the element is the hole, returns
// `undefined`.
TNode<Object> LoadAndNormalizeFixedArrayElement(TNode<Object> elements,
@@ -112,59 +128,85 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// array. If the array lacks 2 elements, undefined is used.
void LoadKeyValue(TNode<Context> context, TNode<Object> maybe_array,
TVariable<Object>* key, TVariable<Object>* value,
+ Label* if_may_have_side_effects = nullptr,
Label* if_exception = nullptr,
TVariable<Object>* var_exception = nullptr);
};
-TNode<Object> BaseCollectionsAssembler::AddConstructorEntry(
+void BaseCollectionsAssembler::AddConstructorEntry(
Variant variant, TNode<Context> context, TNode<Object> collection,
- TNode<Object> add_function, TNode<Object> key_value, Label* if_exception,
+ TNode<Object> add_function, TNode<Object> key_value,
+ Label* if_may_have_side_effects, Label* if_exception,
TVariable<Object>* var_exception) {
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
- if (variant == kMap) {
- Label exit(this), if_notobject(this, Label::kDeferred);
- GotoIfNotJSReceiver(key_value, &if_notobject);
-
+ if (variant == kMap || variant == kWeakMap) {
TVARIABLE(Object, key);
TVARIABLE(Object, value);
- LoadKeyValue(context, key_value, &key, &value, if_exception, var_exception);
- Node* key_n = key;
- Node* value_n = value;
- TNode<Object> add_call =
- UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
- add_function, collection, key_n, value_n));
- Goto(&exit);
-
- BIND(&if_notobject);
- {
- Node* ret = CallRuntime(
- Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kIteratorValueNotAnObject), key_value);
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(ret, if_exception, var_exception);
- }
- Unreachable();
- }
- BIND(&exit);
- return add_call;
-
- } else { // variant == kSet
- DCHECK(variant == kSet);
- return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
- add_function, collection, key_value));
+ LoadKeyValue(context, key_value, &key, &value, if_may_have_side_effects,
+ if_exception, var_exception);
+ Node* key_n = key.value();
+ Node* value_n = value.value();
+ Node* ret = CallJS(CodeFactory::Call(isolate()), context, add_function,
+ collection, key_n, value_n);
+ GotoIfException(ret, if_exception, var_exception);
+ } else {
+ DCHECK(variant == kSet || variant == kWeakSet);
+ Node* ret = CallJS(CodeFactory::Call(isolate()), context, add_function,
+ collection, key_value);
+ GotoIfException(ret, if_exception, var_exception);
}
}
void BaseCollectionsAssembler::AddConstructorEntries(
Variant variant, TNode<Context> context, TNode<Context> native_context,
- TNode<Object> collection, TNode<Object> initial_entries,
- TNode<BoolT> is_fast_jsarray) {
- Label exit(this), slow_loop(this, Label::kDeferred);
- GotoIf(IsNullOrUndefined(initial_entries), &exit);
+ TNode<Object> collection, TNode<Object> initial_entries) {
+ TVARIABLE(BoolT, use_fast_loop,
+ IsFastJSArrayWithNoCustomIteration(initial_entries, context,
+ native_context));
+ TNode<IntPtrT> at_least_space_for =
+ EstimatedInitialSize(initial_entries, use_fast_loop.value());
+ Label allocate_table(this, &use_fast_loop), exit(this), fast_loop(this),
+ slow_loop(this, Label::kDeferred);
+ Goto(&allocate_table);
+ BIND(&allocate_table);
+ {
+ TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
+ StoreObjectField(collection, GetTableOffset(variant), table);
+ GotoIf(IsNullOrUndefined(initial_entries), &exit);
+ GotoIfNot(
+ HasInitialCollectionPrototype(variant, native_context, collection),
+ &slow_loop);
+ Branch(use_fast_loop.value(), &fast_loop, &slow_loop);
+ }
+ BIND(&fast_loop);
+ {
+ TNode<JSArray> initial_entries_jsarray =
+ UncheckedCast<JSArray>(initial_entries);
+#if DEBUG
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(
+ initial_entries_jsarray, context, native_context));
+ TNode<Map> original_initial_entries_map = LoadMap(initial_entries_jsarray);
+#endif
+
+ Label if_may_have_side_effects(this, Label::kDeferred);
+ AddConstructorEntriesFromFastJSArray(variant, context, native_context,
+ collection, initial_entries_jsarray,
+ &if_may_have_side_effects);
+ Goto(&exit);
- // TODO(mvstanton): Re-enable the fast path when a fix is found for
- // crbug.com/798026.
+ if (variant == kMap || variant == kWeakMap) {
+ BIND(&if_may_have_side_effects);
+#if DEBUG
+ CSA_ASSERT(this, HasInitialCollectionPrototype(variant, native_context,
+ collection));
+ CSA_ASSERT(this, WordEqual(original_initial_entries_map,
+ LoadMap(initial_entries_jsarray)));
+#endif
+ use_fast_loop = Int32FalseConstant();
+ Goto(&allocate_table);
+ }
+ }
+ BIND(&slow_loop);
{
AddConstructorEntriesFromIterable(variant, context, native_context,
collection, initial_entries);
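The constructor path now allocates the backing table first and tracks a use_fast_loop flag: if the fast array walk discovers that loading a key/value pair might run user code, control loops back to allocate_table with the flag cleared, discarding the partially filled table and redoing all the work through the iterator protocol. A schematic of that retry shape in plain C++ (BuildTable and the callbacks are illustrative names for allocate_table, AddConstructorEntriesFromFastJSArray and AddConstructorEntriesFromIterable):

```cpp
#include <functional>
#include <vector>

template <typename Table>
Table BuildTable(bool fast_eligible,
                 const std::function<bool(Table&)>& fast_loop,   // false => side effects possible
                 const std::function<void(Table&)>& slow_loop) {
  bool use_fast_loop = fast_eligible;      // IsFastJSArrayWithNoCustomIteration
  for (;;) {                               // Goto(&allocate_table)
    Table table;                           // AllocateTable(variant, ...)
    if (use_fast_loop) {
      if (fast_loop(table)) return table;  // no side effects: done
      use_fast_loop = false;               // use_fast_loop = Int32FalseConstant()
      continue;                            // drop the half-filled table, retry
    }
    slow_loop(table);                      // generic iterator protocol
    return table;
  }
}

int main() {
  using T = std::vector<int>;
  T t = BuildTable<T>(
      true,
      [](T& tab) { tab.push_back(1); return false; },  // bails out mid-way
      [](T& tab) { tab = {1, 2, 3}; });
  return t.size() == 3 ? 0 : 1;
}
```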
@@ -174,17 +216,26 @@ void BaseCollectionsAssembler::AddConstructorEntries(
}
void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
- Variant variant, TNode<Context> context, TNode<Object> collection,
- TNode<JSArray> fast_jsarray) {
+ Variant variant, TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> collection, TNode<JSArray> fast_jsarray,
+ Label* if_may_have_side_effects) {
TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
TNode<Int32T> elements_kind = LoadMapElementsKind(LoadMap(fast_jsarray));
+ TNode<JSFunction> add_func = GetInitialAddFunction(variant, native_context);
+ CSA_ASSERT(
+ this,
+ WordEqual(GetAddFunction(variant, native_context, collection), add_func));
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(fast_jsarray, context,
+ native_context));
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
- TNode<Object> add_func = GetAddFunction(variant, context, collection);
-
- CSA_ASSERT(this, IsFastJSArray(fast_jsarray, context));
- CSA_ASSERT(this, IsFastElementsKind(elements_kind));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
+ CSA_ASSERT(
+ this, HasInitialCollectionPrototype(variant, native_context, collection));
+#if DEBUG
+ TNode<Map> original_collection_map = LoadMap(CAST(collection));
+ TNode<Map> original_fast_js_array_map = LoadMap(fast_jsarray);
+#endif
Label exit(this), if_doubles(this), if_smiorobjects(this);
Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
&if_doubles);
@@ -193,8 +244,14 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
auto set_entry = [&](Node* index) {
TNode<Object> element = LoadAndNormalizeFixedArrayElement(
elements, UncheckedCast<IntPtrT>(index));
- AddConstructorEntry(variant, context, collection, add_func, element);
+ AddConstructorEntry(variant, context, collection, add_func, element,
+ if_may_have_side_effects);
};
+
+ // Instead of using the slower iteration protocol to iterate over the
+ // elements, a fast loop is used. This assumes that adding an element
+ // to the collection does not call user code that could mutate the elements
+ // or collection.
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
Goto(&exit);
@@ -203,7 +260,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
{
// A Map constructor requires entries to be arrays (ex. [key, value]),
// so a FixedDoubleArray can never succeed.
- if (variant == kMap) {
+ if (variant == kMap || variant == kWeakMap) {
TNode<Float64T> element =
UncheckedCast<Float64T>(LoadFixedDoubleArrayElement(
elements, IntPtrConstant(0), MachineType::Float64(), 0,
@@ -211,10 +268,11 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
ThrowTypeError(context, MessageTemplate::kIteratorValueNotAnObject,
AllocateHeapNumberWithValue(element));
} else {
+ DCHECK(variant == kSet || variant == kWeakSet);
auto set_entry = [&](Node* index) {
TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement(
elements, UncheckedCast<IntPtrT>(index));
- AddConstructorEntry(kSet, context, collection, add_func, entry);
+ AddConstructorEntry(variant, context, collection, add_func, entry);
};
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
@@ -222,6 +280,12 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
}
}
BIND(&exit);
+#if DEBUG
+ CSA_ASSERT(this,
+ WordEqual(original_collection_map, LoadMap(CAST(collection))));
+ CSA_ASSERT(this,
+ WordEqual(original_fast_js_array_map, LoadMap(fast_jsarray)));
+#endif
}
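The fast loop indexes straight into the array's elements backing store, which is only sound because the initial Map.prototype.set / Set.prototype.add never re-enter user code; the DEBUG-only map snapshots above assert exactly that neither the collection nor the source array was reshaped. A toy version of the invariant:

```cpp
#include <cassert>
#include <vector>

struct FastArray {
  const void* map;  // shape identity; changes whenever the object is reshaped
  std::vector<int> elements;
};

template <typename AddFn>
void AddEntriesFromFastArray(FastArray& arr, AddFn add) {
#ifndef NDEBUG
  const void* original_map = arr.map;  // mirrors the CSA_ASSERT snapshots
#endif
  for (size_t i = 0; i < arr.elements.size(); ++i) add(arr.elements[i]);
#ifndef NDEBUG
  assert(arr.map == original_map && "fast path must not trigger user code");
#endif
}

int main() {
  int shape_token = 0;
  FastArray a{&shape_token, {1, 2, 3}};
  int sum = 0;
  AddEntriesFromFastArray(a, [&](int v) { sum += v; });
  return sum == 6 ? 0 : 1;
}
```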
void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
@@ -247,10 +311,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
context, iterator, &exit, fast_iterator_result_map));
TNode<Object> next_value = CAST(iterator_assembler.IteratorValue(
context, next, fast_iterator_result_map));
- TNode<Object> add_result =
- AddConstructorEntry(variant, context, collection, add_func, next_value,
- &if_exception, &var_exception);
- GotoIfException(add_result, &if_exception, &var_exception);
+ AddConstructorEntry(variant, context, collection, add_func, next_value,
+ nullptr, &if_exception, &var_exception);
Goto(&loop);
}
BIND(&if_exception);
@@ -262,10 +324,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
}
TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
- TNode<Context> context, TNode<Context> native_context,
- int constructor_function_index, TNode<Object> new_target) {
- TNode<HeapObject> constructor =
- CAST(LoadContextElement(native_context, constructor_function_index));
+ TNode<Context> context, TNode<JSFunction> constructor,
+ TNode<Object> new_target) {
TNode<BoolT> is_target_unmodified = WordEqual(constructor, new_target);
return Select<Object>(is_target_unmodified,
@@ -286,7 +346,7 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionFast(
}
TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
- TNode<Context> context, TNode<HeapObject> constructor,
+ TNode<Context> context, TNode<JSFunction> constructor,
TNode<Object> new_target) {
ConstructorBuiltinsAssembler constructor_assembler(this->state());
return CAST(constructor_assembler.EmitFastNewObject(context, constructor,
@@ -294,8 +354,7 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
}
void BaseCollectionsAssembler::GenerateConstructor(
- Variant variant, const int constructor_function_index,
- Handle<String> constructor_function_name, int collection_tableoffset) {
+ Variant variant, Handle<String> constructor_function_name) {
const int kIterableArg = 0;
CodeStubArguments args(
this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
@@ -306,17 +365,11 @@ void BaseCollectionsAssembler::GenerateConstructor(
Label if_undefined(this, Label::kDeferred);
GotoIf(IsUndefined(new_target), &if_undefined);
- TNode<BoolT> is_fast_jsarray = IsFastJSArray(iterable, context);
- TNode<IntPtrT> at_least_space_for =
- EstimatedInitialSize(iterable, is_fast_jsarray);
TNode<Context> native_context = LoadNativeContext(context);
TNode<Object> collection = AllocateJSCollection(
- context, native_context, constructor_function_index, new_target);
- TNode<Object> table = AllocateTable(variant, context, at_least_space_for);
+ context, GetConstructor(variant, native_context), new_target);
- StoreObjectField(collection, collection_tableoffset, table);
- AddConstructorEntries(variant, context, native_context, collection, iterable,
- is_fast_jsarray);
+ AddConstructorEntries(variant, context, native_context, collection, iterable);
Return(collection);
BIND(&if_undefined);
@@ -326,14 +379,10 @@ void BaseCollectionsAssembler::GenerateConstructor(
TNode<Object> BaseCollectionsAssembler::GetAddFunction(
Variant variant, TNode<Context> context, TNode<Object> collection) {
- // TODO(pwong): Consider calling the builtin directly when the prototype is
- // unmodified. This will require tracking WeakMap/WeakSet prototypes on the
- // native context.
- Handle<String> add_func_name = variant == kMap
+ Handle<String> add_func_name = (variant == kMap || variant == kWeakMap)
? isolate()->factory()->set_string()
: isolate()->factory()->add_string();
- TNode<Object> add_func =
- CAST(GetProperty(context, collection, add_func_name));
+ TNode<Object> add_func = GetProperty(context, collection, add_func_name);
Label exit(this), if_notcallable(this, Label::kDeferred);
GotoIf(TaggedIsSmi(add_func), &if_notcallable);
@@ -348,6 +397,60 @@ TNode<Object> BaseCollectionsAssembler::GetAddFunction(
return add_func;
}
+TNode<JSFunction> BaseCollectionsAssembler::GetConstructor(
+ Variant variant, TNode<Context> native_context) {
+ int index;
+ switch (variant) {
+ case kMap:
+ index = Context::JS_MAP_FUN_INDEX;
+ break;
+ case kSet:
+ index = Context::JS_SET_FUN_INDEX;
+ break;
+ case kWeakMap:
+ index = Context::JS_WEAK_MAP_FUN_INDEX;
+ break;
+ case kWeakSet:
+ index = Context::JS_WEAK_SET_FUN_INDEX;
+ break;
+ }
+ return CAST(LoadContextElement(native_context, index));
+}
+
+TNode<JSFunction> BaseCollectionsAssembler::GetInitialAddFunction(
+ Variant variant, TNode<Context> native_context) {
+ int index;
+ switch (variant) {
+ case kMap:
+ index = Context::MAP_SET_INDEX;
+ break;
+ case kSet:
+ index = Context::SET_ADD_INDEX;
+ break;
+ case kWeakMap:
+ index = Context::WEAKMAP_SET_INDEX;
+ break;
+ case kWeakSet:
+ index = Context::WEAKSET_ADD_INDEX;
+ break;
+ }
+ return CAST(LoadContextElement(native_context, index));
+}
+
+int BaseCollectionsAssembler::GetTableOffset(Variant variant) {
+ switch (variant) {
+ case kMap:
+ return JSMap::kTableOffset;
+ case kSet:
+ return JSSet::kTableOffset;
+ case kWeakMap:
+ return JSWeakMap::kTableOffset;
+ case kWeakSet:
+ return JSWeakSet::kTableOffset;
+ }
+ UNREACHABLE();
+}
+
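GetConstructor, GetInitialAddFunction and GetTableOffset replace the constants that were previously threaded through GenerateConstructor's parameters, and the widened Variant enum means every switch must now handle the weak collections as well. A small sketch of the pattern; the offsets below are placeholders, not the real JS*::kTableOffset values:

```cpp
#include <cstddef>

enum class Variant { kMap, kSet, kWeakMap, kWeakSet };

// Placeholder offsets for illustration only.
constexpr std::size_t GetTableOffset(Variant variant) {
  switch (variant) {
    case Variant::kMap:     return 8;
    case Variant::kSet:     return 8;
    case Variant::kWeakMap: return 16;
    case Variant::kWeakSet: return 16;
  }
  return 0;  // not reached; stands in for UNREACHABLE()
}

static_assert(GetTableOffset(Variant::kWeakSet) == 16,
              "every dispatch site must handle the weak variants");

int main() { return 0; }
```

Keeping the switches exhaustive (no default case) lets the compiler flag any per-variant helper that has not yet been taught about kWeakMap/kWeakSet.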
TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize(
TNode<Object> initial_entries, TNode<BoolT> is_fast_jsarray) {
return Select<IntPtrT>(
@@ -362,6 +465,31 @@ void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj,
GotoIfNot(IsJSReceiver(obj), if_not_receiver);
}
+TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
+ Variant variant, TNode<Context> native_context, TNode<Object> collection) {
+ int initial_prototype_index;
+ switch (variant) {
+ case kMap:
+ initial_prototype_index = Context::INITIAL_MAP_PROTOTYPE_MAP_INDEX;
+ break;
+ case kSet:
+ initial_prototype_index = Context::INITIAL_SET_PROTOTYPE_MAP_INDEX;
+ break;
+ case kWeakMap:
+ initial_prototype_index = Context::INITIAL_WEAKMAP_PROTOTYPE_MAP_INDEX;
+ break;
+ case kWeakSet:
+ initial_prototype_index = Context::INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX;
+ break;
+ }
+ TNode<Map> initial_prototype_map =
+ CAST(LoadContextElement(native_context, initial_prototype_index));
+ TNode<Map> collection_proto_map =
+ LoadMap(CAST(LoadMapPrototype(LoadMap(CAST(collection)))));
+
+ return WordEqual(collection_proto_map, initial_prototype_map);
+}
+
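HasInitialCollectionPrototype does a two-hop load: the instance's map, that map's prototype, and that prototype's own map, compared against the pristine map cached on the native context at bootstrap. A toy model with plain structs standing in for heap objects:

```cpp
#include <cassert>

struct Map;
struct HeapObject { const Map* map; };
struct Map { const HeapObject* prototype; };

bool HasInitialCollectionPrototype(const HeapObject& collection,
                                   const Map* initial_prototype_map) {
  // LoadMap(collection) -> LoadMapPrototype -> LoadMap, compared against the
  // Context::INITIAL_*_PROTOTYPE_MAP_INDEX slot.
  return collection.map->prototype->map == initial_prototype_map;
}

int main() {
  Map proto_map{nullptr};
  HeapObject proto{&proto_map};
  Map instance_map{&proto};
  HeapObject collection{&instance_map};
  assert(HasInitialCollectionPrototype(collection, &proto_map));

  Map reshaped{&proto};
  collection.map = &reshaped;  // reshaping the instance itself is harmless...
  assert(HasInitialCollectionPrototype(collection, &proto_map));

  proto.map = &reshaped;       // ...but patching the prototype object is not
  assert(!HasInitialCollectionPrototype(collection, &proto_map));
}
```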
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
TNode<Object> elements, TNode<IntPtrT> index) {
TNode<Object> element = CAST(LoadFixedArrayElement(elements, index));
@@ -386,15 +514,13 @@ TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
Goto(&next);
}
BIND(&next);
- return entry;
+ return entry.value();
}
-void BaseCollectionsAssembler::LoadKeyValue(TNode<Context> context,
- TNode<Object> maybe_array,
- TVariable<Object>* key,
- TVariable<Object>* value,
- Label* if_exception,
- TVariable<Object>* var_exception) {
+void BaseCollectionsAssembler::LoadKeyValue(
+ TNode<Context> context, TNode<Object> maybe_array, TVariable<Object>* key,
+ TVariable<Object>* value, Label* if_may_have_side_effects,
+ Label* if_exception, TVariable<Object>* var_exception) {
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(maybe_array)));
Label exit(this), if_fast(this), if_slow(this, Label::kDeferred);
@@ -461,20 +587,31 @@ void BaseCollectionsAssembler::LoadKeyValue(TNode<Context> context,
}
BIND(&if_slow);
{
- *key = UncheckedCast<Object>(
- GetProperty(context, maybe_array, isolate()->factory()->zero_string()));
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(*key, if_exception, var_exception);
- }
+ Label if_notobject(this, Label::kDeferred);
+ GotoIfNotJSReceiver(maybe_array, &if_notobject);
+ if (if_may_have_side_effects != nullptr) {
+ // If the element is not a fast array, we cannot guarantee accessing the
+ // key and value won't execute user code that will break fast path
+ // assumptions.
+ Goto(if_may_have_side_effects);
+ } else {
+ *key = UncheckedCast<Object>(GetProperty(
+ context, maybe_array, isolate()->factory()->zero_string()));
+ GotoIfException(key->value(), if_exception, var_exception);
- *value = UncheckedCast<Object>(
- GetProperty(context, maybe_array, isolate()->factory()->one_string()));
- if (if_exception != nullptr) {
- DCHECK(var_exception != nullptr);
- GotoIfException(*value, if_exception, var_exception);
+ *value = UncheckedCast<Object>(GetProperty(
+ context, maybe_array, isolate()->factory()->one_string()));
+ GotoIfException(value->value(), if_exception, var_exception);
+ Goto(&exit);
+ }
+ BIND(&if_notobject);
+ {
+ Node* ret = CallRuntime(
+ Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kIteratorValueNotAnObject), maybe_array);
+ GotoIfException(ret, if_exception, var_exception);
+ Unreachable();
}
- Goto(&exit);
}
BIND(&exit);
}
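On the slow path, reading properties "0" and "1" can invoke user-defined getters, which is precisely what if_may_have_side_effects lets the fast path bail out on; non-objects throw a TypeError, and absent elements become undefined. A toy model of that contract, with std::optional and std::map standing in for JS values:

```cpp
#include <map>
#include <optional>
#include <stdexcept>
#include <string>
#include <utility>

// Toy JS value: nullopt plays "not an object"; a map of string-keyed
// properties plays a JSReceiver.
using JSObject = std::map<std::string, std::string>;
using Value = std::optional<JSObject>;

std::pair<std::string, std::string> LoadKeyValue(const Value& entry) {
  if (!entry)  // GotoIfNotJSReceiver -> kIteratorValueNotAnObject
    throw std::runtime_error("TypeError: iterator value is not an entry object");
  auto get = [&](const char* name) {  // GetProperty(context, entry, name)
    auto it = entry->find(name);
    return it == entry->end() ? std::string("undefined") : it->second;
  };
  return {get("0"), get("1")};
}

int main() {
  auto [k, v] = LoadKeyValue(JSObject{{"0", "key"}});  // no "1" property
  return (k == "key" && v == "undefined") ? 0 : 1;
}
```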
@@ -672,18 +809,17 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator(
TNode<Object> CollectionsBuiltinsAssembler::AllocateTable(
Variant variant, TNode<Context> context,
TNode<IntPtrT> at_least_space_for) {
- return CAST(variant == kMap ? AllocateOrderedHashTable<OrderedHashMap>()
- : AllocateOrderedHashTable<OrderedHashSet>());
+ return CAST((variant == kMap || variant == kWeakMap)
+ ? AllocateOrderedHashTable<OrderedHashMap>()
+ : AllocateOrderedHashTable<OrderedHashSet>());
}
TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
- GenerateConstructor(kMap, Context::JS_MAP_FUN_INDEX,
- isolate()->factory()->Map_string(), JSMap::kTableOffset);
+ GenerateConstructor(kMap, isolate()->factory()->Map_string());
}
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
- GenerateConstructor(kSet, Context::JS_SET_FUN_INDEX,
- isolate()->factory()->Set_string(), JSSet::kTableOffset);
+ GenerateConstructor(kSet, isolate()->factory()->Set_string());
}
Node* CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw(Node* const key) {
@@ -1049,9 +1185,9 @@ std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::Transition(
GotoIf(TaggedIsSmi(next_table), &done_loop);
var_table.Bind(next_table);
- var_index.Bind(
- SmiUntag(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
- NoContextConstant(), table, SmiTag(index))));
+ var_index.Bind(SmiUntag(
+ CAST(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
+ NoContextConstant(), table, SmiTag(index)))));
Goto(&loop);
}
BIND(&done_loop);
@@ -1624,7 +1760,8 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE),
&if_receiver_valid, &if_receiver_invalid);
BIND(&if_receiver_invalid);
- ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(kMethodName), receiver);
BIND(&if_receiver_valid);
// Check if the {receiver} is exhausted.
@@ -1837,7 +1974,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
InstanceTypeEqual(receiver_instance_type, JS_SET_KEY_VALUE_ITERATOR_TYPE),
&if_receiver_valid, &if_receiver_invalid);
BIND(&if_receiver_invalid);
- ThrowIncompatibleMethodReceiver(context, kMethodName, receiver);
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(kMethodName), receiver);
BIND(&if_receiver_valid);
// Check if the {receiver} is exhausted.
@@ -2019,7 +2157,7 @@ void WeakCollectionsBuiltinsAssembler::AddEntry(
// See HashTableBase::ElementAdded().
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
- SmiFromWord(number_of_elements), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
}
TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
@@ -2043,7 +2181,7 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
SmiConstant(0), SKIP_WRITE_BARRIER);
StoreFixedArrayElement(table, ObjectHashTable::kCapacityIndex,
- SmiFromWord(capacity), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(capacity), SKIP_WRITE_BARRIER);
TNode<IntPtrT> start = KeyIndexFromEntry(IntPtrConstant(0));
FillFixedArrayWithValue(HOLEY_ELEMENTS, table, start, length,
@@ -2083,16 +2221,15 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
BIND(&loop);
TNode<IntPtrT> key_index;
{
- key_index = KeyIndexFromEntry(var_entry);
+ key_index = KeyIndexFromEntry(var_entry.value());
TNode<Object> entry_key = CAST(LoadFixedArrayElement(table, key_index));
key_compare(entry_key, &if_found);
// See HashTable::NextProbe().
Increment(&var_count);
- var_entry = WordAnd(IntPtrAdd(UncheckedCast<IntPtrT>(var_entry),
- UncheckedCast<IntPtrT>(var_count)),
- entry_mask);
+ var_entry =
+ WordAnd(IntPtrAdd(var_entry.value(), var_count.value()), entry_mask);
Goto(&loop);
}
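The probe step mirrors HashTable::NextProbe: the offsets grow triangularly (1, 2, 3, ...) and are masked to the power-of-two capacity, a sequence known to visit every slot exactly once per cycle. A quick standalone check of that coverage property:

```cpp
#include <cstddef>
#include <set>

int main() {
  const std::size_t mask = 15;             // capacity 16, always a power of two
  std::set<std::size_t> seen;
  std::size_t entry = 11 & mask;           // hash & mask picks the first slot
  for (std::size_t count = 0; count < 16; ++count) {
    seen.insert(entry);
    entry = (entry + (count + 1)) & mask;  // the var_entry/var_count step above
  }
  return seen.size() == 16 ? 0 : 1;        // triangular steps cover every slot
}
```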
@@ -2186,9 +2323,9 @@ void WeakCollectionsBuiltinsAssembler::RemoveEntry(
// See HashTableBase::ElementRemoved().
TNode<IntPtrT> number_of_deleted = LoadNumberOfDeleted(table, 1);
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex,
- SmiFromWord(number_of_elements), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER);
StoreFixedArrayElement(table, ObjectHashTable::kNumberOfDeletedElementsIndex,
- SmiFromWord(number_of_deleted), SKIP_WRITE_BARRIER);
+ SmiFromIntPtr(number_of_deleted), SKIP_WRITE_BARRIER);
}
TNode<BoolT> WeakCollectionsBuiltinsAssembler::ShouldRehash(
@@ -2222,15 +2359,11 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
}
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
- GenerateConstructor(kMap, Context::JS_WEAK_MAP_FUN_INDEX,
- isolate()->factory()->WeakMap_string(),
- JSWeakMap::kTableOffset);
+ GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string());
}
TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
- GenerateConstructor(kSet, Context::JS_WEAK_SET_FUN_INDEX,
- isolate()->factory()->WeakSet_string(),
- JSWeakSet::kTableOffset);
+ GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string());
}
TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
@@ -2342,8 +2475,8 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
TNode<IntPtrT> entry_mask = EntryMask(capacity);
TVARIABLE(IntPtrT, var_hash, LoadJSReceiverIdentityHash(key, &if_no_hash));
- TNode<IntPtrT> key_index =
- FindKeyIndexForKey(table, key, var_hash, entry_mask, &if_not_found);
+ TNode<IntPtrT> key_index = FindKeyIndexForKey(table, key, var_hash.value(),
+ entry_mask, &if_not_found);
StoreFixedArrayElement(table, ValueIndexFromKeyIndex(key_index), value);
Return(collection);
@@ -2365,14 +2498,14 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
&call_runtime);
TNode<IntPtrT> insertion_key_index =
- FindKeyIndexForInsertion(table, var_hash, entry_mask);
+ FindKeyIndexForInsertion(table, var_hash.value(), entry_mask);
AddEntry(table, insertion_key_index, key, value, number_of_elements);
Return(collection);
}
BIND(&call_runtime);
{
CallRuntime(Runtime::kWeakCollectionSet, context, collection, key, value,
- SmiTag(var_hash));
+ SmiTag(var_hash.value()));
Return(collection);
}
}
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 5c3883a870..945fb4394b 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -55,21 +55,54 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
typedef compiler::Node Node;
-Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
- Node* feedback_vector,
- Node* slot,
- Node* context) {
- Isolate* isolate = this->isolate();
- Factory* factory = isolate->factory();
- IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
-
- Node* compiler_hints =
- LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- MachineType::Uint32());
+Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
+ return TaggedIsSmi(literal_site);
+}
+
+Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
+ CSA_ASSERT(this, IsAllocationSite(site));
+ return LoadObjectField(site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset);
+}
+
+TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
+ Node* shared_function_info = Parameter(Descriptor::kSharedFunctionInfo);
+ Node* feedback_cell = Parameter(Descriptor::kFeedbackCell);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsFeedbackCell(feedback_cell));
+ CSA_ASSERT(this, IsSharedFunctionInfo(shared_function_info));
+
+ IncrementCounter(isolate()->counters()->fast_new_closure_total(), 1);
+
+ // Bump the closure counter encoded in the {feedback_cell}'s map.
+ {
+ Node* const feedback_cell_map = LoadMap(feedback_cell);
+ Label no_closures(this), one_closure(this), cell_done(this);
+
+ GotoIf(IsNoClosuresCellMap(feedback_cell_map), &no_closures);
+ GotoIf(IsOneClosureCellMap(feedback_cell_map), &one_closure);
+ CSA_ASSERT(this, IsManyClosuresCellMap(feedback_cell_map),
+ feedback_cell_map, feedback_cell);
+ Goto(&cell_done);
+
+ BIND(&no_closures);
+ StoreMapNoWriteBarrier(feedback_cell, Heap::kOneClosureCellMapRootIndex);
+ Goto(&cell_done);
+
+ BIND(&one_closure);
+ StoreMapNoWriteBarrier(feedback_cell, Heap::kManyClosuresCellMapRootIndex);
+ Goto(&cell_done);
+
+ BIND(&cell_done);
+ }
// The calculation of |function_map_index| must be in sync with
// SharedFunctionInfo::function_map_index().
- Node* function_map_index =
+ Node* const compiler_hints = LoadObjectField(
+ shared_function_info, SharedFunctionInfo::kCompilerHintsOffset,
+ MachineType::Uint32());
+ Node* const function_map_index =
IntPtrAdd(DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(
compiler_hints),
IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
@@ -79,24 +112,24 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
// Get the function map in the current native context and set that
// as the map of the allocated object.
- Node* native_context = LoadNativeContext(context);
- Node* function_map = LoadContextElement(native_context, function_map_index);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const function_map =
+ LoadContextElement(native_context, function_map_index);
// Create a new closure from the given function info in new space
Node* instance_size_in_bytes =
TimesPointerSize(LoadMapInstanceSizeInWords(function_map));
- Node* result = Allocate(instance_size_in_bytes);
+ Node* const result = Allocate(instance_size_in_bytes);
StoreMapNoWriteBarrier(result, function_map);
InitializeJSObjectBodyNoSlackTracking(result, function_map,
instance_size_in_bytes,
JSFunction::kSizeWithoutPrototype);
// Initialize the rest of the function.
- Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
- StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOrHashOffset,
- empty_fixed_array);
- StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
- empty_fixed_array);
+ StoreObjectFieldRoot(result, JSObject::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
{
// Set function prototype if necessary.
Label done(this), init_prototype(this);
@@ -104,65 +137,23 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
&done);
BIND(&init_prototype);
- StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
+ StoreObjectFieldRoot(result, JSFunction::kPrototypeOrInitialMapOffset,
+ Heap::kTheHoleValueRootIndex);
Goto(&done);
-
BIND(&done);
}
- Node* literals_cell = LoadFeedbackVectorSlot(
- feedback_vector, slot, 0, CodeStubAssembler::SMI_PARAMETERS);
- {
- // Bump the closure counter encoded in the cell's map.
- Node* cell_map = LoadMap(literals_cell);
- Label no_closures(this), one_closure(this), cell_done(this);
-
- GotoIf(IsNoClosuresCellMap(cell_map), &no_closures);
- GotoIf(IsOneClosureCellMap(cell_map), &one_closure);
- CSA_ASSERT(this, IsManyClosuresCellMap(cell_map), cell_map, literals_cell,
- feedback_vector, slot);
- Goto(&cell_done);
-
- BIND(&no_closures);
- StoreMapNoWriteBarrier(literals_cell, Heap::kOneClosureCellMapRootIndex);
- Goto(&cell_done);
-
- BIND(&one_closure);
- StoreMapNoWriteBarrier(literals_cell, Heap::kManyClosuresCellMapRootIndex);
- Goto(&cell_done);
-
- BIND(&cell_done);
- }
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
- literals_cell);
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackCellOffset,
+ feedback_cell);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
- shared_info);
+ shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
Handle<Code> lazy_builtin_handle(
- isolate->builtins()->builtin(Builtins::kCompileLazy));
+ isolate()->builtins()->builtin(Builtins::kCompileLazy));
Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
- return result;
-}
-
-Node* ConstructorBuiltinsAssembler::NotHasBoilerplate(Node* literal_site) {
- return TaggedIsSmi(literal_site);
-}
-
-Node* ConstructorBuiltinsAssembler::LoadAllocationSiteBoilerplate(Node* site) {
- CSA_ASSERT(this, IsAllocationSite(site));
- return LoadObjectField(site,
- AllocationSite::kTransitionInfoOrBoilerplateOffset);
-}
-
-TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
- Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
- Node* context = Parameter(FastNewClosureDescriptor::kContext);
- Node* vector = Parameter(FastNewClosureDescriptor::kVector);
- Node* slot = Parameter(FastNewClosureDescriptor::kSlot);
- Return(EmitFastNewClosure(shared, vector, slot, context));
+ Return(result);
}
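FastNewClosure now receives the FeedbackCell directly and bumps a no/one/many closure count that is encoded in which map the cell carries, so the bump is a single map store with no write barrier and no extra field. The saturating state machine, sketched as an enum (the variants correspond to the NoClosures/OneClosure/ManyClosures cell maps):

```cpp
#include <cassert>

enum class CellMap { kNoClosures, kOneClosure, kManyClosures };

void BumpClosureCount(CellMap& map) {
  switch (map) {
    case CellMap::kNoClosures:   map = CellMap::kOneClosure;   break;
    case CellMap::kOneClosure:   map = CellMap::kManyClosures; break;
    case CellMap::kManyClosures: break;  // saturates; "many" is all we track
  }
}

int main() {
  CellMap m = CellMap::kNoClosures;
  BumpClosureCount(m);
  BumpClosureCount(m);
  BumpClosureCount(m);
  assert(m == CellMap::kManyClosures);
}
```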
TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
@@ -418,7 +409,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
BIND(&create_empty_array);
CSA_ASSERT(this, IsAllocationSite(allocation_site.value()));
- Node* kind = SmiToWord32(CAST(
+ Node* kind = SmiToInt32(CAST(
LoadObjectField(allocation_site.value(),
AllocationSite::kTransitionInfoOrBoilerplateOffset)));
CSA_ASSERT(this, IsFastElementsKind(kind));
@@ -662,7 +653,7 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
args.PopAndReturn(EmitCreateEmptyObjectLiteral(context));
BIND(&return_to_object);
- args.PopAndReturn(CallBuiltin(Builtins::kToObject, context, value));
+ args.PopAndReturn(ToObject(context, value));
}
TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
@@ -687,7 +678,7 @@ TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
args.PopAndReturn(EmitFastNewObject(context, target, new_target));
BIND(&return_to_object);
- args.PopAndReturn(CallBuiltin(Builtins::kToObject, context, value));
+ args.PopAndReturn(ToObject(context, value));
}
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index ac13dcbb6d..f6d71882bc 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -15,8 +15,6 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
explicit ConstructorBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* EmitFastNewClosure(Node* shared_info, Node* feedback_vector, Node* slot,
- Node* context);
Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context,
ScopeType scope_type);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 98e0f2c8b2..dc3e8d53c4 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -62,7 +62,7 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
BIND(&if_resultisnotprimitive);
{
// Somehow the @@toPrimitive method on {input} didn't yield a primitive.
- TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive, context);
+ ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive);
}
}
@@ -99,7 +99,7 @@ TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) {
}
TF_BUILTIN(StringToNumber, CodeStubAssembler) {
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<String> input = CAST(Parameter(Descriptor::kArgument));
Return(StringToNumber(input));
}
@@ -144,7 +144,7 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
// ES section #sec-tostring-applied-to-the-number-type
TF_BUILTIN(NumberToString, CodeStubAssembler) {
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Number> input = CAST(Parameter(Descriptor::kArgument));
Return(NumberToString(input));
}
@@ -208,7 +208,7 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
BIND(&if_methodisnotcallable);
}
- TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive, context);
+ ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive);
BIND(&return_result);
Return(var_result.value());
@@ -383,20 +383,13 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
Return(js_value);
BIND(&if_noconstructor);
- TailCallRuntime(Runtime::kThrowUndefinedOrNullToObject, context,
- StringConstant("ToObject"));
+ ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject,
+ "ToObject");
BIND(&if_jsreceiver);
Return(object);
}
-// Deprecated ES5 [[Class]] internal property (used to implement %_ClassOf).
-TF_BUILTIN(ClassOf, CodeStubAssembler) {
- Node* object = Parameter(TypeofDescriptor::kObject);
-
- Return(ClassOf(object));
-}
-
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
Node* object = Parameter(TypeofDescriptor::kObject);
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index df7058d377..38b3d90649 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -157,6 +157,21 @@ void FlipBytes(uint8_t* target, uint8_t const* source) {
}
}
+template <typename T>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, T value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+template <>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, int64_t value) {
+ return BigInt::FromInt64(isolate, value);
+}
+
+template <>
+MaybeHandle<Object> AllocateResult(Isolate* isolate, uint64_t value) {
+ return BigInt::FromUint64(isolate, value);
+}
+
// ES6 section 24.2.1.1 GetViewValue (view, requestIndex, isLittleEndian, type)
template <typename T>
MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
@@ -196,50 +211,78 @@ MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
} else {
CopyBytes<sizeof(T)>(v.bytes, source);
}
- return isolate->factory()->NewNumber(v.data);
+ return AllocateResult<T>(isolate, v.data);
+}
+
+template <typename T>
+MaybeHandle<Object> DataViewConvertInput(Isolate* isolate,
+ Handle<Object> input) {
+ return Object::ToNumber(input);
+}
+
+template <>
+MaybeHandle<Object> DataViewConvertInput<int64_t>(Isolate* isolate,
+ Handle<Object> input) {
+ return BigInt::FromObject(isolate, input);
+}
+
+template <>
+MaybeHandle<Object> DataViewConvertInput<uint64_t>(Isolate* isolate,
+ Handle<Object> input) {
+ return BigInt::FromObject(isolate, input);
}
template <typename T>
-T DataViewConvertValue(double value);
+T DataViewConvertValue(Handle<Object> value);
+
+template <>
+int8_t DataViewConvertValue<int8_t>(Handle<Object> value) {
+ return static_cast<int8_t>(DoubleToInt32(value->Number()));
+}
+
+template <>
+int16_t DataViewConvertValue<int16_t>(Handle<Object> value) {
+ return static_cast<int16_t>(DoubleToInt32(value->Number()));
+}
template <>
-int8_t DataViewConvertValue<int8_t>(double value) {
- return static_cast<int8_t>(DoubleToInt32(value));
+int32_t DataViewConvertValue<int32_t>(Handle<Object> value) {
+ return DoubleToInt32(value->Number());
}
template <>
-int16_t DataViewConvertValue<int16_t>(double value) {
- return static_cast<int16_t>(DoubleToInt32(value));
+uint8_t DataViewConvertValue<uint8_t>(Handle<Object> value) {
+ return static_cast<uint8_t>(DoubleToUint32(value->Number()));
}
template <>
-int32_t DataViewConvertValue<int32_t>(double value) {
- return DoubleToInt32(value);
+uint16_t DataViewConvertValue<uint16_t>(Handle<Object> value) {
+ return static_cast<uint16_t>(DoubleToUint32(value->Number()));
}
template <>
-uint8_t DataViewConvertValue<uint8_t>(double value) {
- return static_cast<uint8_t>(DoubleToUint32(value));
+uint32_t DataViewConvertValue<uint32_t>(Handle<Object> value) {
+ return DoubleToUint32(value->Number());
}
template <>
-uint16_t DataViewConvertValue<uint16_t>(double value) {
- return static_cast<uint16_t>(DoubleToUint32(value));
+float DataViewConvertValue<float>(Handle<Object> value) {
+ return static_cast<float>(value->Number());
}
template <>
-uint32_t DataViewConvertValue<uint32_t>(double value) {
- return DoubleToUint32(value);
+double DataViewConvertValue<double>(Handle<Object> value) {
+ return value->Number();
}
template <>
-float DataViewConvertValue<float>(double value) {
- return static_cast<float>(value);
+int64_t DataViewConvertValue<int64_t>(Handle<Object> value) {
+ return BigInt::cast(*value)->AsInt64();
}
template <>
-double DataViewConvertValue<double>(double value) {
- return value;
+uint64_t DataViewConvertValue<uint64_t>(Handle<Object> value) {
+ return BigInt::cast(*value)->AsUint64();
}
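The BigInt64/BigUint64 support rides on template specialization at two points: DataViewConvertInput picks ToNumber versus BigInt::FromObject, and DataViewConvertValue picks double truncation versus AsInt64/AsUint64, leaving the shared SetViewValue flow untouched. A compact standalone model of that dispatch; JSAny is a toy stand-in for a V8 handle, only the int64_t specialization is shown, and BigUint64 is analogous:

```cpp
#include <cstdint>
#include <stdexcept>
#include <variant>

using JSAny = std::variant<double, int64_t>;  // Number or (toy) BigInt

template <typename T>
JSAny DataViewConvertInput(JSAny input) {     // primary: ToNumber
  if (auto* n = std::get_if<double>(&input)) return *n;
  throw std::runtime_error("TypeError: cannot convert a BigInt to a Number");
}
template <>
JSAny DataViewConvertInput<int64_t>(JSAny input) {  // BigInt64: ToBigInt
  if (auto* b = std::get_if<int64_t>(&input)) return *b;
  throw std::runtime_error("TypeError: cannot convert a Number to a BigInt");
}

template <typename T>
T DataViewConvertValue(const JSAny& value) {
  // Truncation stands in for the DoubleTo(U)Int32 helpers.
  return static_cast<T>(std::get<double>(value));
}
template <>
int64_t DataViewConvertValue<int64_t>(const JSAny& value) {
  return std::get<int64_t>(value);  // BigInt::cast(*value)->AsInt64() stand-in
}

int main() {
  JSAny v = DataViewConvertInput<int64_t>(JSAny{int64_t{-42}});
  return DataViewConvertValue<int64_t>(v) == -42 ? 0 : 1;
}
```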
// ES6 section 24.2.1.2 SetViewValue (view, requestIndex, isLittleEndian, type,
@@ -253,7 +296,8 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
Object::ToIndex(isolate, request_index,
MessageTemplate::kInvalidDataViewAccessorOffset),
Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::ToNumber(value), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ DataViewConvertInput<T>(isolate, value), Object);
size_t get_index = 0;
if (!TryNumberToSize(*request_index, &get_index)) {
THROW_NEW_ERROR(
@@ -274,7 +318,7 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
T data;
uint8_t bytes[sizeof(T)];
} v;
- v.data = DataViewConvertValue<T>(value->Number());
+ v.data = DataViewConvertValue<T>(value);
size_t const buffer_offset = data_view_byte_offset + get_index;
DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
uint8_t* const target =
@@ -310,6 +354,8 @@ DATA_VIEW_PROTOTYPE_GET(Int32, int32_t)
DATA_VIEW_PROTOTYPE_GET(Uint32, uint32_t)
DATA_VIEW_PROTOTYPE_GET(Float32, float)
DATA_VIEW_PROTOTYPE_GET(Float64, double)
+DATA_VIEW_PROTOTYPE_GET(BigInt64, int64_t)
+DATA_VIEW_PROTOTYPE_GET(BigUint64, uint64_t)
#undef DATA_VIEW_PROTOTYPE_GET
#define DATA_VIEW_PROTOTYPE_SET(Type, type) \
@@ -334,6 +380,8 @@ DATA_VIEW_PROTOTYPE_SET(Int32, int32_t)
DATA_VIEW_PROTOTYPE_SET(Uint32, uint32_t)
DATA_VIEW_PROTOTYPE_SET(Float32, float)
DATA_VIEW_PROTOTYPE_SET(Float64, double)
+DATA_VIEW_PROTOTYPE_SET(BigInt64, int64_t)
+DATA_VIEW_PROTOTYPE_SET(BigUint64, uint64_t)
#undef DATA_VIEW_PROTOTYPE_SET
} // namespace internal
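GetViewValue and SetViewValue move raw bytes through a data/bytes union and call FlipBytes when the requested endianness differs from the host's; the new BigInt64/BigUint64 accessors reuse that machinery unchanged. A portable sketch using memcpy in place of the union (ReadUnaligned is an illustrative name, not the V8 helper):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstring>

template <typename T>
T ReadUnaligned(const uint8_t* source, bool source_is_little_endian,
                bool host_is_little_endian) {
  uint8_t bytes[sizeof(T)];
  std::memcpy(bytes, source, sizeof(T));
  if (source_is_little_endian != host_is_little_endian)
    std::reverse(bytes, bytes + sizeof(T));  // FlipBytes
  T value;
  std::memcpy(&value, bytes, sizeof(T));
  return value;
}

int main() {
  const uint8_t big_endian_one[8] = {0, 0, 0, 0, 0, 0, 0, 1};
  // Assumes a little-endian host (e.g. x86-64) for the check below.
  return ReadUnaligned<uint64_t>(big_endian_one,
                                 /*source_is_little_endian=*/false,
                                 /*host_is_little_endian=*/true) == 1 ? 0 : 1;
}
```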
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index f6f3563d55..8b58c1ec80 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -61,10 +61,7 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context,
// Raise a TypeError if the receiver is not a date.
BIND(&receiver_not_date);
- {
- CallRuntime(Runtime::kThrowNotDateError, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNotDateObject); }
}
TF_BUILTIN(DatePrototypeGetDate, DateBuiltinsAssembler) {
@@ -240,17 +237,14 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
// Raise a TypeError if the {hint} is invalid.
BIND(&hint_is_invalid);
- {
- CallRuntime(Runtime::kThrowInvalidHint, context, hint);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kInvalidHint, hint); }
// Raise a TypeError if the {receiver} is not a JSReceiver instance.
BIND(&receiver_is_invalid);
{
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant("Date.prototype [ @@toPrimitive ]"), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant("Date.prototype [ @@toPrimitive ]"),
+ receiver);
}
}
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index a4a0bb9e2c..bf5b9086aa 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -70,7 +70,7 @@ namespace internal {
ASM(JSConstructStubGenericUnrestrictedReturn) \
ASM(JSBuiltinsConstructStub) \
TFC(FastNewObject, FastNewObject, 1) \
- TFC(FastNewClosure, FastNewClosure, 1) \
+ TFS(FastNewClosure, kSharedFunctionInfo, kFeedbackCell) \
TFC(FastNewFunctionContextEval, FastNewFunctionContext, 1) \
TFC(FastNewFunctionContextFunction, FastNewFunctionContext, 1) \
TFS(CreateRegExpLiteral, kFeedbackVector, kSlot, kPattern, kFlags) \
@@ -92,8 +92,8 @@ namespace internal {
\
/* String helpers */ \
TFC(StringCharAt, StringAt, 1) \
- TFC(StringCharCodeAt, StringAt, 1) \
- TFC(StringCodePointAt, StringAt, 1) \
+ TFC(StringCodePointAtUTF16, StringAt, 1) \
+ TFC(StringCodePointAtUTF32, StringAt, 1) \
TFC(StringEqual, Compare, 1) \
TFC(StringGreaterThan, Compare, 1) \
TFC(StringGreaterThanOrEqual, Compare, 1) \
@@ -101,7 +101,7 @@ namespace internal {
TFC(StringLessThan, Compare, 1) \
TFC(StringLessThanOrEqual, Compare, 1) \
TFS(StringRepeat, kString, kCount) \
- TFS(SubString, kString, kFrom, kTo) \
+ TFC(StringSubstring, StringSubstring, 1) \
\
/* OrderedHashTable helpers */ \
TFS(OrderedHashTableHealIndex, kTable, kIndex) \
@@ -193,7 +193,6 @@ namespace internal {
TFC(ToInteger, TypeConversion, 1) \
TFC(ToInteger_TruncateMinusZero, TypeConversion, 1) \
TFC(ToLength, TypeConversion, 1) \
- TFC(ClassOf, Typeof, 1) \
TFC(Typeof, Typeof, 1) \
TFC(GetSuperConstructor, Typeof, 1) \
\
@@ -216,14 +215,9 @@ namespace internal {
TFH(StoreGlobalIC_Slow, StoreWithVector) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
\
- /* Promise helpers */ \
- TFS(ResolveNativePromise, kPromise, kValue) \
- TFS(RejectNativePromise, kPromise, kValue, kDebugEvent) \
- TFS(PerformNativePromiseThen, kPromise, kResolveReaction, kRejectReaction, \
- kResultPromise) \
+ /* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
TFC(RunMicrotasks, RunMicrotasks, 1) \
- TFS(PromiseResolveThenableJob, kMicrotask) \
\
/* Object property helpers */ \
TFS(HasProperty, kKey, kObject) \
@@ -247,6 +241,10 @@ namespace internal {
CPP(ArrayConcat) \
/* ES6 #sec-array.isarray */ \
TFJ(ArrayIsArray, 1, kArg) \
+ /* ES6 #sec-array.from */ \
+ TFJ(ArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-array.of */ \
+ TFJ(ArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES7 #sec-array.prototype.includes */ \
TFJ(ArrayIncludes, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.indexof */ \
@@ -313,6 +311,7 @@ namespace internal {
/* ES6 #sec-array.prototype.reduce */ \
TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReducePreLoopEagerDeoptContinuation, 2, kCallbackFn, kLength) \
TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
kLength, kAccumulator) \
TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
@@ -321,6 +320,7 @@ namespace internal {
/* ES6 #sec-array.prototype.reduceRight */ \
TFS(ArrayReduceRightLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReduceRightPreLoopEagerDeoptContinuation, 2, kCallbackFn, kLength) \
TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \
kLength, kAccumulator) \
TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \
@@ -365,17 +365,16 @@ namespace internal {
CPP(ArrayBufferPrototypeSlice) \
\
/* AsyncFunction */ \
- TFJ(AsyncFunctionAwaitCaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitUncaught, 3, kGenerator, kAwaited, kOuterPromise) \
- TFJ(AsyncFunctionAwaitRejectClosure, 1, kSentError) \
- TFJ(AsyncFunctionAwaitResolveClosure, 1, kSentValue) \
+ TFC(AsyncFunctionAwaitFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncFunctionAwaitReject, PromiseReactionHandler, 1) \
+ TFS(AsyncFunctionAwaitCaught, kGenerator, kValue, kOuterPromise) \
+ TFS(AsyncFunctionAwaitUncaught, kGenerator, kValue, kOuterPromise) \
TFJ(AsyncFunctionPromiseCreate, 0) \
TFJ(AsyncFunctionPromiseRelease, 1, kPromise) \
\
/* BigInt */ \
CPP(BigIntConstructor) \
CPP(BigIntConstructor_ConstructStub) \
- CPP(BigIntParseInt) \
CPP(BigIntAsUintN) \
CPP(BigIntAsIntN) \
CPP(BigIntPrototypeToLocaleString) \
@@ -457,6 +456,10 @@ namespace internal {
CPP(DataViewPrototypeSetFloat32) \
CPP(DataViewPrototypeGetFloat64) \
CPP(DataViewPrototypeSetFloat64) \
+ CPP(DataViewPrototypeGetBigInt64) \
+ CPP(DataViewPrototypeSetBigInt64) \
+ CPP(DataViewPrototypeGetBigUint64) \
+ CPP(DataViewPrototypeSetBigUint64) \
\
/* Date */ \
CPP(DateConstructor) \
@@ -755,7 +758,7 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- CPP(ObjectEntries) \
+ TFJ(ObjectEntries, 1, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -785,7 +788,7 @@ namespace internal {
/* ES #sec-object.prototype.tolocalestring */ \
TFJ(ObjectPrototypeToLocaleString, 0) \
CPP(ObjectSeal) \
- CPP(ObjectValues) \
+ TFJ(ObjectValues, 1, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare, 1) \
@@ -796,36 +799,42 @@ namespace internal {
TFS(ForInFilter, kKey, kObject) \
\
/* Promise */ \
+ /* ES #sec-fulfillpromise */ \
+ TFS(FulfillPromise, kPromise, kValue) \
+ /* ES #sec-rejectpromise */ \
+ TFS(RejectPromise, kPromise, kReason, kDebugEvent) \
+ /* ES #sec-promise-resolve-functions */ \
+ /* Starting at step 6 of "Promise Resolve Functions" */ \
+ TFS(ResolvePromise, kPromise, kResolution) \
+ /* ES #sec-promise-reject-functions */ \
+ TFJ(PromiseCapabilityDefaultReject, 1, kReason) \
+ /* ES #sec-promise-resolve-functions */ \
+ TFJ(PromiseCapabilityDefaultResolve, 1, kResolution) \
/* ES6 #sec-getcapabilitiesexecutor-functions */ \
TFJ(PromiseGetCapabilitiesExecutor, 2, kResolve, kReject) \
/* ES6 #sec-newpromisecapability */ \
- TFJ(NewPromiseCapability, 2, kConstructor, kDebugEvent) \
+ TFS(NewPromiseCapability, kConstructor, kDebugEvent) \
+ TFJ(PromiseConstructorLazyDeoptContinuation, 2, kPromise, kResult) \
/* ES6 #sec-promise-executor */ \
TFJ(PromiseConstructor, 1, kExecutor) \
- TFJ(PromiseInternalConstructor, 1, kParent) \
CPP(IsPromise) \
- /* ES #sec-promise-resolve-functions */ \
- TFJ(PromiseResolveClosure, 1, kValue) \
- /* ES #sec-promise-reject-functions */ \
- TFJ(PromiseRejectClosure, 1, kValue) \
- TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.prototype.then */ \
- TFJ(PromisePrototypeThen, 2, kOnFullfilled, kOnRejected) \
+ TFJ(PromisePrototypeThen, 2, kOnFulfilled, kOnRejected) \
+ /* ES #sec-performpromisethen */ \
+ TFS(PerformPromiseThen, kPromise, kOnFulfilled, kOnRejected, kResultPromise) \
/* ES #sec-promise.prototype.catch */ \
TFJ(PromisePrototypeCatch, 1, kOnRejected) \
- /* ES #sec-fulfillpromise */ \
- TFJ(ResolvePromise, 2, kPromise, kValue) \
- TFS(PromiseHandleReject, kPromise, kOnReject, kException) \
- TFS(PromiseHandle, kValue, kHandler, kDeferredPromise, kDeferredOnResolve, \
- kDeferredOnReject) \
- TFJ(PromiseHandleJS, 5, kValue, kHandler, kDeferredPromise, \
- kDeferredOnResolve, kDeferredOnReject) \
+ /* ES #sec-promisereactionjob */ \
+ TFS(PromiseRejectReactionJob, kReason, kHandler, kPayload) \
+ TFS(PromiseFulfillReactionJob, kValue, kHandler, kPayload) \
+ /* ES #sec-promiseresolvethenablejob */ \
+ TFS(PromiseResolveThenableJob, kPromiseToResolve, kThenable, kThen) \
/* ES #sec-promise.resolve */ \
- TFJ(PromiseResolveWrapper, 1, kValue) \
+ TFJ(PromiseResolveTrampoline, 1, kValue) \
+ /* ES #sec-promise-resolve */ \
TFS(PromiseResolve, kConstructor, kValue) \
/* ES #sec-promise.reject */ \
TFJ(PromiseReject, 1, kReason) \
- TFJ(InternalPromiseReject, 3, kPromise, kReason, kDebugEvent) \
TFJ(PromisePrototypeFinally, 1, kOnFinally) \
TFJ(PromiseThenFinally, 1, kValue) \
TFJ(PromiseCatchFinally, 1, kReason) \
@@ -833,8 +842,15 @@ namespace internal {
TFJ(PromiseThrowerFinally, 0) \
/* ES #sec-promise.all */ \
TFJ(PromiseAll, 1, kIterable) \
+ TFJ(PromiseAllResolveElementClosure, 1, kValue) \
/* ES #sec-promise.race */ \
TFJ(PromiseRace, 1, kIterable) \
+ /* V8 Extras: v8.createPromise(parent) */ \
+ TFJ(PromiseInternalConstructor, 1, kParent) \
+ /* V8 Extras: v8.rejectPromise(promise, reason) */ \
+ TFJ(PromiseInternalReject, 2, kPromise, kReason) \
+ /* V8 Extras: v8.resolvePromise(promise, resolution) */ \
+ TFJ(PromiseInternalResolve, 2, kPromise, kResolution) \
\
/* Proxy */ \
TFJ(ProxyConstructor, 0) \
@@ -1032,9 +1048,8 @@ namespace internal {
/* ES6 #sec-string.prototype.tostring */ \
TFJ(StringPrototypeToString, 0) \
TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimLeft, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimRight, \
+ TFJ(StringPrototypeTrimEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(StringPrototypeTrimStart, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.valueof */ \
TFJ(StringPrototypeValueOf, 0) \
@@ -1062,16 +1077,13 @@ namespace internal {
TFJ(SymbolPrototypeValueOf, 0) \
\
/* TypedArray */ \
+ TFS(IterableToList, kIterable, kIteratorFn) \
TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize) \
TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \
kByteOffset) \
- /* ES6 #sec-typedarray-buffer-byteoffset-length */ \
- TFJ(TypedArrayConstructByArrayBuffer, 5, kHolder, kBuffer, kByteOffset, \
- kLength, kElementSize) \
- TFJ(TypedArrayConstructByArrayLike, 4, kHolder, kArrayLike, kLength, \
- kElementSize) \
- /* ES6 #sec-typedarray-length */ \
- TFJ(TypedArrayConstructByLength, 3, kHolder, kLength, kElementSize) \
+ TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(TypedArrayConstructor_ConstructStub, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
TFJ(TypedArrayPrototypeByteLength, 0) \
@@ -1089,6 +1101,9 @@ namespace internal {
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
CPP(TypedArrayPrototypeFill) \
+ /* ES6 #sec-%typedarray%.prototype.filter */ \
+ TFJ(TypedArrayPrototypeFilter, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.find */ \
TFJ(TypedArrayPrototypeFind, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1106,7 +1121,11 @@ namespace internal {
/* ES6 %TypedArray%.prototype.set */ \
TFJ(TypedArrayPrototypeSet, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-%typedarray%.prototype.slice */ \
- CPP(TypedArrayPrototypeSlice) \
+ TFJ(TypedArrayPrototypeSlice, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.prototype.subarray */ \
+ TFJ(TypedArrayPrototypeSubArray, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
TFJ(TypedArrayPrototypeToStringTag, 0) \
/* ES6 %TypedArray%.prototype.every */ \
@@ -1126,6 +1145,10 @@ namespace internal {
/* ES6 %TypedArray%.prototype.forEach */ \
TFJ(TypedArrayPrototypeForEach, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.of */ \
+ TFJ(TypedArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.from */ \
+ TFJ(TypedArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
ASM(WasmCompileLazy) \
@@ -1159,6 +1182,17 @@ namespace internal {
\
/* AsyncGenerator */ \
\
+ /* Await (proposal-async-iteration/#await), with resume behaviour */ \
+ /* specific to Async Generators. Internal / Not exposed to JS code. */ \
+ TFS(AsyncGeneratorAwaitCaught, kGenerator, kValue) \
+ TFS(AsyncGeneratorAwaitUncaught, kGenerator, kValue) \
+ TFC(AsyncGeneratorAwaitFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorAwaitReject, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorYieldFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnClosedFulfill, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnClosedReject, PromiseReactionHandler, 1) \
+ TFC(AsyncGeneratorReturnFulfill, PromiseReactionHandler, 1) \
+ \
TFS(AsyncGeneratorResolve, kGenerator, kValue, kDone) \
TFS(AsyncGeneratorReject, kGenerator, kValue) \
TFS(AsyncGeneratorYield, kGenerator, kValue, kIsCaught) \
@@ -1181,17 +1215,6 @@ namespace internal {
TFJ(AsyncGeneratorPrototypeThrow, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
- /* Await (proposal-async-iteration/#await), with resume behaviour */ \
- /* specific to Async Generators. Internal / Not exposed to JS code. */ \
- TFJ(AsyncGeneratorAwaitCaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitUncaught, 2, kGenerator, kAwaited) \
- TFJ(AsyncGeneratorAwaitResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorAwaitRejectClosure, 1, kValue) \
- TFJ(AsyncGeneratorYieldResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnClosedResolveClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnClosedRejectClosure, 1, kValue) \
- TFJ(AsyncGeneratorReturnResolveClosure, 1, kValue) \
- \
/* Async-from-Sync Iterator */ \
\
/* %AsyncFromSyncIteratorPrototype% */ \
@@ -1240,25 +1263,16 @@ namespace internal {
V(AsyncFromSyncIteratorPrototypeNext) \
V(AsyncFromSyncIteratorPrototypeReturn) \
V(AsyncFromSyncIteratorPrototypeThrow) \
- V(AsyncFunctionAwaitCaught) \
- V(AsyncFunctionAwaitUncaught) \
V(AsyncGeneratorResolve) \
- V(AsyncGeneratorAwaitCaught) \
- V(AsyncGeneratorAwaitUncaught) \
- V(PerformNativePromiseThen) \
V(PromiseAll) \
V(PromiseConstructor) \
- V(PromiseHandle) \
+ V(PromiseFulfillReactionJob) \
V(PromiseRace) \
- V(PromiseResolve) \
- V(PromiseResolveClosure) \
- V(RejectNativePromise) \
- V(ResolveNativePromise) \
V(ResolvePromise)
// The exceptions thrown in the following builtins are caught internally and will
// not be propagated further or re-thrown
-#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseHandleReject)
+#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseRejectReactionJob)
#define IGNORE_BUILTIN(...)
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 771c7243ac..cc6d237af6 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -288,26 +288,22 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
-// TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
-// can tailcall to the builtin directly.
-RUNTIME_FUNCTION(Runtime_FunctionBind) {
- DCHECK_EQ(2, args.length());
- Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
- // Rewrap the arguments as builtins arguments.
- int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
- BuiltinArguments caller_args(argc, incoming->arguments() + 1);
- return DoFunctionBind(isolate, caller_args);
-}
-
// ES6 section 19.2.3.5 Function.prototype.toString ( )
BUILTIN(FunctionPrototypeToString) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
if (receiver->IsJSBoundFunction()) {
return *JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(receiver));
- } else if (receiver->IsJSFunction()) {
+ }
+ if (receiver->IsJSFunction()) {
return *JSFunction::ToString(Handle<JSFunction>::cast(receiver));
}
+ // With the revised toString behavior, all callable objects are valid
+ // receivers for this method.
+ if (FLAG_harmony_function_tostring && receiver->IsJSReceiver() &&
+ JSReceiver::cast(*receiver)->map()->is_callable()) {
+ return isolate->heap()->function_native_code_string();
+ }
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index b063b314b5..07a56c86ed 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -84,9 +84,8 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
BIND(&if_receiverisincompatible);
{
// The {receiver} is not a valid JSGeneratorObject.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant(method_name), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), receiver);
}
BIND(&if_receiverisclosed);
@@ -110,10 +109,7 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
}
BIND(&if_receiverisrunning);
- {
- CallRuntime(Runtime::kThrowGeneratorRunning, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kGeneratorRunning); }
BIND(&if_exception);
{
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index bb4b66e3a4..edc529c798 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -73,8 +73,8 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
Node* frame = Parameter(Descriptor::kFrame);
- Node* length = SmiToWord(Parameter(Descriptor::kLength));
- Node* mapped_count = SmiToWord(Parameter(Descriptor::kMappedCount));
+ Node* length = SmiToIntPtr(Parameter(Descriptor::kLength));
+ Node* mapped_count = SmiToIntPtr(Parameter(Descriptor::kMappedCount));
// Check if we can allocate in new space.
ElementsKind kind = PACKED_ELEMENTS;
@@ -164,8 +164,8 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
{
// Allocate in old space (or large object space).
TailCallRuntime(Runtime::kNewArgumentsElements, NoContextConstant(),
- BitcastWordToTagged(frame), SmiFromWord(length),
- SmiFromWord(mapped_count));
+ BitcastWordToTagged(frame), SmiFromIntPtr(length),
+ SmiFromIntPtr(mapped_count));
}
}
@@ -202,7 +202,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
Node* mask;
GetMarkBit(object, &cell, &mask);
- mask = TruncateWordToWord32(mask);
+ mask = TruncateIntPtrToInt32(mask);
Node* bits = Load(MachineType::Int32(), cell);
Node* bit_0 = Word32And(bits, mask);
@@ -239,7 +239,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
Node* cell;
Node* mask;
GetMarkBit(object, &cell, &mask);
- mask = TruncateWordToWord32(mask);
+ mask = TruncateIntPtrToInt32(mask);
// Non-white has 1 for the first bit, so we only need to check for the first
// bit.
return Word32Equal(Word32And(Load(MachineType::Int32(), cell), mask),
@@ -628,6 +628,9 @@ class InternalBuiltinsAssembler : public CodeStubAssembler {
void EnterMicrotaskContext(TNode<Context> context);
void LeaveMicrotaskContext();
+ void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
+ SloppyTNode<HeapObject> payload);
+
TNode<Object> GetPendingException() {
auto ref = ExternalReference(kPendingExceptionAddress, isolate());
return TNode<Object>::UncheckedCast(
@@ -745,6 +748,19 @@ void InternalBuiltinsAssembler::LeaveMicrotaskContext() {
}
}
+void InternalBuiltinsAssembler::RunPromiseHook(
+ Runtime::FunctionId id, TNode<Context> context,
+ SloppyTNode<HeapObject> payload) {
+ Label hook(this, Label::kDeferred), done_hook(this);
+ Branch(IsPromiseHookEnabledOrDebugIsActive(), &hook, &done_hook);
+ BIND(&hook);
+ {
+ CallRuntime(id, context, payload);
+ Goto(&done_hook);
+ }
+ BIND(&done_hook);
+}
+
TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
Node* microtask = Parameter(Descriptor::kMicrotask);
@@ -812,13 +828,15 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
}
TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
- Label init_queue_loop(this);
+ // Load the current context from the isolate.
+ TNode<Context> current_context = GetCurrentContext();
+ Label init_queue_loop(this);
Goto(&init_queue_loop);
BIND(&init_queue_loop);
{
TVARIABLE(IntPtrT, index, IntPtrConstant(0));
- Label loop(this, &index);
+ Label loop(this, &index), loop_next(this);
TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount();
ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant());
@@ -830,222 +848,193 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0)));
SetPendingMicrotaskCount(IntPtrConstant(0));
- SetMicrotaskQueue(
- TNode<FixedArray>::UncheckedCast(EmptyFixedArrayConstant()));
+ SetMicrotaskQueue(EmptyFixedArrayConstant());
Goto(&loop);
BIND(&loop);
{
- TNode<HeapObject> microtask =
- TNode<HeapObject>::UncheckedCast(LoadFixedArrayElement(queue, index));
- index = IntPtrAdd(index, IntPtrConstant(1));
+ TNode<HeapObject> microtask = TNode<HeapObject>::UncheckedCast(
+ LoadFixedArrayElement(queue, index.value()));
+ index = IntPtrAdd(index.value(), IntPtrConstant(1));
CSA_ASSERT(this, TaggedIsNotSmi(microtask));
TNode<Map> microtask_map = LoadMap(microtask);
TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map);
- Label is_call_handler_info(this);
- Label is_function(this);
- Label is_promise_resolve_thenable_job(this);
- Label is_promise_reaction_job(this);
- Label is_unreachable(this);
-
- int32_t case_values[] = {TUPLE3_TYPE, // CallHandlerInfo
- JS_FUNCTION_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
- PROMISE_REACTION_JOB_INFO_TYPE};
-
- Label* case_labels[] = {&is_call_handler_info, &is_function,
- &is_promise_resolve_thenable_job,
- &is_promise_reaction_job};
-
+ VARIABLE(var_exception, MachineRepresentation::kTagged,
+ TheHoleConstant());
+ Label if_exception(this, Label::kDeferred);
+ Label is_callable(this), is_callback(this),
+ is_promise_fulfill_reaction_job(this),
+ is_promise_reject_reaction_job(this),
+ is_promise_resolve_thenable_job(this),
+ is_unreachable(this, Label::kDeferred);
+
+ int32_t case_values[] = {CALLABLE_TASK_TYPE, CALLBACK_TASK_TYPE,
+ PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
+ PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE};
+ Label* case_labels[] = {
+ &is_callable, &is_callback, &is_promise_fulfill_reaction_job,
+ &is_promise_reject_reaction_job, &is_promise_resolve_thenable_job};
static_assert(arraysize(case_values) == arraysize(case_labels), "");
Switch(microtask_type, &is_unreachable, case_values, case_labels,
arraysize(case_labels));
- BIND(&is_call_handler_info);
+ BIND(&is_callable);
{
- // Bailout to C++ slow path for the remainder of the loop.
- auto index_ref =
- ExternalReference(kMicrotaskQueueBailoutIndexAddress, isolate());
- auto count_ref =
- ExternalReference(kMicrotaskQueueBailoutCountAddress, isolate());
- auto rep = kIntSize == 4 ? MachineRepresentation::kWord32
- : MachineRepresentation::kWord64;
-
- // index was pre-incremented, decrement for bailout to C++.
- Node* value = IntPtrSub(index, IntPtrConstant(1));
-
- if (kPointerSize == 4) {
- DCHECK_EQ(kIntSize, 4);
- StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
- StoreNoWriteBarrier(rep, ExternalConstant(count_ref), num_tasks);
- } else {
- Node* count = num_tasks;
- if (kIntSize == 4) {
- value = TruncateInt64ToInt32(value);
- count = TruncateInt64ToInt32(count);
- }
- StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value);
- StoreNoWriteBarrier(rep, ExternalConstant(count_ref), count);
- }
-
- Return(queue);
- }
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context =
+ LoadObjectField<Context>(microtask, CallableTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
- BIND(&is_function);
- {
- Label cont(this);
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> fn_context = TNode<Context>::UncheckedCast(
- LoadObjectField(microtask, JSFunction::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(fn_context));
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(microtask_context);
SetCurrentContext(native_context);
- EnterMicrotaskContext(fn_context);
- Node* const call = CallJS(CodeFactory::Call(isolate()), native_context,
- microtask, UndefinedConstant());
- GotoIfException(call, &cont);
- Goto(&cont);
- BIND(&cont);
+
+ TNode<JSReceiver> callable = LoadObjectField<JSReceiver>(
+ microtask, CallableTask::kCallableOffset);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ microtask_context, callable, UndefinedConstant());
+ GotoIfException(result, &if_exception, &var_exception);
LeaveMicrotaskContext();
- SetCurrentContext(old_context);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
+
+ BIND(&is_callback);
+ {
+ Node* const microtask_callback =
+ LoadObjectField(microtask, CallbackTask::kCallbackOffset);
+ Node* const microtask_data =
+ LoadObjectField(microtask, CallbackTask::kDataOffset);
+
+ // If this turns out to become a bottleneck because of the calls
+ // to C++ via CEntryStub, we can choose to speed them up using a
+ // mechanism similar to the one we use for the CallApiFunction stub,
+ // except that calling the MicrotaskCallback is even easier, since
+ // it doesn't accept any tagged parameters, doesn't return a value
+ // and ignores exceptions.
+ //
+ // But from our current measurements it doesn't seem to be a
+ // serious performance problem, even if the microtask queue is full
+ // of CallbackTasks (which is not a realistic use case anyway).
+ CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
+ microtask_callback, microtask_data);
+ Goto(&loop_next);
}
BIND(&is_promise_resolve_thenable_job);
{
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> microtask_context =
- TNode<Context>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseResolveThenableJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ EnterMicrotaskContext(microtask_context);
SetCurrentContext(native_context);
+
+ Node* const promise_to_resolve = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset);
+ Node* const then = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kThenOffset);
+ Node* const thenable = LoadObjectField(
+ microtask, PromiseResolveThenableJobTask::kThenableOffset);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
+ promise_to_resolve, thenable, then);
+ GotoIfException(result, &if_exception, &var_exception);
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
+
+ BIND(&is_promise_fulfill_reaction_job);
+ {
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseReactionJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
EnterMicrotaskContext(microtask_context);
+ SetCurrentContext(native_context);
- Label if_unhandled_exception(this), done(this);
- Node* const ret = CallBuiltin(Builtins::kPromiseResolveThenableJob,
- native_context, microtask);
- GotoIfException(ret, &if_unhandled_exception, &exception);
- Goto(&done);
+ Node* const argument =
+ LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
+ Node* const handler =
+ LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
+ Node* const payload =
+ LoadObjectField(microtask, PromiseReactionJobTask::kPayloadOffset);
- BIND(&if_unhandled_exception);
- CallRuntime(Runtime::kReportMessage, native_context, exception.value());
- Goto(&done);
+ // Run the promise before/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context, payload);
- BIND(&done);
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseFulfillReactionJob, microtask_context,
+ argument, handler, payload);
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // Run the promise after/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context, payload);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
}
- BIND(&is_promise_reaction_job);
+ BIND(&is_promise_reject_reaction_job);
{
- Label if_multiple(this);
- Label if_single(this);
-
- Node* const value =
- LoadObjectField(microtask, PromiseReactionJobInfo::kValueOffset);
- Node* const tasks =
- LoadObjectField(microtask, PromiseReactionJobInfo::kTasksOffset);
- Node* const deferred_promises = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredPromiseOffset);
- Node* const deferred_on_resolves = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredOnResolveOffset);
- Node* const deferred_on_rejects = LoadObjectField(
- microtask, PromiseReactionJobInfo::kDeferredOnRejectOffset);
-
- TNode<Context> old_context = GetCurrentContext();
- TNode<Context> microtask_context = TNode<Context>::UncheckedCast(
- LoadObjectField(microtask, PromiseReactionJobInfo::kContextOffset));
- TNode<Context> native_context =
- TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context));
- SetCurrentContext(native_context);
+ // Enter the context of the {microtask}.
+ TNode<Context> microtask_context = LoadObjectField<Context>(
+ microtask, PromiseReactionJobTask::kContextOffset);
+ TNode<Context> native_context = LoadNativeContext(microtask_context);
+ CSA_ASSERT(this, IsNativeContext(native_context));
EnterMicrotaskContext(microtask_context);
+ SetCurrentContext(native_context);
+
+ Node* const argument =
+ LoadObjectField(microtask, PromiseReactionJobTask::kArgumentOffset);
+ Node* const handler =
+ LoadObjectField(microtask, PromiseReactionJobTask::kHandlerOffset);
+ Node* const payload =
+ LoadObjectField(microtask, PromiseReactionJobTask::kPayloadOffset);
+
+ // Run the promise before/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context, payload);
+
+ Node* const result =
+ CallBuiltin(Builtins::kPromiseRejectReactionJob, microtask_context,
+ argument, handler, payload);
+ GotoIfException(result, &if_exception, &var_exception);
+
+ // Run the promise after/debug hook if enabled.
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context, payload);
- Branch(IsFixedArray(deferred_promises), &if_multiple, &if_single);
-
- BIND(&if_single);
- {
- CallBuiltin(Builtins::kPromiseHandle, native_context, value, tasks,
- deferred_promises, deferred_on_resolves,
- deferred_on_rejects);
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
- }
-
- BIND(&if_multiple);
- {
- TVARIABLE(IntPtrT, inner_index, IntPtrConstant(0));
- TNode<IntPtrT> inner_length =
- LoadAndUntagFixedArrayBaseLength(deferred_promises);
- Label inner_loop(this, &inner_index), done(this);
-
- CSA_ASSERT(this, IntPtrGreaterThan(inner_length, IntPtrConstant(0)));
- Goto(&inner_loop);
- BIND(&inner_loop);
- {
- Node* const task = LoadFixedArrayElement(tasks, inner_index);
- Node* const deferred_promise =
- LoadFixedArrayElement(deferred_promises, inner_index);
- Node* const deferred_on_resolve =
- LoadFixedArrayElement(deferred_on_resolves, inner_index);
- Node* const deferred_on_reject =
- LoadFixedArrayElement(deferred_on_rejects, inner_index);
- CallBuiltin(Builtins::kPromiseHandle, native_context, value, task,
- deferred_promise, deferred_on_resolve,
- deferred_on_reject);
- inner_index = IntPtrAdd(inner_index, IntPtrConstant(1));
- Branch(IntPtrLessThan(inner_index, inner_length), &inner_loop,
- &done);
- }
- BIND(&done);
-
- LeaveMicrotaskContext();
- SetCurrentContext(old_context);
-
- Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop);
- }
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
}
BIND(&is_unreachable);
Unreachable();
- }
- }
-}
-TF_BUILTIN(PromiseResolveThenableJob, InternalBuiltinsAssembler) {
- VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant());
- Callable call = CodeFactory::Call(isolate());
- Label reject_promise(this, Label::kDeferred);
- TNode<PromiseResolveThenableJobInfo> microtask =
- TNode<PromiseResolveThenableJobInfo>::UncheckedCast(
- Parameter(Descriptor::kMicrotask));
- TNode<Context> context =
- TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
-
- TNode<JSReceiver> thenable = TNode<JSReceiver>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kThenableOffset));
- TNode<JSReceiver> then = TNode<JSReceiver>::UncheckedCast(
- LoadObjectField(microtask, PromiseResolveThenableJobInfo::kThenOffset));
- TNode<JSFunction> resolve = TNode<JSFunction>::UncheckedCast(LoadObjectField(
- microtask, PromiseResolveThenableJobInfo::kResolveOffset));
- TNode<JSFunction> reject = TNode<JSFunction>::UncheckedCast(
- LoadObjectField(microtask, PromiseResolveThenableJobInfo::kRejectOffset));
-
- Node* const result = CallJS(call, context, then, thenable, resolve, reject);
- GotoIfException(result, &reject_promise, &exception);
- Return(UndefinedConstant());
+ BIND(&if_exception);
+ {
+ // Report unhandled exceptions from microtasks.
+ CallRuntime(Runtime::kReportMessage, current_context,
+ var_exception.value());
+ LeaveMicrotaskContext();
+ SetCurrentContext(current_context);
+ Goto(&loop_next);
+ }
- BIND(&reject_promise);
- CallJS(call, context, reject, UndefinedConstant(), exception.value());
- Return(UndefinedConstant());
+ BIND(&loop_next);
+ Branch(IntPtrLessThan(index.value(), num_tasks), &loop, &init_queue_loop);
+ }
+ }
}
TF_BUILTIN(AbortJS, CodeStubAssembler) {
diff --git a/deps/v8/src/builtins/builtins-intl.h b/deps/v8/src/builtins/builtins-intl.h
index 8dda0c0898..419ff14db1 100644
--- a/deps/v8/src/builtins/builtins-intl.h
+++ b/deps/v8/src/builtins/builtins-intl.h
@@ -27,4 +27,4 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_H_
+#endif // V8_BUILTINS_BUILTINS_INTL_H_
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index f6a6d85880..21f6039f08 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -11,11 +11,24 @@ namespace internal {
using compiler::Node;
+Node* IteratorBuiltinsAssembler::GetIteratorMethod(Node* context,
+ Node* object) {
+ return GetProperty(context, object, factory()->iterator_symbol());
+}
+
IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Node* object,
Label* if_exception,
Variable* exception) {
- Node* method = GetProperty(context, object, factory()->iterator_symbol());
+ Node* method = GetIteratorMethod(context, object);
+ return GetIterator(context, object, method, if_exception, exception);
+}
+
+IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
+ Node* object,
+ Node* method,
+ Label* if_exception,
+ Variable* exception) {
GotoIfException(method, if_exception, exception);
Callable callable = CodeFactory::Call(isolate());
@@ -27,13 +40,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
BIND(&if_notobject);
- {
- Node* ret =
- CallRuntime(Runtime::kThrowTypeError, context,
- SmiConstant(MessageTemplate::kNotAnIterator), iterator);
- GotoIfException(ret, if_exception, exception);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNotAnIterator, iterator); }
BIND(&get_next);
Node* const next = GetProperty(context, iterator, factory()->next_string());
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 42627b8437..13464516d6 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -17,11 +17,17 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
explicit IteratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
+ // Returns object[Symbol.iterator].
+ Node* GetIteratorMethod(Node* context, Node* object);
+
// https://tc39.github.io/ecma262/#sec-getiterator --- never used for
// @@asyncIterator.
IteratorRecord GetIterator(Node* context, Node* object,
Label* if_exception = nullptr,
Variable* exception = nullptr);
+ IteratorRecord GetIterator(Node* context, Node* object, Node* method,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorstep
// Returns `false` if the iterator is done, otherwise returns an
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index d588113cdd..be58e8210e 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -268,7 +268,7 @@ TF_BUILTIN(MathClz32, CodeStubAssembler) {
BIND(&if_xissmi);
{
- var_clz32_x.Bind(SmiToWord32(x));
+ var_clz32_x.Bind(SmiToInt32(x));
Goto(&do_clz32);
}
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 821dac9cc0..1340c33eb1 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -319,12 +319,14 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)),
&if_inputissigned32);
- // Check if the absolute {input} value is in the ]0.01,1e9[ range.
+ // Check if the absolute {input} value is in the [1,1<<31[ range.
+ // Take the generic path for the range [0,1[ because the result
+ // could be -0.
Node* input_value_abs = Float64Abs(input_value);
- GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1e9)),
+ GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1u << 31)),
&if_generic);
- Branch(Float64LessThan(Float64Constant(0.01), input_value_abs),
+ Branch(Float64LessThanOrEqual(Float64Constant(1), input_value_abs),
&if_inputissigned32, &if_generic);
// Return the truncated int32 value, and return the tagged result.
@@ -904,8 +906,8 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
}
BIND(&dividend_is_not_zero);
- Node* untagged_divisor = SmiToWord32(divisor);
- Node* untagged_dividend = SmiToWord32(dividend);
+ Node* untagged_divisor = SmiToInt32(divisor);
+ Node* untagged_dividend = SmiToInt32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
@@ -929,7 +931,7 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
Node* truncated = Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
- Return(SmiFromWord32(untagged_result));
+ Return(SmiFromInt32(untagged_result));
// Bailout: convert {dividend} and {divisor} to double and do double
// division.
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 4cd012e6f0..1ebfbacf38 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -16,6 +16,8 @@ namespace internal {
// ES6 section 19.1 Object Objects
typedef compiler::Node Node;
+template <class T>
+using TNode = CodeStubAssembler::TNode<T>;
class ObjectBuiltinsAssembler : public CodeStubAssembler {
public:
@@ -34,6 +36,46 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable,
Node* enumerable, Node* configurable);
Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout);
+
+ Node* IsSpecialReceiverMap(SloppyTNode<Map> map);
+};
+
+class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
+ public:
+ explicit ObjectEntriesValuesBuiltinsAssembler(
+ compiler::CodeAssemblerState* state)
+ : ObjectBuiltinsAssembler(state) {}
+
+ protected:
+ enum CollectType { kEntries, kValues };
+
+ TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
+
+ TNode<BoolT> IsPropertyEnumerable(TNode<Uint32T> details);
+
+ TNode<BoolT> IsPropertyKindAccessor(TNode<Uint32T> kind);
+
+ TNode<BoolT> IsPropertyKindData(TNode<Uint32T> kind);
+
+ TNode<Uint32T> HasHiddenPrototype(TNode<Map> map);
+
+ TNode<Uint32T> LoadPropertyKind(TNode<Uint32T> details) {
+ return DecodeWord32<PropertyDetails::KindField>(details);
+ }
+
+ void GetOwnValuesOrEntries(TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type);
+
+ void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
+
+ TNode<JSArray> FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type);
+
+ TNode<JSArray> FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> values_or_entries,
+ TNode<IntPtrT> size, TNode<Map> array_map, Label* if_empty);
};
void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
@@ -97,6 +139,253 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
return js_desc;
}
+Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ uint32_t mask =
+ Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
+ USE(mask);
+ // Interceptors or access checks imply special receiver.
+ CSA_ASSERT(this,
+ SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
+ return is_special;
+}
+
+TNode<Word32T>
+ObjectEntriesValuesBuiltinsAssembler::IsStringWrapperElementsKind(
+ TNode<Map> map) {
+ Node* kind = LoadMapElementsKind(map);
+ return Word32Or(
+ Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS)));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable(
+ TNode<Uint32T> details) {
+ TNode<Uint32T> attributes =
+ DecodeWord32<PropertyDetails::AttributesField>(details);
+ return IsNotSetWord32(attributes, PropertyAttributes::DONT_ENUM);
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindAccessor(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kAccessor));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kData));
+}
+
+TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
+ TNode<Map> map) {
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field3);
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
+ TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type) {
+ TNode<JSReceiver> receiver = ToObject(context, maybe_object);
+
+ Label if_call_runtime_with_fast_path(this, Label::kDeferred),
+ if_call_runtime(this, Label::kDeferred),
+ if_no_properties(this, Label::kDeferred);
+
+ TNode<Map> map = LoadMap(receiver);
+ GotoIfNot(IsJSObjectMap(map), &if_call_runtime);
+ GotoIfMapHasSlowProperties(map, &if_call_runtime);
+
+ TNode<JSObject> object = CAST(receiver);
+ TNode<FixedArrayBase> elements = LoadElements(object);
+ // If the object has elements, we treat it as the slow case
+ // and go to the runtime call.
+ GotoIfNot(IsEmptyFixedArray(elements), &if_call_runtime_with_fast_path);
+
+ TNode<JSArray> result = FastGetOwnValuesOrEntries(
+ context, object, &if_call_runtime_with_fast_path, &if_no_properties,
+ collect_type);
+ Return(result);
+
+ BIND(&if_no_properties);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* empty_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+ Return(empty_array);
+ }
+
+ BIND(&if_call_runtime_with_fast_path);
+ {
+ // In the slow case, we simply call into the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntries, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValues, context, object));
+ }
+ }
+
+ BIND(&if_call_runtime);
+ {
+ // In the slow case, we simply call into the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(
+ CallRuntime(Runtime::kObjectEntriesSkipFastPath, context, receiver));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(
+ CallRuntime(Runtime::kObjectValuesSkipFastPath, context, receiver));
+ }
+ }
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
+ TNode<Map> map, Label* if_slow) {
+ GotoIf(IsStringWrapperElementsKind(map), if_slow);
+ GotoIf(IsSpecialReceiverMap(map), if_slow);
+ GotoIf(HasHiddenPrototype(map), if_slow);
+ GotoIf(IsDictionaryMap(map), if_slow);
+}
+
+TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type) {
+ Node* native_context = LoadNativeContext(context);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<Map> map = LoadMap(object);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+
+ Label if_has_enum_cache(this), if_not_has_enum_cache(this),
+ collect_entries(this);
+ Node* object_enum_length =
+ DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
+ Node* has_enum_cache = WordNotEqual(
+ object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel));
+
+ // If we find an enum cache on the object, we use its length as the
+ // array length, because it matches the length of the
+ // Object.entries/Object.values result array. object_enum_length
+ // therefore uses less memory than the NumberOfOwnDescriptorsBits
+ // value. If the enum cache is not found, we call into the runtime,
+ // which initializes the enum cache for subsequent calls of the CSA
+ // fast path.
+ Branch(has_enum_cache, &if_has_enum_cache, if_call_runtime_with_fast_path);
+
+ BIND(&if_has_enum_cache);
+ {
+ GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
+ TNode<FixedArray> values_or_entries = TNode<FixedArray>::UncheckedCast(
+ AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+
+ // Even when an enum cache is present, we cannot detect accessor
+ // properties on the object until we loop through its descriptors.
+ // If the object turns out to have an accessor, we must jump to the
+ // runtime call, which would leave uninitialized slots behind in the
+ // FixedArray. Therefore the array is filled with the-hole up front,
+ // even when the enum cache exists.
+ FillFixedArrayWithValue(PACKED_ELEMENTS, values_or_entries,
+ IntPtrConstant(0), object_enum_length,
+ Heap::kTheHoleValueRootIndex);
+
+ TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_descriptor_number, IntPtrConstant(0));
+ Variable* vars[] = {&var_descriptor_number, &var_result_index};
+ // Let desc be ? O.[[GetOwnProperty]](key).
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ Label loop(this, 2, vars), after_loop(this), loop_condition(this);
+ Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length),
+ &after_loop, &loop);
+
+ // We don't use BuildFastLoop here; instead we use a hand-written
+ // loop, because we need 'continue' functionality.
+ BIND(&loop);
+ {
+ // Currently we never invoke getters here, so the map cannot change.
+ CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
+ TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
+ TruncateIntPtrToInt32(var_descriptor_number.value()));
+ Node* next_key = DescriptorArrayGetKey(descriptors, descriptor_index);
+
+ // Skip Symbols.
+ GotoIf(IsSymbol(next_key), &loop_condition);
+
+ TNode<Uint32T> details = TNode<Uint32T>::UncheckedCast(
+ DescriptorArrayGetDetails(descriptors, descriptor_index));
+ TNode<Uint32T> kind = LoadPropertyKind(details);
+
+ // If the property is an accessor, we leave the fast path and call the runtime.
+ GotoIf(IsPropertyKindAccessor(kind), if_call_runtime_with_fast_path);
+ CSA_ASSERT(this, IsPropertyKindData(kind));
+
+ // If desc is not undefined and desc.[[Enumerable]] is true, then
+ GotoIfNot(IsPropertyEnumerable(details), &loop_condition);
+
+ VARIABLE(var_property_value, MachineRepresentation::kTagged,
+ UndefinedConstant());
+ Node* descriptor_name_index = DescriptorArrayToKeyIndex(
+ TruncateIntPtrToInt32(var_descriptor_number.value()));
+
+ // Let value be ? Get(O, key).
+ LoadPropertyFromFastObject(object, map, descriptors,
+ descriptor_name_index, details,
+ &var_property_value);
+
+ // If kind is "value", append value to properties.
+ Node* value = var_property_value.value();
+
+ if (collect_type == CollectType::kEntries) {
+ // Let entry be CreateArrayFromList(« key, value »).
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr,
+ IntPtrConstant(2));
+ StoreFixedArrayElement(elements, 0, next_key, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elements, 1, value, SKIP_WRITE_BARRIER);
+ value = array;
+ }
+
+ StoreFixedArrayElement(values_or_entries, var_result_index.value(),
+ value);
+ Increment(&var_result_index, 1);
+ Goto(&loop_condition);
+
+ BIND(&loop_condition);
+ {
+ Increment(&var_descriptor_number, 1);
+ Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length),
+ &after_loop, &loop);
+ }
+ }
+ BIND(&after_loop);
+ return FinalizeValuesOrEntriesJSArray(context, values_or_entries,
+ var_result_index.value(), array_map,
+ if_no_properties);
+ }
+}
+
+TNode<JSArray>
+ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
+ TNode<Context> context, TNode<FixedArray> result, TNode<IntPtrT> size,
+ TNode<Map> array_map, Label* if_empty) {
+ CSA_ASSERT(this, IsJSArrayMap(array_map));
+
+ GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
+ Node* array = AllocateUninitializedJSArrayWithoutElements(
+ array_map, SmiTag(size), nullptr);
+ StoreObjectField(array, JSArray::kElementsOffset, result);
+ return TNode<JSArray>::UncheckedCast(array);
+}
+
TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
@@ -105,7 +394,7 @@ TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) {
GotoIf(IsNullOrUndefined(receiver), &if_null_or_undefined);
TNode<Object> method =
- CAST(GetProperty(context, receiver, factory()->toString_string()));
+ GetProperty(context, receiver, factory()->toString_string());
Return(CallJS(CodeFactory::Call(isolate()), context, method, receiver));
BIND(&if_null_or_undefined);
@@ -266,6 +555,22 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
}
}
+TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kValues);
+}
+
+TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) {
+ TNode<JSObject> object =
+ TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
+ TNode<Context> context =
+ TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ GetOwnValuesOrEntries(context, object, CollectType::kEntries);
+}
+
// ES #sec-object.prototype.isprototypeof
TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -304,7 +609,7 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
GotoIfNot(IsJSReceiver(value), &if_valueisnotreceiver);
// Simulate the ToObject invocation on {receiver}.
- CallBuiltin(Builtins::kToObject, context, receiver);
+ ToObject(context, receiver);
Unreachable();
}
@@ -367,9 +672,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring);
BIND(&if_tagisnotstring);
{
- var_tag.Bind(
- CallStub(Builtins::CallableFor(isolate(), Builtins::kClassOf),
- context, receiver));
+ var_tag.Bind(CallRuntime(Runtime::kClassOf, context, receiver));
Goto(&if_tagisstring);
}
BIND(&if_tagisstring);
@@ -574,9 +877,8 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
BIND(&return_generic);
{
- Node* tag = GetProperty(
- context, CallBuiltin(Builtins::kToObject, context, receiver),
- LoadRoot(Heap::kto_string_tag_symbolRootIndex));
+ Node* tag = GetProperty(context, ToObject(context, receiver),
+ LoadRoot(Heap::kto_string_tag_symbolRootIndex));
GotoIf(TaggedIsSmi(tag), &return_default);
GotoIfNot(IsString(tag), &return_default);
ReturnToStringFormat(context, tag);
@@ -592,7 +894,7 @@ TF_BUILTIN(ObjectPrototypeValueOf, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* context = Parameter(Descriptor::kContext);
- Return(CallBuiltin(Builtins::kToObject, context, receiver));
+ Return(ToObject(context, receiver));
}
// ES #sec-object.create
@@ -760,7 +1062,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
// Get the initial map from the function, jumping to the runtime if we don't
// have one.
- Label runtime(this);
+ Label done(this), runtime(this);
GotoIfNot(IsFunctionWithPrototypeSlotMap(LoadMap(closure)), &runtime);
Node* maybe_map =
LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset);
@@ -790,7 +1092,13 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
executing);
- Return(result);
+ GotoIfNot(HasInstanceType(maybe_map, JS_ASYNC_GENERATOR_OBJECT_TYPE), &done);
+ StoreObjectFieldNoWriteBarrier(
+ result, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
+ Goto(&done);
+
+ BIND(&done);
+ { Return(result); }
BIND(&runtime);
{
@@ -810,7 +1118,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
Node* key = args.GetOptionalArgumentValue(1);
// 1. Let obj be ? ToObject(O).
- object = CallBuiltin(Builtins::kToObject, context, object);
+ object = ToObject(context, object);
// 2. Let key be ? ToPropertyKey(P).
key = ToName(context, key);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 36f7ebfc0a..4e353b9260 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -395,31 +395,6 @@ BUILTIN(ObjectIsSealed) {
return isolate->heap()->ToBoolean(result.FromJust());
}
-BUILTIN(ObjectValues) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> values;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(values);
-}
-
-BUILTIN(ObjectEntries) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> entries;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, entries,
- JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(entries);
-}
-
BUILTIN(ObjectGetOwnPropertyDescriptors) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 1a3ebcd892..d3ea3f82e2 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -22,19 +22,26 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
- Node* const initial_map =
+ Node* const promise_map =
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const instance = AllocateJSObjectFromMap(initial_map);
- return instance;
+ Node* const promise = Allocate(JSPromise::kSizeWithEmbedderFields);
+ StoreMapNoWriteBarrier(promise, promise_map);
+ StoreObjectFieldRoot(promise, JSPromise::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ return promise;
}
void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
STATIC_ASSERT(v8::Promise::kPending == 0);
+ StoreObjectFieldNoWriteBarrier(promise, JSPromise::kReactionsOrResultOffset,
+ SmiConstant(Smi::kZero));
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset,
- SmiConstant(0));
+ SmiConstant(Smi::kZero));
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
int offset = JSPromise::kSize + i * kPointerSize;
- StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(0));
+ StoreObjectFieldNoWriteBarrier(promise, offset, SmiConstant(Smi::kZero));
}
}
@@ -58,9 +65,11 @@ Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(
Node* context, v8::Promise::PromiseState status, Node* result) {
- Node* const instance = AllocateJSPromise(context);
+ DCHECK_NE(Promise::kPending, status);
- StoreObjectFieldNoWriteBarrier(instance, JSPromise::kResultOffset, result);
+ Node* const instance = AllocateJSPromise(context);
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kReactionsOrResultOffset,
+ result);
STATIC_ASSERT(JSPromise::kStatusShift == 0);
StoreObjectFieldNoWriteBarrier(instance, JSPromise::kFlagsOffset,
SmiConstant(status));
@@ -86,66 +95,68 @@ PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
promise, debug_event, native_context);
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- Node* const resolve_info =
- LoadContextElement(native_context, Context::PROMISE_RESOLVE_SHARED_FUN);
+ Node* const resolve_info = LoadContextElement(
+ native_context,
+ Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
Node* const resolve =
AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
- Node* const reject_info =
- LoadContextElement(native_context, Context::PROMISE_REJECT_SHARED_FUN);
+ Node* const reject_info = LoadContextElement(
+ native_context,
+ Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX);
Node* const reject =
AllocateFunctionWithMapAndContext(map, reject_info, promise_context);
return std::make_pair(resolve, reject);
}
-Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
- Node* constructor,
- Node* debug_event) {
- if (debug_event == nullptr) {
- debug_event = TrueConstant();
- }
+// ES #sec-newpromisecapability
+TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const constructor = Parameter(Descriptor::kConstructor);
+ Node* const debug_event = Parameter(Descriptor::kDebugEvent);
+ Node* const native_context = LoadNativeContext(context);
- Label if_not_constructor(this, Label::kDeferred);
+ Label if_not_constructor(this, Label::kDeferred),
+ if_notcallable(this, Label::kDeferred), if_fast_promise_capability(this),
+ if_slow_promise_capability(this, Label::kDeferred);
GotoIf(TaggedIsSmi(constructor), &if_not_constructor);
GotoIfNot(IsConstructorMap(LoadMap(constructor)), &if_not_constructor);
-
- Node* native_context = LoadNativeContext(context);
-
- Node* map = LoadRoot(Heap::kTuple3MapRootIndex);
- Node* capability = AllocateStruct(map);
-
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(capability);
-
- Label if_builtin_promise(this), if_custom_promise(this, Label::kDeferred),
- out(this);
Branch(WordEqual(constructor,
LoadContextElement(native_context,
Context::PROMISE_FUNCTION_INDEX)),
- &if_builtin_promise, &if_custom_promise);
+ &if_fast_promise_capability, &if_slow_promise_capability);
- BIND(&if_builtin_promise);
+ BIND(&if_fast_promise_capability);
{
- Node* promise = AllocateJSPromise(context);
- PromiseInit(promise);
- StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
+ Node* promise =
+ AllocateAndInitJSPromise(native_context, UndefinedConstant());
Node* resolve = nullptr;
Node* reject = nullptr;
-
std::tie(resolve, reject) =
CreatePromiseResolvingFunctions(promise, debug_event, native_context);
- StoreObjectField(capability, PromiseCapability::kResolveOffset, resolve);
- StoreObjectField(capability, PromiseCapability::kRejectOffset, reject);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
- CallRuntime(Runtime::kPromiseHookInit, context, promise,
- UndefinedConstant());
- Goto(&out);
+ Node* capability = Allocate(PromiseCapability::kSize);
+ StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(capability,
+ PromiseCapability::kPromiseOffset, promise);
+ StoreObjectFieldNoWriteBarrier(capability,
+ PromiseCapability::kResolveOffset, resolve);
+ StoreObjectFieldNoWriteBarrier(capability, PromiseCapability::kRejectOffset,
+ reject);
+ Return(capability);
}
- BIND(&if_custom_promise);
+ BIND(&if_slow_promise_capability);
{
- Label if_notcallable(this, Label::kDeferred);
+ Node* capability = Allocate(PromiseCapability::kSize);
+ StoreMapNoWriteBarrier(capability, Heap::kPromiseCapabilityMapRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kPromiseOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kResolveOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(capability, PromiseCapability::kRejectOffset,
+ Heap::kUndefinedValueRootIndex);
+
Node* executor_context =
CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
Node* executor_info = LoadContextElement(
@@ -155,8 +166,9 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
Node* executor = AllocateFunctionWithMapAndContext(
function_map, executor_info, executor_context);
- Node* promise = ConstructJS(CodeFactory::Construct(isolate()), context,
- constructor, executor);
+ Node* promise = ConstructJS(CodeFactory::Construct(isolate()),
+ native_context, constructor, executor);
+ StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
Node* resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
@@ -167,26 +179,14 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
LoadObjectField(capability, PromiseCapability::kRejectOffset);
GotoIf(TaggedIsSmi(reject), &if_notcallable);
GotoIfNot(IsCallable(reject), &if_notcallable);
-
- StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise);
-
- Goto(&out);
-
- BIND(&if_notcallable);
- StoreObjectField(capability, PromiseCapability::kPromiseOffset,
- UndefinedConstant());
- StoreObjectField(capability, PromiseCapability::kResolveOffset,
- UndefinedConstant());
- StoreObjectField(capability, PromiseCapability::kRejectOffset,
- UndefinedConstant());
- ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
+ Return(capability);
}
BIND(&if_not_constructor);
ThrowTypeError(context, MessageTemplate::kNotConstructor, constructor);
- BIND(&out);
- return var_result.value();
+ BIND(&if_notcallable);
+ ThrowTypeError(context, MessageTemplate::kPromiseNonCallable);
}
Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
@@ -236,7 +236,7 @@ Node* PromiseBuiltinsAssembler::IsPromiseStatus(
Node* PromiseBuiltinsAssembler::PromiseStatus(Node* promise) {
STATIC_ASSERT(JSPromise::kStatusShift == 0);
Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
- return Word32And(SmiToWord32(flags), Int32Constant(JSPromise::kStatusMask));
+ return Word32And(SmiToInt32(flags), Int32Constant(JSPromise::kStatusMask));
}
void PromiseBuiltinsAssembler::PromiseSetStatus(
@@ -258,579 +258,299 @@ void PromiseBuiltinsAssembler::PromiseSetHandledHint(Node* promise) {
StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
}
-Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
- Node* default_constructor) {
- Isolate* isolate = this->isolate();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(default_constructor);
-
- // 2. Let C be ? Get(O, "constructor").
- Node* const constructor =
- GetProperty(context, object, isolate->factory()->constructor_string());
-
- // 3. If C is undefined, return defaultConstructor.
- Label out(this);
- GotoIf(IsUndefined(constructor), &out);
-
- // 4. If Type(C) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(context, constructor,
- MessageTemplate::kConstructorNotReceiver);
-
- // 5. Let S be ? Get(C, @@species).
- Node* const species =
- GetProperty(context, constructor, isolate->factory()->species_symbol());
-
- // 6. If S is either undefined or null, return defaultConstructor.
- GotoIf(IsNullOrUndefined(species), &out);
-
- // 7. If IsConstructor(S) is true, return S.
- Label throw_error(this);
- GotoIf(TaggedIsSmi(species), &throw_error);
- GotoIfNot(IsConstructorMap(LoadMap(species)), &throw_error);
- var_result.Bind(species);
- Goto(&out);
-
- // 8. Throw a TypeError exception.
- BIND(&throw_error);
- ThrowTypeError(context, MessageTemplate::kSpeciesNotConstructor);
-
- BIND(&out);
- return var_result.value();
-}
-
-void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
- Node* value) {
- Node* elements = LoadObjectField(promise, offset);
- Node* length = LoadFixedArrayBaseLength(elements);
- CodeStubAssembler::ParameterMode mode = OptimalParameterMode();
- length = TaggedToParameter(length, mode);
-
- Node* delta = IntPtrOrSmiConstant(1, mode);
- Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
-
- const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
- int additional_offset = 0;
-
- ExtractFixedArrayFlags flags;
- flags |= ExtractFixedArrayFlag::kFixedArrays;
- Node* new_elements =
- ExtractFixedArray(elements, nullptr, length, new_capacity, flags, mode);
-
- StoreFixedArrayElement(new_elements, length, value, barrier_mode,
- additional_offset, mode);
-
- StoreObjectField(promise, offset, new_elements);
-}
-
-Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
- Node* promise,
- Node* on_resolve,
- Node* on_reject) {
- Isolate* isolate = this->isolate();
-
- // 2. If IsPromise(promise) is false, throw a TypeError exception.
- ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
- "Promise.prototype.then");
-
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
-
- // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
- Node* constructor = SpeciesConstructor(context, promise, promise_fun);
-
- // 4. Let resultCapability be ? NewPromiseCapability(C).
- Callable call_callable = CodeFactory::Call(isolate);
- Label fast_promise_capability(this), promise_capability(this),
- perform_promise_then(this);
- VARIABLE(var_deferred_promise, MachineRepresentation::kTagged);
- VARIABLE(var_deferred_on_resolve, MachineRepresentation::kTagged);
- VARIABLE(var_deferred_on_reject, MachineRepresentation::kTagged);
-
- Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
- &promise_capability);
-
- BIND(&fast_promise_capability);
- {
- Node* const deferred_promise = AllocateAndInitJSPromise(context, promise);
- var_deferred_promise.Bind(deferred_promise);
- var_deferred_on_resolve.Bind(UndefinedConstant());
- var_deferred_on_reject.Bind(UndefinedConstant());
- Goto(&perform_promise_then);
- }
-
- BIND(&promise_capability);
- {
- Node* const capability = NewPromiseCapability(context, constructor);
- var_deferred_promise.Bind(
- LoadObjectField(capability, PromiseCapability::kPromiseOffset));
- var_deferred_on_resolve.Bind(
- LoadObjectField(capability, PromiseCapability::kResolveOffset));
- var_deferred_on_reject.Bind(
- LoadObjectField(capability, PromiseCapability::kRejectOffset));
- Goto(&perform_promise_then);
- }
-
- // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
- // resultCapability).
- BIND(&perform_promise_then);
- Node* const result = InternalPerformPromiseThen(
- context, promise, on_resolve, on_reject, var_deferred_promise.value(),
- var_deferred_on_resolve.value(), var_deferred_on_reject.value());
- return result;
-}
-
-Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
- Node* context, Node* promise, Node* on_resolve, Node* on_reject,
- Node* deferred_promise, Node* deferred_on_resolve,
- Node* deferred_on_reject) {
- VARIABLE(var_on_resolve, MachineRepresentation::kTagged);
- VARIABLE(var_on_reject, MachineRepresentation::kTagged);
-
- var_on_resolve.Bind(on_resolve);
- var_on_reject.Bind(on_reject);
-
- Label out(this), if_onresolvenotcallable(this), onrejectcheck(this),
- append_callbacks(this);
- GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
-
- Branch(IsCallable(on_resolve), &onrejectcheck, &if_onresolvenotcallable);
-
- BIND(&if_onresolvenotcallable);
- {
- var_on_resolve.Bind(PromiseDefaultResolveHandlerSymbolConstant());
- Goto(&onrejectcheck);
- }
-
- BIND(&onrejectcheck);
+// ES #sec-performpromisethen
+void PromiseBuiltinsAssembler::PerformPromiseThen(
+ Node* context, Node* promise, Node* on_fulfilled, Node* on_rejected,
+ Node* result_promise_or_capability) {
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
+ CSA_ASSERT(this,
+ Word32Or(IsCallable(on_fulfilled), IsUndefined(on_fulfilled)));
+ CSA_ASSERT(this, Word32Or(IsCallable(on_rejected), IsUndefined(on_rejected)));
+ CSA_ASSERT(this, TaggedIsNotSmi(result_promise_or_capability));
+ CSA_ASSERT(this, Word32Or(IsJSPromise(result_promise_or_capability),
+ IsPromiseCapability(result_promise_or_capability)));
+
+ Label if_pending(this), if_notpending(this), done(this);
+ Node* const status = PromiseStatus(promise);
+ Branch(IsPromiseStatus(status, v8::Promise::kPending), &if_pending,
+ &if_notpending);
+
+ BIND(&if_pending);
{
- Label if_onrejectnotcallable(this);
- GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable);
-
- Branch(IsCallable(on_reject), &append_callbacks, &if_onrejectnotcallable);
-
- BIND(&if_onrejectnotcallable);
- {
- var_on_reject.Bind(PromiseDefaultRejectHandlerSymbolConstant());
- Goto(&append_callbacks);
- }
+ // The {promise} is still in "Pending" state, so we just record a new
+ // PromiseReaction holding both the onFulfilled and onRejected callbacks.
+ // Once the {promise} is resolved we decide on the concrete handler to
+ // push onto the microtask queue.
+ Node* const promise_reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* const reaction =
+ AllocatePromiseReaction(promise_reactions, result_promise_or_capability,
+ on_fulfilled, on_rejected);
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction);
+ Goto(&done);
}
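+  // E.g. after p.then(f1); p.then(f2); on a still-pending {promise}, the
+  // field holds reaction(f2) -> reaction(f1) -> Smi zero, i.e. newest first;
+  // TriggerPromiseReactions below restores call order before scheduling.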
- BIND(&append_callbacks);
+ BIND(&if_notpending);
{
- Label fulfilled_check(this);
- Node* const status = PromiseStatus(promise);
- GotoIfNot(IsPromiseStatus(status, v8::Promise::kPending), &fulfilled_check);
-
- Node* const existing_deferred_promise =
- LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
-
- Label if_noexistingcallbacks(this), if_existingcallbacks(this);
- Branch(IsUndefined(existing_deferred_promise), &if_noexistingcallbacks,
- &if_existingcallbacks);
-
- BIND(&if_noexistingcallbacks);
+ VARIABLE(var_map, MachineRepresentation::kTagged);
+ VARIABLE(var_handler, MachineRepresentation::kTagged);
+ Label if_fulfilled(this), if_rejected(this, Label::kDeferred),
+ enqueue(this);
+ Branch(IsPromiseStatus(status, v8::Promise::kFulfilled), &if_fulfilled,
+ &if_rejected);
+
+ BIND(&if_fulfilled);
{
- // Store callbacks directly in the slots.
- StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
- deferred_promise);
- StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
- deferred_on_resolve);
- StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
- deferred_on_reject);
- StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
- var_on_resolve.value());
- StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
- var_on_reject.value());
- Goto(&out);
+ var_map.Bind(LoadRoot(Heap::kPromiseFulfillReactionJobTaskMapRootIndex));
+ var_handler.Bind(on_fulfilled);
+ Goto(&enqueue);
}
- BIND(&if_existingcallbacks);
+ BIND(&if_rejected);
{
- Label if_singlecallback(this), if_multiplecallbacks(this);
- BranchIfJSObject(existing_deferred_promise, &if_singlecallback,
- &if_multiplecallbacks);
-
- BIND(&if_singlecallback);
- {
- // Create new FixedArrays to store callbacks, and migrate
- // existing callbacks.
- Node* const deferred_promise_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(deferred_promise_arr, 0,
- existing_deferred_promise);
- StoreFixedArrayElement(deferred_promise_arr, 1, deferred_promise);
-
- Node* const deferred_on_resolve_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- deferred_on_resolve_arr, 0,
- LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset));
- StoreFixedArrayElement(deferred_on_resolve_arr, 1, deferred_on_resolve);
-
- Node* const deferred_on_reject_arr =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- deferred_on_reject_arr, 0,
- LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset));
- StoreFixedArrayElement(deferred_on_reject_arr, 1, deferred_on_reject);
-
- Node* const fulfill_reactions =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- fulfill_reactions, 0,
- LoadObjectField(promise, JSPromise::kFulfillReactionsOffset));
- StoreFixedArrayElement(fulfill_reactions, 1, var_on_resolve.value());
-
- Node* const reject_reactions =
- AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(
- reject_reactions, 0,
- LoadObjectField(promise, JSPromise::kRejectReactionsOffset));
- StoreFixedArrayElement(reject_reactions, 1, var_on_reject.value());
-
- // Store new FixedArrays in promise.
- StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
- deferred_promise_arr);
- StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
- deferred_on_resolve_arr);
- StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
- deferred_on_reject_arr);
- StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
- fulfill_reactions);
- StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
- reject_reactions);
- Goto(&out);
- }
-
- BIND(&if_multiplecallbacks);
- {
- AppendPromiseCallback(JSPromise::kDeferredPromiseOffset, promise,
- deferred_promise);
- AppendPromiseCallback(JSPromise::kDeferredOnResolveOffset, promise,
- deferred_on_resolve);
- AppendPromiseCallback(JSPromise::kDeferredOnRejectOffset, promise,
- deferred_on_reject);
- AppendPromiseCallback(JSPromise::kFulfillReactionsOffset, promise,
- var_on_resolve.value());
- AppendPromiseCallback(JSPromise::kRejectReactionsOffset, promise,
- var_on_reject.value());
- Goto(&out);
- }
+ CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
+ var_map.Bind(LoadRoot(Heap::kPromiseRejectReactionJobTaskMapRootIndex));
+ var_handler.Bind(on_rejected);
+ GotoIf(PromiseHasHandler(promise), &enqueue);
+ CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
+ Goto(&enqueue);
}
- BIND(&fulfilled_check);
- {
- Label reject(this);
- Node* const result = LoadObjectField(promise, JSPromise::kResultOffset);
- GotoIfNot(IsPromiseStatus(status, v8::Promise::kFulfilled), &reject);
-
- Node* info = AllocatePromiseReactionJobInfo(
- result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
- deferred_on_reject, context);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
-
- BIND(&reject);
- {
- CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected));
- Node* const has_handler = PromiseHasHandler(promise);
- Label enqueue(this);
-
- // TODO(gsathya): Fold these runtime calls and move to TF.
- GotoIf(has_handler, &enqueue);
- CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
- Goto(&enqueue);
-
- BIND(&enqueue);
- {
- Node* info = AllocatePromiseReactionJobInfo(
- result, var_on_reject.value(), deferred_promise,
- deferred_on_resolve, deferred_on_reject, context);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
- }
- }
- }
+ BIND(&enqueue);
+ Node* argument =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+ Node* microtask = AllocatePromiseReactionJobTask(
+ var_map.value(), context, argument, var_handler.value(),
+ result_promise_or_capability);
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), microtask);
+ Goto(&done);
}
- BIND(&out);
+ BIND(&done);
PromiseSetHasHandler(promise);
- return deferred_promise;
}
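+// Observable behavior of the above, e.g.:
+//
+//   const p = Promise.resolve(1);  // {p} is already fulfilled, so
+//   p.then(v => v);                // a PromiseFulfillReactionJobTask is
+//                                  // enqueued right away, no reaction list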
-// Promise fast path implementations rely on unmodified JSPromise instances.
-// We use a fairly coarse granularity for this and simply check whether both
-// the promise itself is unmodified (i.e. its map has not changed) and its
-// prototype is unmodified.
-// TODO(gsathya): Refactor this out to prevent code dupe with builtins-regexp
-void PromiseBuiltinsAssembler::BranchIfFastPath(Node* context, Node* promise,
- Label* if_isunmodified,
- Label* if_ismodified) {
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- BranchIfFastPath(native_context, promise_fun, promise, if_isunmodified,
- if_ismodified);
-}
-
-void PromiseBuiltinsAssembler::BranchIfFastPath(Node* native_context,
- Node* promise_fun,
- Node* promise,
- Label* if_isunmodified,
- Label* if_ismodified) {
- CSA_ASSERT(this, IsNativeContext(native_context));
- CSA_ASSERT(this,
- WordEqual(promise_fun,
- LoadContextElement(native_context,
- Context::PROMISE_FUNCTION_INDEX)));
-
- GotoIfForceSlowPath(if_ismodified);
-
- Node* const map = LoadMap(promise);
- Node* const initial_map =
- LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = WordEqual(map, initial_map);
-
- GotoIfNot(has_initialmap, if_ismodified);
+// ES #sec-performpromisethen
+TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled);
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
+ Node* const result_promise = Parameter(Descriptor::kResultPromise);
- Node* const initial_proto_initial_map =
- LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_MAP_INDEX);
- Node* const proto_map = LoadMap(CAST(LoadMapPrototype(map)));
- Node* const proto_has_initialmap =
- WordEqual(proto_map, initial_proto_initial_map);
+ CSA_ASSERT(this, TaggedIsNotSmi(result_promise));
+ CSA_ASSERT(this, IsJSPromise(result_promise));
- Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+ PerformPromiseThen(context, promise, on_fulfilled, on_rejected,
+ result_promise);
+ Return(result_promise);
}
-Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobInfo(
- Node* thenable, Node* then, Node* resolve, Node* reject, Node* context) {
- Node* const info = Allocate(PromiseResolveThenableJobInfo::kSize);
- StoreMapNoWriteBarrier(info,
- Heap::kPromiseResolveThenableJobInfoMapRootIndex);
+Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(Node* next,
+ Node* payload,
+ Node* fulfill_handler,
+ Node* reject_handler) {
+ Node* const reaction = Allocate(PromiseReaction::kSize);
+ StoreMapNoWriteBarrier(reaction, Heap::kPromiseReactionMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next);
+ StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kPayloadOffset,
+ payload);
+ StoreObjectFieldNoWriteBarrier(
+ reaction, PromiseReaction::kFulfillHandlerOffset, fulfill_handler);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kThenableOffset, thenable);
+ reaction, PromiseReaction::kRejectHandlerOffset, reject_handler);
+ return reaction;
+}
+
+Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
+ Node* map, Node* context, Node* argument, Node* handler, Node* payload) {
+ Node* const microtask = Allocate(PromiseReactionJobTask::kSize);
+ StoreMapNoWriteBarrier(microtask, map);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kThenOffset, then);
+ microtask, PromiseReactionJobTask::kArgumentOffset, argument);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kResolveOffset, resolve);
+ microtask, PromiseReactionJobTask::kContextOffset, context);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kRejectOffset, reject);
+ microtask, PromiseReactionJobTask::kHandlerOffset, handler);
StoreObjectFieldNoWriteBarrier(
- info, PromiseResolveThenableJobInfo::kContextOffset, context);
- return info;
+ microtask, PromiseReactionJobTask::kPayloadOffset, payload);
+ return microtask;
}
-void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
- Node* promise,
- Node* result) {
- Isolate* isolate = this->isolate();
-
- VARIABLE(var_reason, MachineRepresentation::kTagged);
- VARIABLE(var_then, MachineRepresentation::kTagged);
-
- Label do_enqueue(this), fulfill(this), if_nocycle(this),
- if_cycle(this, Label::kDeferred),
- if_rejectpromise(this, Label::kDeferred), out(this);
-
- Label cycle_check(this);
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &cycle_check);
- CallRuntime(Runtime::kPromiseHookResolve, context, promise);
- Goto(&cycle_check);
-
- BIND(&cycle_check);
- // 6. If SameValue(resolution, promise) is true, then
- BranchIfSameValue(promise, result, &if_cycle, &if_nocycle);
- BIND(&if_nocycle);
-
- // 7. If Type(resolution) is not Object, then
- GotoIf(TaggedIsSmi(result), &fulfill);
- GotoIfNot(IsJSReceiver(result), &fulfill);
+Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
+ Heap::RootListIndex map_root_index, Node* context, Node* argument,
+ Node* handler, Node* payload) {
+ DCHECK(map_root_index == Heap::kPromiseFulfillReactionJobTaskMapRootIndex ||
+ map_root_index == Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+ Node* const map = LoadRoot(map_root_index);
+ return AllocatePromiseReactionJobTask(map, context, argument, handler,
+ payload);
+}
- Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
- Node* const native_context = LoadNativeContext(context);
- Node* const promise_fun =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- BranchIfFastPath(native_context, promise_fun, result, &if_nativepromise,
- &if_notnativepromise);
+Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask(
+ Node* promise_to_resolve, Node* then, Node* thenable, Node* context) {
+ Node* const microtask = Allocate(PromiseResolveThenableJobTask::kSize);
+ StoreMapNoWriteBarrier(microtask,
+ Heap::kPromiseResolveThenableJobTaskMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kContextOffset, context);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kPromiseToResolveOffset,
+ promise_to_resolve);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kThenOffset, then);
+ StoreObjectFieldNoWriteBarrier(
+ microtask, PromiseResolveThenableJobTask::kThenableOffset, thenable);
+ return microtask;
+}
- // Resolution is a native promise and if it's already resolved or
- // rejected, shortcircuit the resolution procedure by directly
- // reusing the value from the promise.
- BIND(&if_nativepromise);
+// ES #sec-triggerpromisereactions
+Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
+ Node* context, Node* reactions, Node* argument,
+ PromiseReaction::Type type) {
+ // We need to reverse the {reactions} here, since we record them on the
+ // JSPromise in the reverse order.
{
- Node* const thenable_status = PromiseStatus(result);
- Node* const thenable_value =
- LoadObjectField(result, JSPromise::kResultOffset);
+ VARIABLE(var_current, MachineRepresentation::kTagged, reactions);
+ VARIABLE(var_reversed, MachineRepresentation::kTagged,
+ SmiConstant(Smi::kZero));
- Label if_isnotpending(this);
- GotoIfNot(IsPromiseStatus(thenable_status, v8::Promise::kPending),
- &if_isnotpending);
-
- // TODO(gsathya): Use a marker here instead of the actual then
- // callback, and check for the marker in PromiseResolveThenableJob
- // and perform PromiseThen.
- Node* const then =
- LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
- var_then.Bind(then);
- Goto(&do_enqueue);
-
- BIND(&if_isnotpending);
+ Label loop(this, {&var_current, &var_reversed}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
{
- Label if_fulfilled(this), if_rejected(this);
- Branch(IsPromiseStatus(thenable_status, v8::Promise::kFulfilled),
- &if_fulfilled, &if_rejected);
-
- BIND(&if_fulfilled);
- {
- PromiseFulfill(context, promise, thenable_value,
- v8::Promise::kFulfilled);
- PromiseSetHasHandler(promise);
- Goto(&out);
- }
-
- BIND(&if_rejected);
- {
- Label reject(this);
- Node* const has_handler = PromiseHasHandler(result);
-
- // Promise has already been rejected, but had no handler.
- // Revoke previously triggered reject event.
- GotoIf(has_handler, &reject);
- CallRuntime(Runtime::kPromiseRevokeReject, context, result);
- Goto(&reject);
-
- BIND(&reject);
- // Don't cause a debug event as this case is forwarding a rejection.
- InternalPromiseReject(context, promise, thenable_value, false);
- PromiseSetHasHandler(result);
- Goto(&out);
- }
+ Node* current = var_current.value();
+ GotoIf(TaggedIsSmi(current), &done_loop);
+ var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
+ StoreObjectField(current, PromiseReaction::kNextOffset,
+ var_reversed.value());
+ var_reversed.Bind(current);
+ Goto(&loop);
}
+ BIND(&done_loop);
+ reactions = var_reversed.value();
}
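+  // E.g. a list recorded as r3 -> r2 -> r1 (newest first) leaves the loop
+  // above as r1 -> r2 -> r3, so handlers fire in then()-call order.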
- BIND(&if_notnativepromise);
+ // Morph the {reactions} into PromiseReactionJobTasks and push them
+ // onto the microtask queue.
{
- // 8. Let then be Get(resolution, "then").
- Node* const then =
- GetProperty(context, result, isolate->factory()->then_string());
-
- // 9. If then is an abrupt completion, then
- GotoIfException(then, &if_rejectpromise, &var_reason);
+ VARIABLE(var_current, MachineRepresentation::kTagged, reactions);
- // 11. If IsCallable(thenAction) is false, then
- GotoIf(TaggedIsSmi(then), &fulfill);
- Node* const then_map = LoadMap(then);
- GotoIfNot(IsCallableMap(then_map), &fulfill);
- var_then.Bind(then);
- Goto(&do_enqueue);
+ Label loop(this, {&var_current}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* current = var_current.value();
+ GotoIf(TaggedIsSmi(current), &done_loop);
+ var_current.Bind(LoadObjectField(current, PromiseReaction::kNextOffset));
+
+ // Morph {current} from a PromiseReaction into a PromiseReactionJobTask
+ // and schedule that on the microtask queue. We try to minimize the number
+ // of stores here to avoid screwing up the store buffer.
+ STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
+ if (type == PromiseReaction::kFulfill) {
+ StoreMapNoWriteBarrier(
+ current, Heap::kPromiseFulfillReactionJobTaskMapRootIndex);
+ StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
+ argument);
+ StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
+ context);
+ STATIC_ASSERT(PromiseReaction::kFulfillHandlerOffset ==
+ PromiseReactionJobTask::kHandlerOffset);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseReactionJobTask::kPayloadOffset);
+ } else {
+ Node* handler =
+ LoadObjectField(current, PromiseReaction::kRejectHandlerOffset);
+ StoreMapNoWriteBarrier(current,
+ Heap::kPromiseRejectReactionJobTaskMapRootIndex);
+ StoreObjectField(current, PromiseReactionJobTask::kArgumentOffset,
+ argument);
+ StoreObjectField(current, PromiseReactionJobTask::kContextOffset,
+ context);
+ StoreObjectField(current, PromiseReactionJobTask::kHandlerOffset,
+ handler);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseReactionJobTask::kPayloadOffset);
+ }
+ CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), current);
+ Goto(&loop);
+ }
+ BIND(&done_loop);
}
- BIND(&do_enqueue);
- {
- // TODO(gsathya): Add fast path for native promises with unmodified
- // PromiseThen (which don't need these resolving functions, but
- // instead can just call resolve/reject directly).
- Node* resolve = nullptr;
- Node* reject = nullptr;
- std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
- promise, FalseConstant(), native_context);
-
- Node* const info = AllocatePromiseResolveThenableJobInfo(
- result, var_then.value(), resolve, reject, context);
-
- Label enqueue(this);
- GotoIfNot(IsDebugActive(), &enqueue);
-
- GotoIf(TaggedIsSmi(result), &enqueue);
- GotoIfNot(HasInstanceType(result, JS_PROMISE_TYPE), &enqueue);
-
- // Mark the dependency of the new promise on the resolution
- Node* const key =
- HeapConstant(isolate->factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, result, key, promise,
- SmiConstant(LanguageMode::kStrict));
- Goto(&enqueue);
-
- // 12. Perform EnqueueJob("PromiseJobs",
- // PromiseResolveThenableJob, « promise, resolution, thenAction»).
- BIND(&enqueue);
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&out);
- }
+ return UndefinedConstant();
+}
- // 7.b Return FulfillPromise(promise, resolution).
- BIND(&fulfill);
- {
- PromiseFulfill(context, promise, result, v8::Promise::kFulfilled);
- Goto(&out);
- }
+template <typename... TArgs>
+Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
+ TArgs... args) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&if_cycle);
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_fast(this), if_slow(this, Label::kDeferred), done(this, &var_result);
+ GotoIf(TaggedIsSmi(receiver), &if_slow);
+ Node* const receiver_map = LoadMap(receiver);
+ // We can skip the "then" lookup on {receiver} if it's [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ BranchIfPromiseThenLookupChainIntact(native_context, receiver_map, &if_fast,
+ &if_slow);
+
+ BIND(&if_fast);
{
- // 6.a Let selfResolutionError be a newly created TypeError object.
- Node* const message_id = SmiConstant(MessageTemplate::kPromiseCyclic);
- Node* const error =
- CallRuntime(Runtime::kNewTypeError, context, message_id, result);
- var_reason.Bind(error);
-
- // 6.b Return RejectPromise(promise, selfResolutionError).
- Goto(&if_rejectpromise);
+ Node* const then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ Node* const result =
+ CallJS(CodeFactory::CallFunction(
+ isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, receiver, args...);
+ var_result.Bind(result);
+ Goto(&done);
}
- // 9.a Return RejectPromise(promise, then.[[Value]]).
- BIND(&if_rejectpromise);
+ BIND(&if_slow);
{
- // Don't cause a debug event as this case is forwarding a rejection.
- InternalPromiseReject(context, promise, var_reason.value(), false);
- Goto(&out);
+ Node* const then = GetProperty(native_context, receiver,
+ isolate()->factory()->then_string());
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, receiver, args...);
+ var_result.Bind(result);
+ Goto(&done);
}
- BIND(&out);
+ BIND(&done);
+ return var_result.value();
}
-void PromiseBuiltinsAssembler::PromiseFulfill(
- Node* context, Node* promise, Node* result,
- v8::Promise::PromiseState status) {
- Label do_promisereset(this);
-
- Node* const deferred_promise =
- LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
-
- GotoIf(IsUndefined(deferred_promise), &do_promisereset);
-
- Node* const tasks =
- status == v8::Promise::kFulfilled
- ? LoadObjectField(promise, JSPromise::kFulfillReactionsOffset)
- : LoadObjectField(promise, JSPromise::kRejectReactionsOffset);
-
- Node* const deferred_on_resolve =
- LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset);
- Node* const deferred_on_reject =
- LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset);
+void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
+ Node* native_context, Node* promise_map, Label* if_fast, Label* if_slow) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_ASSERT(this, IsJSPromiseMap(promise_map));
- Node* const info = AllocatePromiseReactionJobInfo(
- result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
- context);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfForceSlowPath(if_slow);
+ GotoIfNot(WordEqual(LoadMapPrototype(promise_map), promise_prototype),
+ if_slow);
+ Branch(IsSpeciesProtectorCellInvalid(), if_slow, if_fast);
+}
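+// The species protector is invalidated by, e.g.,
+//
+//   Object.defineProperty(Promise, Symbol.species,
+//                         { get() { return Promise; } });
+//
+// after which all promises are routed through {if_slow} here.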
- CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info);
- Goto(&do_promisereset);
+void PromiseBuiltinsAssembler::BranchIfPromiseThenLookupChainIntact(
+ Node* native_context, Node* receiver_map, Label* if_fast, Label* if_slow) {
+ CSA_ASSERT(this, IsMap(receiver_map));
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&do_promisereset);
- {
- PromiseSetStatus(promise, status);
- StoreObjectField(promise, JSPromise::kResultOffset, result);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredPromiseOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredOnResolveOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kDeferredOnRejectOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kFulfillReactionsOffset,
- Heap::kUndefinedValueRootIndex);
- StoreObjectFieldRoot(promise, JSPromise::kRejectReactionsOffset,
- Heap::kUndefinedValueRootIndex);
- }
+ GotoIfForceSlowPath(if_slow);
+ GotoIfNot(IsJSPromiseMap(receiver_map), if_slow);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(LoadMapPrototype(receiver_map), promise_prototype),
+ if_slow);
+ Branch(IsPromiseThenProtectorCellInvalid(), if_slow, if_fast);
}
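+// The "then" lookup chain stops being intact once user code does, e.g.,
+//
+//   Promise.prototype.then = function (f, r) { /* ... */ };  // protector
+//   Object.setPrototypeOf(p, {});                  // prototype check above
+//
+// either of which sends InvokeThen through its generic slow path.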
void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
@@ -878,43 +598,6 @@ void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
BIND(&has_access);
}
-void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
- Node* promise, Node* value,
- Node* debug_event) {
- Label out(this);
- GotoIfNot(IsDebugActive(), &out);
- GotoIfNot(WordEqual(TrueConstant(), debug_event), &out);
- CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
- Goto(&out);
-
- BIND(&out);
- InternalPromiseReject(context, promise, value, false);
-}
-
-// This duplicates a lot of logic from PromiseRejectEvent in
-// runtime-promise.cc
-void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
- Node* promise, Node* value,
- bool debug_event) {
- Label fulfill(this), exit(this);
-
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &fulfill);
- if (debug_event) {
- CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
- }
- CallRuntime(Runtime::kPromiseHookResolve, context, promise);
- Goto(&fulfill);
-
- BIND(&fulfill);
- PromiseFulfill(context, promise, value, v8::Promise::kRejected);
-
- GotoIf(PromiseHasHandler(promise), &exit);
- CallRuntime(Runtime::kReportPromiseReject, context, promise, value);
- Goto(&exit);
-
- BIND(&exit);
-}
-
void PromiseBuiltinsAssembler::SetForwardingHandlerIfTrue(
Node* context, Node* condition, const NodeGenerator& object) {
Label done(this);
@@ -940,40 +623,52 @@ void PromiseBuiltinsAssembler::SetPromiseHandledByIfTrue(
BIND(&done);
}
-void PromiseBuiltinsAssembler::PerformFulfillClosure(Node* context, Node* value,
- bool should_resolve) {
- Label out(this);
+// ES #sec-promise-reject-functions
+TF_BUILTIN(PromiseCapabilityDefaultReject, PromiseBuiltinsAssembler) {
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const context = Parameter(Descriptor::kContext);
// 2. Let promise be F.[[Promise]].
- Node* const promise_slot = IntPtrConstant(kPromiseSlot);
- Node* const promise = LoadContextElement(context, promise_slot);
-
- // We use `undefined` as a marker to know that this callback was
- // already called.
- GotoIf(IsUndefined(promise), &out);
+ Node* const promise = LoadContextElement(context, kPromiseSlot);
- if (should_resolve) {
- InternalResolvePromise(context, promise, value);
- } else {
- Node* const debug_event =
- LoadContextElement(context, IntPtrConstant(kDebugEventSlot));
- InternalPromiseReject(context, promise, value, debug_event);
- }
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ // We use undefined as a marker for the [[AlreadyResolved]] state.
+ ReturnIf(IsUndefined(promise), UndefinedConstant());
- StoreContextElement(context, promise_slot, UndefinedConstant());
- Goto(&out);
+ // 5. Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, kPromiseSlot, UndefinedConstant());
- BIND(&out);
+ // 6. Return RejectPromise(promise, reason).
+ Node* const debug_event = LoadContextElement(context, kDebugEventSlot);
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
+ debug_event));
}
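+// E.g. only the first call through either resolving function has an effect:
+//
+//   let reject;
+//   const p = new Promise((res, rej) => { reject = rej; });
+//   reject(1); reject(2);  // second call sees kPromiseSlot === undefined
+//                          // and returns undefined without rejecting again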
-// ES#sec-promise-reject-functions
-// Promise Reject Functions
-TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
+// ES #sec-promise-resolve-functions
+TF_BUILTIN(PromiseCapabilityDefaultResolve, PromiseBuiltinsAssembler) {
+ Node* const resolution = Parameter(Descriptor::kResolution);
Node* const context = Parameter(Descriptor::kContext);
- PerformFulfillClosure(context, value, false);
- Return(UndefinedConstant());
+ // 2. Let promise be F.[[Promise]].
+ Node* const promise = LoadContextElement(context, kPromiseSlot);
+
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ // We use undefined as a marker for the [[AlreadyResolved]] state.
+ ReturnIf(IsUndefined(promise), UndefinedConstant());
+
+ // 5. Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, kPromiseSlot, UndefinedConstant());
+
+ // The rest of the logic (and the catch prediction) is
+ // encapsulated in the dedicated ResolvePromise builtin.
+ Return(CallBuiltin(Builtins::kResolvePromise, context, promise, resolution));
+}
+
+TF_BUILTIN(PromiseConstructorLazyDeoptContinuation, PromiseBuiltinsAssembler) {
+ Node* promise = Parameter(Descriptor::kPromise);
+ Return(promise);
}
// ES6 #sec-promise-executor
@@ -1089,231 +784,357 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
}
}
+// V8 Extras: v8.createPromise(parent)
TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
Node* const parent = Parameter(Descriptor::kParent);
Node* const context = Parameter(Descriptor::kContext);
Return(AllocateAndInitJSPromise(context, parent));
}
+// V8 Extras: v8.rejectPromise(promise, reason)
+TF_BUILTIN(PromiseInternalReject, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const context = Parameter(Descriptor::kContext);
+  // We pass true to trigger the debugger's on-exception handler.
+ Return(CallBuiltin(Builtins::kRejectPromise, context, promise, reason,
+ TrueConstant()));
+}
+
+// V8 Extras: v8.resolvePromise(promise, resolution)
+TF_BUILTIN(PromiseInternalResolve, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const resolution = Parameter(Descriptor::kResolution);
+ Node* const context = Parameter(Descriptor::kContext);
+ Return(CallBuiltin(Builtins::kResolvePromise, context, promise, resolution));
+}
+
// ES#sec-promise.prototype.then
// Promise.prototype.then ( onFulfilled, onRejected )
TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) {
// 1. Let promise be the this value.
Node* const promise = Parameter(Descriptor::kReceiver);
- Node* const on_resolve = Parameter(Descriptor::kOnFullfilled);
- Node* const on_reject = Parameter(Descriptor::kOnRejected);
+ Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled);
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
Node* const context = Parameter(Descriptor::kContext);
- Node* const result =
- InternalPromiseThen(context, promise, on_resolve, on_reject);
- Return(result);
-}
+ // 2. If IsPromise(promise) is false, throw a TypeError exception.
+ ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+ "Promise.prototype.then");
-// ES#sec-promise-resolve-functions
-// Promise Resolve Functions
-TF_BUILTIN(PromiseResolveClosure, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+ Label fast_promise_capability(this), slow_constructor(this, Label::kDeferred),
+ slow_promise_capability(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const promise_map = LoadMap(promise);
+ BranchIfPromiseSpeciesLookupChainIntact(
+ native_context, promise_map, &fast_promise_capability, &slow_constructor);
- PerformFulfillClosure(context, value, true);
- Return(UndefinedConstant());
-}
+ BIND(&slow_constructor);
+ Node* const constructor =
+ SpeciesConstructor(native_context, promise, promise_fun);
+ Branch(WordEqual(constructor, promise_fun), &fast_promise_capability,
+ &slow_promise_capability);
-// ES #sec-fulfillpromise
-TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const result = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
+ // 4. Let resultCapability be ? NewPromiseCapability(C).
+ Label perform_promise_then(this);
+ VARIABLE(var_result_promise, MachineRepresentation::kTagged);
+ VARIABLE(var_result_promise_or_capability, MachineRepresentation::kTagged);
- InternalResolvePromise(context, promise, result);
- Return(UndefinedConstant());
+ BIND(&fast_promise_capability);
+ {
+ Node* const result_promise = AllocateAndInitJSPromise(context, promise);
+ var_result_promise_or_capability.Bind(result_promise);
+ var_result_promise.Bind(result_promise);
+ Goto(&perform_promise_then);
+ }
+
+ BIND(&slow_promise_capability);
+ {
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, constructor, debug_event);
+ var_result_promise.Bind(
+ LoadObjectField(capability, PromiseCapability::kPromiseOffset));
+ var_result_promise_or_capability.Bind(capability);
+ Goto(&perform_promise_then);
+ }
+
+ // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
+ // resultCapability).
+ BIND(&perform_promise_then);
+ {
+ // We do some work of the PerformPromiseThen operation here, in that
+ // we check the handlers and turn non-callable handlers into undefined.
+ // This is because this is the one and only callsite of PerformPromiseThen
+ // that has to do this.
+
+ // 3. If IsCallable(onFulfilled) is false, then
+ // a. Set onFulfilled to undefined.
+ VARIABLE(var_on_fulfilled, MachineRepresentation::kTagged, on_fulfilled);
+ Label if_fulfilled_done(this), if_fulfilled_notcallable(this);
+ GotoIf(TaggedIsSmi(on_fulfilled), &if_fulfilled_notcallable);
+ Branch(IsCallable(on_fulfilled), &if_fulfilled_done,
+ &if_fulfilled_notcallable);
+ BIND(&if_fulfilled_notcallable);
+ var_on_fulfilled.Bind(UndefinedConstant());
+ Goto(&if_fulfilled_done);
+ BIND(&if_fulfilled_done);
+
+ // 4. If IsCallable(onRejected) is false, then
+ // a. Set onRejected to undefined.
+ VARIABLE(var_on_rejected, MachineRepresentation::kTagged, on_rejected);
+ Label if_rejected_done(this), if_rejected_notcallable(this);
+ GotoIf(TaggedIsSmi(on_rejected), &if_rejected_notcallable);
+ Branch(IsCallable(on_rejected), &if_rejected_done,
+ &if_rejected_notcallable);
+ BIND(&if_rejected_notcallable);
+ var_on_rejected.Bind(UndefinedConstant());
+ Goto(&if_rejected_done);
+ BIND(&if_rejected_done);
+
+ PerformPromiseThen(context, promise, var_on_fulfilled.value(),
+ var_on_rejected.value(),
+ var_result_promise_or_capability.value());
+ Return(var_result_promise.value());
+ }
}
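+// E.g. non-callable handlers are replaced by undefined above, so
+//
+//   Promise.resolve(1).then(42)  // onFulfilled is not callable
+//
+// returns a promise that is simply fulfilled with 1 again.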
-TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const on_reject = Parameter(Descriptor::kOnReject);
- Node* const exception = Parameter(Descriptor::kException);
+// ES#sec-promise.prototype.catch
+// Promise.prototype.catch ( onRejected )
+TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
+ // 1. Let promise be the this value.
+ Node* const receiver = Parameter(Descriptor::kReceiver);
+ Node* const on_fulfilled = UndefinedConstant();
+ Node* const on_rejected = Parameter(Descriptor::kOnRejected);
Node* const context = Parameter(Descriptor::kContext);
- VARIABLE(var_unused, MachineRepresentation::kTagged);
+ // 2. Return ? Invoke(promise, "then", « undefined, onRejected »).
+ Node* const native_context = LoadNativeContext(context);
+ Return(InvokeThen(native_context, receiver, on_fulfilled, on_rejected));
+}
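+// E.g. p.catch(onRejected) behaves like p.then(undefined, onRejected):
+//
+//   Promise.reject(new Error("boom")).catch(e => e.message);  // "boom"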
- Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
- Branch(IsUndefined(on_reject), &if_internalhandler, &if_customhandler);
+// ES #sec-promiseresolvethenablejob
+TF_BUILTIN(PromiseResolveThenableJob, PromiseBuiltinsAssembler) {
+ Node* const native_context = Parameter(Descriptor::kContext);
+ Node* const promise_to_resolve = Parameter(Descriptor::kPromiseToResolve);
+ Node* const thenable = Parameter(Descriptor::kThenable);
+ Node* const then = Parameter(Descriptor::kThen);
+
+ CSA_ASSERT(this, TaggedIsNotSmi(thenable));
+ CSA_ASSERT(this, IsJSReceiver(thenable));
+ CSA_ASSERT(this, IsJSPromise(promise_to_resolve));
+ CSA_ASSERT(this, IsNativeContext(native_context));
- BIND(&if_internalhandler);
+ // We can use a simple optimization here if we know that {then} is the initial
+ // Promise.prototype.then method, and {thenable} is a JSPromise whose
+ // @@species lookup chain is intact: We can connect {thenable} and
+  // {promise_to_resolve} directly in that case and avoid allocating a
+  // temporary JSPromise as well as the resolving closures and their context.
+ //
+ // We take the generic (slow-)path if a PromiseHook is enabled or the debugger
+ // is active, to make sure we expose spec compliant behavior.
+ Label if_fast(this), if_slow(this, Label::kDeferred);
+ Node* const promise_then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ GotoIfNot(WordEqual(then, promise_then), &if_slow);
+ Node* const thenable_map = LoadMap(thenable);
+ GotoIfNot(IsJSPromiseMap(thenable_map), &if_slow);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_slow);
+ BranchIfPromiseSpeciesLookupChainIntact(native_context, thenable_map,
+ &if_fast, &if_slow);
+
+ BIND(&if_fast);
{
- InternalPromiseReject(context, promise, exception, false);
- Return(UndefinedConstant());
+ // We know that the {thenable} is a JSPromise, which doesn't require
+ // any special treatment and that {then} corresponds to the initial
+ // Promise.prototype.then method. So instead of allocating a temporary
+ // JSPromise to connect the {thenable} with the {promise_to_resolve},
+ // we can directly schedule the {promise_to_resolve} with default
+    // handlers onto the {thenable} promise. This not only saves the
+ // JSPromise allocation, but also avoids the allocation of the two
+ // resolving closures and the shared context.
+ //
+ // What happens normally in this case is
+ //
+ // resolve, reject = CreateResolvingFunctions(promise_to_resolve)
+ // result_capability = NewPromiseCapability(%Promise%)
+ // PerformPromiseThen(thenable, resolve, reject, result_capability)
+ //
+ // which means that PerformPromiseThen will either schedule a new
+ // PromiseReaction with resolve and reject or a PromiseReactionJob
+ // with resolve or reject based on the state of {thenable}. And
+ // resolve or reject will just invoke the default [[Resolve]] or
+ // [[Reject]] functions on the {promise_to_resolve}.
+ //
+ // This is the same as just doing
+ //
+ // PerformPromiseThen(thenable, undefined, undefined, promise_to_resolve)
+ //
+ // which performs exactly the same (observable) steps.
+ TailCallBuiltin(Builtins::kPerformPromiseThen, native_context, thenable,
+ UndefinedConstant(), UndefinedConstant(),
+ promise_to_resolve);
}
- BIND(&if_customhandler);
+ BIND(&if_slow);
{
+ Node* resolve = nullptr;
+ Node* reject = nullptr;
+ std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+ promise_to_resolve, FalseConstant(), native_context);
+
+ Label if_exception(this, Label::kDeferred);
VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
- Label if_exception(this);
- Node* const ret = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- context, on_reject, UndefinedConstant(), exception);
- GotoIfException(ret, &if_exception, &var_exception);
- Return(UndefinedConstant());
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, then, thenable, resolve, reject);
+ GotoIfException(result, &if_exception, &var_exception);
+ Return(result);
+
BIND(&if_exception);
- CallRuntime(Runtime::kReportMessage, context, var_exception.value());
- Return(UndefinedConstant());
+ {
+ // We need to reject the {thenable}.
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+          native_context, reject, UndefinedConstant(), var_exception.value());
+ Return(result);
+ }
}
}
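+// E.g. in
+//
+//   const inner = new Promise(() => {});
+//   const outer = new Promise(res => res(inner));
+//
+// the job above takes the fast path and wires {outer} onto {inner} via
+// PerformPromiseThen, with no temporary promise or resolving closures.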
-TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
- Node* const value = Parameter(Descriptor::kValue);
- Node* const handler = Parameter(Descriptor::kHandler);
- Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise);
- Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve);
- Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject);
- Node* const context = Parameter(Descriptor::kContext);
- Isolate* isolate = this->isolate();
-
- VARIABLE(var_reason, MachineRepresentation::kTagged);
+// ES #sec-promisereactionjob
+void PromiseBuiltinsAssembler::PromiseReactionJob(Node* context, Node* argument,
+ Node* handler, Node* payload,
+ PromiseReaction::Type type) {
+ CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ CSA_ASSERT(this, Word32Or(IsCallable(handler),
+ Word32Or(IsCode(handler), IsUndefined(handler))));
+ CSA_ASSERT(this, TaggedIsNotSmi(payload));
- Node* const is_debug_active = IsDebugActive();
- Label run_handler(this), if_rejectpromise(this), promisehook_before(this),
- promisehook_after(this), debug_pop(this);
+ VARIABLE(var_handler_result, MachineRepresentation::kTagged, argument);
+ Label if_handler_callable(this), if_fulfill(this), if_reject(this),
+ if_code_handler(this);
- GotoIfNot(is_debug_active, &promisehook_before);
- CallRuntime(Runtime::kDebugPushPromise, context, deferred_promise);
- Goto(&promisehook_before);
+ GotoIf(IsUndefined(handler),
+ type == PromiseReaction::kFulfill ? &if_fulfill : &if_reject);
+ Branch(IsCode(handler), &if_code_handler, &if_handler_callable);
- BIND(&promisehook_before);
+ BIND(&if_code_handler);
{
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &run_handler);
- CallRuntime(Runtime::kPromiseHookBefore, context, deferred_promise);
- Goto(&run_handler);
+ // The {handler} is a Code object that knows how to deal with
+ // the {payload} and the {argument}.
+ PromiseReactionHandlerDescriptor descriptor(isolate());
+ TailCallStub(descriptor, handler, context, argument, payload);
}
- BIND(&run_handler);
+ BIND(&if_handler_callable);
{
- Label if_defaulthandler(this), if_callablehandler(this),
- if_internalhandler(this), if_customhandler(this, Label::kDeferred);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, handler, UndefinedConstant(), argument);
+ GotoIfException(result, &if_reject, &var_handler_result);
+ var_handler_result.Bind(result);
+ Goto(&if_fulfill);
+ }
- Branch(IsSymbol(handler), &if_defaulthandler, &if_callablehandler);
+ BIND(&if_fulfill);
+ {
+ Label if_promise(this), if_promise_capability(this, Label::kDeferred);
+ Node* const value = var_handler_result.value();
+ Branch(IsPromiseCapability(payload), &if_promise_capability, &if_promise);
- BIND(&if_defaulthandler);
+ BIND(&if_promise);
{
- Label if_resolve(this), if_reject(this);
- Branch(IsPromiseDefaultResolveHandlerSymbol(handler), &if_resolve,
- &if_reject);
-
- BIND(&if_resolve);
- {
- var_result.Bind(value);
- Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
- &if_customhandler);
- }
-
- BIND(&if_reject);
- {
- var_reason.Bind(value);
- Goto(&if_rejectpromise);
- }
+ // For fast native promises we can skip the indirection
+ // via the promiseCapability.[[Resolve]] function and
+ // run the resolve logic directly from here.
+ TailCallBuiltin(Builtins::kResolvePromise, context, payload, value);
}
- BIND(&if_callablehandler);
+ BIND(&if_promise_capability);
{
+ // In the general case we need to call the (user provided)
+ // promiseCapability.[[Resolve]] function.
+ Node* const resolve =
+ LoadObjectField(payload, PromiseCapability::kResolveOffset);
Node* const result = CallJS(
- CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, handler, UndefinedConstant(), value);
- var_result.Bind(result);
- GotoIfException(result, &if_rejectpromise, &var_reason);
- Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
- &if_customhandler);
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), value);
+ GotoIfException(result, &if_reject, &var_handler_result);
+ Return(result);
}
+ }
- BIND(&if_internalhandler);
- InternalResolvePromise(context, deferred_promise, var_result.value());
- Goto(&promisehook_after);
+ BIND(&if_reject);
+ if (type == PromiseReaction::kReject) {
+ Label if_promise(this), if_promise_capability(this, Label::kDeferred);
+ Node* const reason = var_handler_result.value();
+ Branch(IsPromiseCapability(payload), &if_promise_capability, &if_promise);
- BIND(&if_customhandler);
+ BIND(&if_promise);
{
- Node* const maybe_exception = CallJS(
- CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, deferred_on_resolve, UndefinedConstant(),
- var_result.value());
- GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
- Goto(&promisehook_after);
+ // For fast native promises we can skip the indirection
+ // via the promiseCapability.[[Reject]] function and
+      // run the reject logic directly from here.
+ TailCallBuiltin(Builtins::kRejectPromise, context, payload, reason,
+ FalseConstant());
}
- }
-
- BIND(&if_rejectpromise);
- {
- CallBuiltin(Builtins::kPromiseHandleReject, context, deferred_promise,
- deferred_on_reject, var_reason.value());
- Goto(&promisehook_after);
- }
-
- BIND(&promisehook_after);
- {
- GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &debug_pop);
- CallRuntime(Runtime::kPromiseHookAfter, context, deferred_promise);
- Goto(&debug_pop);
- }
-
- BIND(&debug_pop);
- {
- Label out(this);
- GotoIfNot(is_debug_active, &out);
- CallRuntime(Runtime::kDebugPopPromise, context);
- Goto(&out);
+ BIND(&if_promise_capability);
+ {
+ // In the general case we need to call the (user provided)
+ // promiseCapability.[[Reject]] function.
+ Label if_exception(this, Label::kDeferred);
+ VARIABLE(var_exception, MachineRepresentation::kTagged,
+ TheHoleConstant());
+ Node* const reject =
+ LoadObjectField(payload, PromiseCapability::kRejectOffset);
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, reject, UndefinedConstant(), reason);
+ GotoIfException(result, &if_exception, &var_exception);
+ Return(result);
- BIND(&out);
- Return(UndefinedConstant());
+ // Swallow the exception here.
+ BIND(&if_exception);
+ TailCallRuntime(Runtime::kReportMessage, context, var_exception.value());
+ }
+ } else {
+ // We have to call out to the dedicated PromiseRejectReactionJob builtin
+ // here, instead of just doing the work inline, as otherwise the catch
+    // predictions in the debugger would be wrong; the prediction machinery
+    // just walks the stack and checks for certain builtins.
+ TailCallBuiltin(Builtins::kPromiseRejectReactionJob, context,
+ var_handler_result.value(), UndefinedConstant(), payload);
}
}
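+// E.g. a throwing handler ends up in {if_reject} and rejects the {payload}:
+//
+//   Promise.resolve(1)
+//       .then(() => { throw new Error("boom"); })
+//       .catch(e => e.message);  // fulfills with "boom"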
-TF_BUILTIN(PromiseHandleJS, PromiseBuiltinsAssembler) {
+// ES #sec-promisereactionjob
+TF_BUILTIN(PromiseFulfillReactionJob, PromiseBuiltinsAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
Node* const value = Parameter(Descriptor::kValue);
Node* const handler = Parameter(Descriptor::kHandler);
- Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise);
- Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve);
- Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject);
- Node* const context = Parameter(Descriptor::kContext);
+ Node* const payload = Parameter(Descriptor::kPayload);
- Node* const result =
- CallBuiltin(Builtins::kPromiseHandle, context, value, handler,
- deferred_promise, deferred_on_resolve, deferred_on_reject);
- Return(result);
+ PromiseReactionJob(context, value, handler, payload,
+ PromiseReaction::kFulfill);
}
-// ES#sec-promise.prototype.catch
-// Promise.prototype.catch ( onRejected )
-TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) {
- // 1. Let promise be the this value.
- Node* const promise = Parameter(Descriptor::kReceiver);
- Node* const on_resolve = UndefinedConstant();
- Node* const on_reject = Parameter(Descriptor::kOnRejected);
+// ES #sec-promisereactionjob
+TF_BUILTIN(PromiseRejectReactionJob, PromiseBuiltinsAssembler) {
Node* const context = Parameter(Descriptor::kContext);
+ Node* const reason = Parameter(Descriptor::kReason);
+ Node* const handler = Parameter(Descriptor::kHandler);
+ Node* const payload = Parameter(Descriptor::kPayload);
- Label if_internalthen(this), if_customthen(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(promise), &if_customthen);
- BranchIfFastPath(context, promise, &if_internalthen, &if_customthen);
-
- BIND(&if_internalthen);
- {
- Node* const result =
- InternalPromiseThen(context, promise, on_resolve, on_reject);
- Return(result);
- }
-
- BIND(&if_customthen);
- {
- Node* const then =
- GetProperty(context, promise, isolate()->factory()->then_string());
- Node* const result = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, then, promise, on_resolve, on_reject);
- Return(result);
- }
+ PromiseReactionJob(context, reason, handler, payload,
+ PromiseReaction::kReject);
}
-TF_BUILTIN(PromiseResolveWrapper, PromiseBuiltinsAssembler) {
+TF_BUILTIN(PromiseResolveTrampoline, PromiseBuiltinsAssembler) {
// 1. Let C be the this value.
Node* receiver = Parameter(Descriptor::kReceiver);
Node* value = Parameter(Descriptor::kValue);
@@ -1331,51 +1152,49 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
Node* constructor = Parameter(Descriptor::kConstructor);
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
- Isolate* isolate = this->isolate();
+
+ CSA_ASSERT(this, IsJSReceiver(constructor));
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Label if_value_is_native_promise(this),
- if_value_or_constructor_are_not_native_promise(this),
- if_need_to_allocate(this);
+ Label if_slow_constructor(this, Label::kDeferred), if_need_to_allocate(this);
+ // Check if {value} is a JSPromise.
GotoIf(TaggedIsSmi(value), &if_need_to_allocate);
-
- // This shortcircuits the constructor lookups.
- GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &if_need_to_allocate);
-
- // This adds a fast path as non-subclassed native promises don't have
- // an observable constructor lookup.
- BranchIfFastPath(native_context, promise_fun, value,
- &if_value_is_native_promise,
- &if_value_or_constructor_are_not_native_promise);
-
- BIND(&if_value_is_native_promise);
- {
- GotoIfNot(WordEqual(promise_fun, constructor),
- &if_value_or_constructor_are_not_native_promise);
- Return(value);
- }
+ Node* const value_map = LoadMap(value);
+ GotoIfNot(IsJSPromiseMap(value_map), &if_need_to_allocate);
+
+ // We can skip the "constructor" lookup on {value} if it's [[Prototype]]
+ // is the (initial) Promise.prototype and the @@species protector is
+ // intact, as that guards the lookup path for "constructor" on
+ // JSPromise instances which have the (initial) Promise.prototype.
+ Node* const promise_prototype =
+      LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ GotoIfNot(WordEqual(LoadMapPrototype(value_map), promise_prototype),
+ &if_slow_constructor);
+ GotoIf(IsSpeciesProtectorCellInvalid(), &if_slow_constructor);
+
+ // If the {constructor} is the Promise function, we just immediately
+ // return the {value} here and don't bother wrapping it into a
+ // native Promise.
+ GotoIfNot(WordEqual(promise_fun, constructor), &if_slow_constructor);
+ Return(value);
// At this point, value or/and constructor are not native promises, but
// they could be of the same subclass.
- BIND(&if_value_or_constructor_are_not_native_promise);
+ BIND(&if_slow_constructor);
{
- Label if_return(this);
- Node* const xConstructor =
- GetProperty(context, value, isolate->factory()->constructor_string());
- BranchIfSameValue(xConstructor, constructor, &if_return,
- &if_need_to_allocate);
-
- BIND(&if_return);
+ Node* const value_constructor =
+ GetProperty(context, value, isolate()->factory()->constructor_string());
+ GotoIfNot(WordEqual(value_constructor, constructor), &if_need_to_allocate);
Return(value);
}
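+  // E.g. Promise.resolve(p) === p holds for a native promise {p} whose
+  // "constructor" still comes back as %Promise%.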
BIND(&if_need_to_allocate);
{
- Label if_nativepromise(this), if_notnativepromise(this);
+ Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
Branch(WordEqual(promise_fun, constructor), &if_nativepromise,
&if_notnativepromise);
@@ -1384,18 +1203,21 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
BIND(&if_nativepromise);
{
Node* const result = AllocateAndInitJSPromise(context);
- InternalResolvePromise(context, result, value);
+ CallBuiltin(Builtins::kResolvePromise, context, result, value);
Return(result);
}
BIND(&if_notnativepromise);
{
- Node* const capability = NewPromiseCapability(context, constructor);
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, constructor, debug_event);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
- CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined),
- context, resolve, UndefinedConstant(), value);
+ CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ context, resolve, UndefinedConstant(), value);
Node* const result =
LoadObjectField(capability, PromiseCapability::kPromiseOffset);
@@ -1429,17 +1251,6 @@ TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kPromiseExecutorAlreadyInvoked);
}
-// ES6 #sec-newpromisecapability
-TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
- Node* constructor = Parameter(Descriptor::kConstructor);
- Node* debug_event = Parameter(Descriptor::kDebugEvent);
- Node* context = Parameter(Descriptor::kContext);
-
- CSA_ASSERT_JS_ARGC_EQ(this, 2);
-
- Return(NewPromiseCapability(context, constructor, debug_event));
-}
-
TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
// 1. Let C be the this value.
Node* const receiver = Parameter(Descriptor::kReceiver);
@@ -1470,7 +1281,9 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
BIND(&if_custompromise);
{
// 3. Let promiseCapability be ? NewPromiseCapability(C).
- Node* const capability = NewPromiseCapability(context, receiver);
+ Node* const debug_event = TrueConstant();
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability,
+ context, receiver, debug_event);
// 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
Node* const reject =
@@ -1485,16 +1298,6 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
}
}
-TF_BUILTIN(InternalPromiseReject, PromiseBuiltinsAssembler) {
- Node* const promise = Parameter(Descriptor::kPromise);
- Node* const reason = Parameter(Descriptor::kReason);
- Node* const debug_event = Parameter(Descriptor::kDebugEvent);
- Node* const context = Parameter(Descriptor::kContext);
-
- InternalPromiseReject(context, promise, reason, debug_event);
- Return(UndefinedConstant());
-}
-
std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
Node* on_finally, Node* constructor, Node* native_context) {
Node* const promise_context =
@@ -1565,16 +1368,11 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
// 7. Let valueThunk be equivalent to a function that returns value.
- Node* native_context = LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
Node* const value_thunk = CreateValueThunkFunction(value, native_context);
// 8. Return ? Invoke(promise, "then", « valueThunk »).
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, value_thunk);
- Return(result_promise);
+ Return(InvokeThen(native_context, promise, value_thunk));
}
TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
@@ -1627,35 +1425,44 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
CallBuiltin(Builtins::kPromiseResolve, context, constructor, result);
// 7. Let thrower be equivalent to a function that throws reason.
- Node* native_context = LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
Node* const thrower = CreateThrowerFunction(reason, native_context);
// 8. Return ? Invoke(promise, "then", « thrower »).
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, thrower);
- Return(result_promise);
+ Return(InvokeThen(native_context, promise, thrower));
}
TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
// 1. Let promise be the this value.
- Node* const promise = Parameter(Descriptor::kReceiver);
+ Node* const receiver = Parameter(Descriptor::kReceiver);
Node* const on_finally = Parameter(Descriptor::kOnFinally);
Node* const context = Parameter(Descriptor::kContext);
// 2. If Type(promise) is not Object, throw a TypeError exception.
- ThrowIfNotJSReceiver(context, promise, MessageTemplate::kCalledOnNonObject,
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
"Promise.prototype.finally");
// 3. Let C be ? SpeciesConstructor(promise, %Promise%).
Node* const native_context = LoadNativeContext(context);
Node* const promise_fun =
LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
- Node* const constructor = SpeciesConstructor(context, promise, promise_fun);
+ VARIABLE(var_constructor, MachineRepresentation::kTagged, promise_fun);
+ Label slow_constructor(this, Label::kDeferred), done_constructor(this);
+ Node* const receiver_map = LoadMap(receiver);
+ GotoIfNot(IsJSPromiseMap(receiver_map), &slow_constructor);
+ BranchIfPromiseSpeciesLookupChainIntact(native_context, receiver_map,
+ &done_constructor, &slow_constructor);
+ BIND(&slow_constructor);
+ {
+ Node* const constructor =
+ SpeciesConstructor(context, receiver, promise_fun);
+ var_constructor.Bind(constructor);
+ Goto(&done_constructor);
+ }
+ BIND(&done_constructor);
+ Node* const constructor = var_constructor.value();
// 4. Assert: IsConstructor(C) is true.
CSA_ASSERT(this, IsConstructor(constructor));
@@ -1697,50 +1504,172 @@ TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) {
// 7. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
BIND(&perform_finally);
- Node* const promise_then =
- GetProperty(context, promise, factory()->then_string());
- Node* const result_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_then, promise, var_then_finally.value(),
- var_catch_finally.value());
- Return(result_promise);
+ Return(InvokeThen(native_context, receiver, var_then_finally.value(),
+ var_catch_finally.value()));
}
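
The rewrite above replaces the unconditional SpeciesConstructor call with a guarded fast path: when the receiver is a plain JSPromise whose map still points at the initial %PromisePrototype% and the @@species protector is intact, the species constructor can only be %Promise%, so the observable lookup is skipped. A minimal C++ sketch of that guard pattern (names are illustrative, not V8's):

// Hypothetical sketch of the guarded fast path above; illustrative names.
template <typename SlowLookup>
const void* SpeciesConstructorFor(const void* receiver_prototype,
                                  const void* promise_prototype,
                                  const void* promise_function,
                                  bool species_protector_intact,
                                  SlowLookup slow) {
  // Fast path: an unmodified JSPromise with the protector intact can only
  // answer %Promise%, so the observable lookup can be skipped.
  if (receiver_prototype == promise_prototype && species_protector_intact) {
    return promise_function;
  }
  // Slow path: the full, observable SpeciesConstructor(receiver, %Promise%).
  return slow();
}
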
-TF_BUILTIN(ResolveNativePromise, PromiseBuiltinsAssembler) {
+// ES #sec-fulfillpromise
+TF_BUILTIN(FulfillPromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
- InternalResolvePromise(context, promise, value);
- Return(UndefinedConstant());
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
+
+ // 2. Let reactions be promise.[[PromiseFulfillReactions]].
+ Node* const reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+
+ // 3. Set promise.[[PromiseResult]] to value.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, value);
+
+ // 6. Set promise.[[PromiseState]] to "fulfilled".
+ PromiseSetStatus(promise, Promise::kFulfilled);
+
+ // 7. Return TriggerPromiseReactions(reactions, value).
+ Return(TriggerPromiseReactions(context, reactions, value,
+ PromiseReaction::kFulfill));
}
-TF_BUILTIN(RejectNativePromise, PromiseBuiltinsAssembler) {
+// ES #sec-rejectpromise
+TF_BUILTIN(RejectPromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
- Node* const value = Parameter(Descriptor::kValue);
+ Node* const reason = Parameter(Descriptor::kReason);
Node* const debug_event = Parameter(Descriptor::kDebugEvent);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE));
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
CSA_ASSERT(this, IsBoolean(debug_event));
- InternalPromiseReject(context, promise, value, debug_event);
- Return(UndefinedConstant());
+ Label if_runtime(this, Label::kDeferred);
+
+ // If promise hook is enabled or the debugger is active, let
+ // the runtime handle this operation, which greatly reduces
+  // the complexity here and also avoids a couple of round trips
+  // between JavaScript and C++ land.
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_runtime);
+
+ // 7. If promise.[[PromiseIsHandled]] is false, perform
+ // HostPromiseRejectionTracker(promise, "reject").
+  // We don't try to handle rejecting {promise} without a handler
+  // here, but let the C++ code take care of this case completely.
+ GotoIfNot(PromiseHasHandler(promise), &if_runtime);
+
+ // 2. Let reactions be promise.[[PromiseRejectReactions]].
+ Node* reactions =
+ LoadObjectField(promise, JSPromise::kReactionsOrResultOffset);
+
+ // 3. Set promise.[[PromiseResult]] to reason.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reason);
+
+ // 6. Set promise.[[PromiseState]] to "rejected".
+ PromiseSetStatus(promise, Promise::kRejected);
+
+  // 8. Return TriggerPromiseReactions(reactions, reason).
+ Return(TriggerPromiseReactions(context, reactions, reason,
+ PromiseReaction::kReject));
+
+ BIND(&if_runtime);
+ TailCallRuntime(Runtime::kRejectPromise, context, promise, reason,
+ debug_event);
}
-TF_BUILTIN(PerformNativePromiseThen, PromiseBuiltinsAssembler) {
+// ES #sec-promise-resolve-functions
+TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
Node* const promise = Parameter(Descriptor::kPromise);
- Node* const resolve_reaction = Parameter(Descriptor::kResolveReaction);
- Node* const reject_reaction = Parameter(Descriptor::kRejectReaction);
- Node* const result_promise = Parameter(Descriptor::kResultPromise);
+ Node* const resolution = Parameter(Descriptor::kResolution);
Node* const context = Parameter(Descriptor::kContext);
- CSA_ASSERT(this, HasInstanceType(result_promise, JS_PROMISE_TYPE));
+ CSA_ASSERT(this, TaggedIsNotSmi(promise));
+ CSA_ASSERT(this, IsJSPromise(promise));
- InternalPerformPromiseThen(context, promise, resolve_reaction,
- reject_reaction, result_promise,
- UndefinedConstant(), UndefinedConstant());
- Return(result_promise);
+ Label do_enqueue(this), if_fulfill(this), if_reject(this, Label::kDeferred),
+ if_runtime(this, Label::kDeferred);
+ VARIABLE(var_reason, MachineRepresentation::kTagged);
+ VARIABLE(var_then, MachineRepresentation::kTagged);
+
+ // If promise hook is enabled or the debugger is active, let
+ // the runtime handle this operation, which greatly reduces
+  // the complexity here and also avoids a couple of round trips
+  // between JavaScript and C++ land.
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_runtime);
+
+ // 6. If SameValue(resolution, promise) is true, then
+ // We can use pointer comparison here, since the {promise} is guaranteed
+ // to be a JSPromise inside this function and thus is reference comparable.
+ GotoIf(WordEqual(promise, resolution), &if_runtime);
+
+ // 7. If Type(resolution) is not Object, then
+ GotoIf(TaggedIsSmi(resolution), &if_fulfill);
+ Node* const result_map = LoadMap(resolution);
+ GotoIfNot(IsJSReceiverMap(result_map), &if_fulfill);
+
+ // We can skip the "then" lookup on {resolution} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ Label if_fast(this), if_slow(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ BranchIfPromiseThenLookupChainIntact(native_context, result_map, &if_fast,
+ &if_slow);
+
+  // The {resolution} is a native promise in this case, so we can skip
+  // the observable "then" lookup and use the (initial)
+  // Promise.prototype.then from the native context directly.
+ BIND(&if_fast);
+ {
+ Node* const then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+ }
+
+ BIND(&if_slow);
+ {
+ // 8. Let then be Get(resolution, "then").
+ Node* const then =
+ GetProperty(context, resolution, isolate()->factory()->then_string());
+
+ // 9. If then is an abrupt completion, then
+ GotoIfException(then, &if_reject, &var_reason);
+
+ // 11. If IsCallable(thenAction) is false, then
+ GotoIf(TaggedIsSmi(then), &if_fulfill);
+ Node* const then_map = LoadMap(then);
+ GotoIfNot(IsCallableMap(then_map), &if_fulfill);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+ }
+
+ BIND(&do_enqueue);
+ {
+ // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
+ // «promise, resolution, thenAction»).
+ Node* const task = AllocatePromiseResolveThenableJobTask(
+ promise, var_then.value(), resolution, native_context);
+ TailCallBuiltin(Builtins::kEnqueueMicrotask, native_context, task);
+ }
+
+ BIND(&if_fulfill);
+ {
+    // 7.a Return FulfillPromise(promise, resolution).
+ TailCallBuiltin(Builtins::kFulfillPromise, context, promise, resolution);
+ }
+
+ BIND(&if_runtime);
+ Return(CallRuntime(Runtime::kResolvePromise, context, promise, resolution));
+
+ BIND(&if_reject);
+ {
+ // 9.a Return RejectPromise(promise, then.[[Value]]).
+ TailCallBuiltin(Builtins::kRejectPromise, context, promise,
+ var_reason.value(), FalseConstant());
+ }
}
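
FulfillPromise, RejectPromise and ResolvePromise above together implement the spec's promise state machine: settle at most once, store the result in the slot that previously held the reactions, and trigger the queued reactions. A simplified, self-contained C++ model of those steps (a sketch of the spec semantics, not of V8's object layout):

// Simplified model of the spec steps followed by FulfillPromise and
// RejectPromise above. V8 keeps reactions and result in one slot
// (kReactionsOrResultOffset); they are separate fields here for clarity.
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

enum class State { kPending, kFulfilled, kRejected };

using Handler = std::function<void(const std::string&)>;

struct MiniPromise {
  State state = State::kPending;
  std::string result;                                   // [[PromiseResult]]
  std::vector<std::pair<Handler, Handler>> reactions;   // (fulfill, reject)
};

void TriggerReactions(MiniPromise& p, bool fulfilled) {
  for (auto& r : p.reactions) (fulfilled ? r.first : r.second)(p.result);
  p.reactions.clear();  // Steps 4/5: the reaction lists become undefined.
}

void Fulfill(MiniPromise& p, const std::string& value) {
  if (p.state != State::kPending) return;   // Settle at most once.
  p.result = value;                         // Step 3.
  p.state = State::kFulfilled;              // Step 6.
  TriggerReactions(p, /*fulfilled=*/true);  // Step 7.
}

void Reject(MiniPromise& p, const std::string& reason) {
  if (p.state != State::kPending) return;
  p.result = reason;                        // Step 3.
  p.state = State::kRejected;               // Step 6.
  TriggerReactions(p, /*fulfilled=*/false); // Step 8.
}

int main() {
  MiniPromise p;
  p.reactions.emplace_back(
      [](const std::string& v) { std::cout << "fulfilled: " << v << "\n"; },
      [](const std::string& r) { std::cout << "rejected: " << r << "\n"; });
  Fulfill(p, "42");   // Prints "fulfilled: 42".
  Reject(p, "boom");  // No-op: the promise is already settled.
}
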
Node* PromiseBuiltinsAssembler::PerformPromiseAll(
@@ -1802,9 +1731,6 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const resolve_context =
CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementAlreadyVisitedSlot,
- SmiConstant(0));
- StoreContextElementNoWriteBarrier(
resolve_context, kPromiseAllResolveElementIndexSlot, var_index.value());
StoreContextElementNoWriteBarrier(
resolve_context, kPromiseAllResolveElementRemainingElementsSlot,
@@ -1944,7 +1870,8 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
// Don't fire debugEvent so that forwarding the rejection through all does not
// trigger redundant ExceptionEvents
Node* const debug_event = FalseConstant();
- Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability, context,
+ receiver, debug_event);
VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
Label reject_promise(this, &var_exception, Label::kDeferred);
@@ -1987,19 +1914,16 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
SmiConstant(kPromiseAllResolveElementLength)));
- Label already_called(this), resolve_promise(this);
- GotoIf(SmiEqual(LoadContextElement(
- context, kPromiseAllResolveElementAlreadyVisitedSlot),
- SmiConstant(1)),
- &already_called);
- StoreContextElementNoWriteBarrier(
- context, kPromiseAllResolveElementAlreadyVisitedSlot, SmiConstant(1));
-
Node* const index =
LoadContextElement(context, kPromiseAllResolveElementIndexSlot);
Node* const values_array =
LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot);
+ Label already_called(this, Label::kDeferred), resolve_promise(this);
+ GotoIf(SmiLessThan(index, SmiConstant(Smi::kZero)), &already_called);
+ StoreContextElementNoWriteBarrier(context, kPromiseAllResolveElementIndexSlot,
+ SmiConstant(-1));
+
// Set element in FixedArray
Label runtime_set_element(this), did_set_element(this);
GotoIfNot(TaggedIsPositiveSmi(index), &runtime_set_element);
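+
The closure above now encodes "already called" by overwriting the index slot with -1 instead of keeping a separate already-visited slot, saving one context slot. A sketch of the folded encoding (hypothetical types):

// Sketch of the folded slot encoding above: one slot holds the element
// index while pending and -1 once the resolve callback has run.
struct ResolveElementContext {
  int index;  // >= 0: element index; -1: callback already called.
};

// Returns true and yields the index on the first call; false afterwards.
bool ClaimOnce(ResolveElementContext& ctx, int* out_index) {
  if (ctx.index < 0) return false;  // Spec: later calls are ignored.
  *out_index = ctx.index;
  ctx.index = -1;                   // Mark as called.
  return true;
}
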
@@ -2070,7 +1994,8 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// Don't fire debugEvent so that forwarding the rejection through all does not
// trigger redundant ExceptionEvents
Node* const debug_event = FalseConstant();
- Node* const capability = NewPromiseCapability(context, receiver, debug_event);
+ Node* const capability = CallBuiltin(Builtins::kNewPromiseCapability, context,
+ receiver, debug_event);
Node* const resolve =
LoadObjectField(capability, PromiseCapability::kResolveOffset);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 366c7c22cd..2130101e84 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_PROMISE_H_
-#define V8_BUILTINS_BUILTINS_PROMISE_H_
+#ifndef V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
+#define V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
#include "src/code-stub-assembler.h"
#include "src/contexts.h"
+#include "src/objects/promise.h"
namespace v8 {
namespace internal {
@@ -29,11 +30,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
protected:
enum PromiseAllResolveElementContextSlots {
- // Whether the resolve callback was already called.
- kPromiseAllResolveElementAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
-
- // Index into the values array
- kPromiseAllResolveElementIndexSlot,
+ // Index into the values array, or -1 if the callback was already called
+ kPromiseAllResolveElementIndexSlot = Context::MIN_CONTEXT_SLOTS,
// Remaining elements count (mutable HeapNumber)
kPromiseAllResolveElementRemainingElementsSlot,
@@ -90,8 +88,16 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* AllocateAndSetJSPromise(Node* context, v8::Promise::PromiseState status,
Node* result);
- Node* AllocatePromiseResolveThenableJobInfo(Node* result, Node* then,
- Node* resolve, Node* reject,
+ Node* AllocatePromiseReaction(Node* next, Node* payload,
+ Node* fulfill_handler, Node* reject_handler);
+
+ Node* AllocatePromiseReactionJobTask(Heap::RootListIndex map_root_index,
+ Node* context, Node* argument,
+ Node* handler, Node* payload);
+ Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument,
+ Node* handler, Node* payload);
+ Node* AllocatePromiseResolveThenableJobTask(Node* promise_to_resolve,
+ Node* then, Node* thenable,
Node* context);
std::pair<Node*, Node*> CreatePromiseResolvingFunctions(
@@ -105,50 +111,44 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreatePromiseGetCapabilitiesExecutorContext(Node* native_context,
Node* promise_capability);
- Node* NewPromiseCapability(Node* context, Node* constructor,
- Node* debug_event = nullptr);
-
protected:
void PromiseInit(Node* promise);
- Node* SpeciesConstructor(Node* context, Node* object,
- Node* default_constructor);
-
void PromiseSetHasHandler(Node* promise);
void PromiseSetHandledHint(Node* promise);
- void AppendPromiseCallback(int offset, compiler::Node* promise,
- compiler::Node* value);
+ void PerformPromiseThen(Node* context, Node* promise, Node* on_fulfilled,
+ Node* on_rejected,
+ Node* result_promise_or_capability);
- Node* InternalPromiseThen(Node* context, Node* promise, Node* on_resolve,
- Node* on_reject);
-
- Node* InternalPerformPromiseThen(Node* context, Node* promise,
- Node* on_resolve, Node* on_reject,
- Node* deferred_promise,
- Node* deferred_on_resolve,
- Node* deferred_on_reject);
+ Node* CreatePromiseContext(Node* native_context, int slots);
- void InternalResolvePromise(Node* context, Node* promise, Node* result);
+ Node* TriggerPromiseReactions(Node* context, Node* promise, Node* result,
+ PromiseReaction::Type type);
- void BranchIfFastPath(Node* context, Node* promise, Label* if_isunmodified,
- Label* if_ismodified);
+  // We can shortcut the SpeciesConstructor on {promise_map} if its
+ // [[Prototype]] is the (initial) Promise.prototype and the @@species
+ // protector is intact, as that guards the lookup path for the "constructor"
+ // property on JSPromise instances which have the %PromisePrototype%.
+ void BranchIfPromiseSpeciesLookupChainIntact(Node* native_context,
+ Node* promise_map,
+ Label* if_fast, Label* if_slow);
- void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
- Label* if_isunmodified, Label* if_ismodified);
+ // We can skip the "then" lookup on {receiver_map} if it's [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then() protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ void BranchIfPromiseThenLookupChainIntact(Node* native_context,
+ Node* receiver_map, Label* if_fast,
+ Label* if_slow);
- Node* CreatePromiseContext(Node* native_context, int slots);
- void PromiseFulfill(Node* context, Node* promise, Node* result,
- v8::Promise::PromiseState status);
+ template <typename... TArgs>
+ Node* InvokeThen(Node* native_context, Node* receiver, TArgs... args);
void BranchIfAccessCheckFailed(Node* context, Node* native_context,
Node* promise_constructor, Node* executor,
Label* if_noaccess);
- void InternalPromiseReject(Node* context, Node* promise, Node* value,
- bool debug_event);
- void InternalPromiseReject(Node* context, Node* promise, Node* value,
- Node* debug_event);
std::pair<Node*, Node*> CreatePromiseFinallyFunctions(Node* on_finally,
Node* constructor,
Node* native_context);
@@ -174,9 +174,10 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
const NodeGenerator& handled_by);
Node* PromiseStatus(Node* promise);
- void PerformFulfillClosure(Node* context, Node* value, bool should_resolve);
- private:
+ void PromiseReactionJob(Node* context, Node* argument, Node* handler,
+ Node* payload, PromiseReaction::Type type);
+
Node* IsPromiseStatus(Node* actual, v8::Promise::PromiseState expected);
void PromiseSetStatus(Node* promise, v8::Promise::PromiseState status);
@@ -186,4 +187,4 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_PROMISE_H_
+#endif // V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
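
The new allocator declarations above reflect the reworked representation: pending reactions form a singly linked list of PromiseReaction objects stored in the promise's combined reactions-or-result slot, and each reaction is morphed into a reaction job task when the promise settles. A rough structural sketch (field names are illustrative, not V8's actual layout):

// Rough sketch of the reaction-list layout implied by the declarations
// above; illustrative only.
struct PromiseReaction {
  PromiseReaction* next;  // Previously attached reaction (LIFO list).
  void* fulfill_handler;  // Used when the promise fulfills.
  void* reject_handler;   // Used when the promise rejects.
  void* payload;          // Dependent promise or capability to settle.
};
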
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 64e838d53a..fb35f48a15 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -73,22 +73,57 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
Node* context, CodeStubArguments& args, Node* argc, ParameterMode mode) {
+ Comment("AllocateJSArrayForCodeStubArguments");
+
+ Label if_empty_array(this), allocate_js_array(this);
+ // Do not use AllocateJSArray since {elements} might end up in LOS.
+ VARIABLE(elements, MachineRepresentation::kTagged);
+
+ TNode<Smi> length = ParameterToTagged(argc, mode);
+ GotoIf(SmiEqual(length, SmiConstant(0)), &if_empty_array);
+ {
+ Label if_large_object(this, Label::kDeferred);
+ Node* allocated_elements = AllocateFixedArray(PACKED_ELEMENTS, argc, mode,
+ kAllowLargeObjectAllocation);
+ elements.Bind(allocated_elements);
+
+ VARIABLE(index, MachineType::PointerRepresentation(),
+ IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+ VariableList list({&index}, zone());
+
+ GotoIf(SmiGreaterThan(length, SmiConstant(FixedArray::kMaxRegularLength)),
+ &if_large_object);
+ args.ForEach(list, [=, &index](Node* arg) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements,
+ index.value(), arg);
+ Increment(&index, kPointerSize);
+ });
+ Goto(&allocate_js_array);
+
+ BIND(&if_large_object);
+ {
+ args.ForEach(list, [=, &index](Node* arg) {
+ Store(allocated_elements, index.value(), arg);
+ Increment(&index, kPointerSize);
+ });
+ Goto(&allocate_js_array);
+ }
+ }
+
+ BIND(&if_empty_array);
+ {
+ elements.Bind(EmptyFixedArrayConstant());
+ Goto(&allocate_js_array);
+ }
+
+ BIND(&allocate_js_array);
+ // Allocate the result JSArray.
Node* native_context = LoadNativeContext(context);
Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
- Node* argc_smi = ParameterToTagged(argc, mode);
-
- Node* array = AllocateJSArray(PACKED_ELEMENTS, array_map, argc, argc_smi,
- nullptr, mode);
- Node* elements = LoadElements(array);
-
- VARIABLE(index, MachineType::PointerRepresentation(),
- IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
- VariableList list({&index}, zone());
- args.ForEach(list, [=, &index](Node* arg) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, index.value(),
- arg);
- Increment(&index, kPointerSize);
- });
+ Node* array = AllocateUninitializedJSArrayWithoutElements(array_map, length);
+ StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset,
+ elements.value());
+
return array;
}
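
The arguments-to-array path above no longer calls AllocateJSArray directly because a large argc can place the elements backing store in large-object space, where the unbarriered store loop would be invalid; the element count now selects between a no-write-barrier loop and a barriered one. A sketch of that split (the threshold constant is a stand-in, not V8's value):

// Store-strategy split mirroring the code above: unbarriered stores are
// only valid when the backing store cannot land in large-object space.
#include <cstddef>

constexpr std::size_t kMaxRegularLength = 1024;  // Illustrative value only.

template <typename FastStore, typename BarrierStore>
void FillElements(std::size_t length, FastStore fast, BarrierStore barrier) {
  if (length <= kMaxRegularLength) {
    fast();     // Regular-space allocation: skip the write barrier.
  } else {
    barrier();  // Large-object space: keep the write barrier on every store.
  }
}
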
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 4227c628d1..45329eed70 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins-constructor-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
+#include "src/builtins/growable-fixed-array-gen.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/counters.h"
@@ -135,10 +136,9 @@ void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const context, Node* const regexp, Node* const match_info,
- Node* const string) {
+ TNode<String> const string) {
CSA_ASSERT(this, IsFixedArrayMap(LoadMap(match_info)));
CSA_ASSERT(this, IsJSRegExp(regexp));
- CSA_ASSERT(this, IsString(string));
Label named_captures(this), out(this);
@@ -152,7 +152,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// Calculate the substring of the first match before creating the result array
// to avoid an unnecessary write barrier storing the first result.
- Node* const first = SubString(context, string, start, end);
+
+ TNode<String> const first = SubString(string, SmiUntag(start), SmiUntag(end));
Node* const result =
AllocateRegExpResult(context, num_results, start, string);
@@ -188,7 +189,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Node* const from_cursor_plus1 = IntPtrAdd(from_cursor, IntPtrConstant(1));
Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
- Node* const capture = SubString(context, string, start, end);
+ TNode<String> const capture =
+ SubString(string, SmiUntag(start), SmiUntag(end));
StoreFixedArrayElement(result_elements, to_cursor, capture);
Goto(&next_iter);
@@ -441,18 +443,11 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// contains the uninitialized sentinel as a smi.
Node* const code = var_code.value();
-#ifdef DEBUG
- {
- Label next(this);
- GotoIfNot(TaggedIsSmi(code), &next);
-
- CSA_ASSERT(this,
- SmiEqual(code, SmiConstant(JSRegExp::kUninitializedValue)));
- Goto(&next);
-
- BIND(&next);
- }
-#endif
+ CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ GotoIfNot(TaggedIsSmi(code), ok);
+ Branch(SmiEqual(code, SmiConstant(JSRegExp::kUninitializedValue)), ok,
+ not_ok);
+ });
GotoIf(TaggedIsSmi(code), &runtime);
CSA_ASSERT(this, HasInstanceType(code, CODE_TYPE));
@@ -475,7 +470,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Argument 1: Previous index.
MachineType arg1_type = type_int32;
- Node* const arg1 = TruncateWordToWord32(int_last_index);
+ Node* const arg1 = TruncateIntPtrToInt32(int_last_index);
// Argument 2: Start of string data.
MachineType arg2_type = type_ptr;
@@ -582,7 +577,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
[=, &var_to_offset](Node* offset) {
Node* const value = Load(MachineType::Int32(),
static_offsets_vector_address, offset);
- Node* const smi_value = SmiFromWord32(value);
+ Node* const smi_value = SmiFromInt32(value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, match_info,
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kPointerSize);
@@ -766,10 +761,9 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
- Node* const regexp,
- Node* const string,
- const bool is_fastpath) {
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(
+ Node* const context, Node* const regexp, TNode<String> const string,
+ const bool is_fastpath) {
VARIABLE(var_result, MachineRepresentation::kTagged);
Label if_didnotmatch(this), out(this);
@@ -944,7 +938,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context,
// Slow path stub for RegExpPrototypeExec to decrease code size.
TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kReceiver);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const context = Parameter(Descriptor::kContext);
Return(RegExpPrototypeExecBody(context, regexp, string, false));
@@ -1030,7 +1024,7 @@ TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label if_isfastpath(this), if_isslowpath(this);
Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath,
@@ -1069,13 +1063,13 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
var_flags = SmiUntag(flags_smi);
-#define CASE_FOR_FLAG(FLAG) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags, FLAG), &next); \
- var_length = SmiAdd(var_length, SmiConstant(1)); \
- Goto(&next); \
- BIND(&next); \
+#define CASE_FOR_FLAG(FLAG) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
+ var_length = SmiAdd(var_length.value(), SmiConstant(1)); \
+ Goto(&next); \
+ BIND(&next); \
} while (false)
CASE_FOR_FLAG(JSRegExp::kGlobal);
@@ -1099,8 +1093,8 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Label if_isflagset(this); \
BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \
BIND(&if_isflagset); \
- var_length = SmiAdd(var_length, SmiConstant(1)); \
- var_flags = Signed(WordOr(var_flags, IntPtrConstant(FLAG))); \
+ var_length = SmiAdd(var_length.value(), SmiConstant(1)); \
+ var_flags = Signed(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \
Goto(&next); \
BIND(&next); \
} while (false)
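
Both macro variants above implement the same two-pass scheme: first count the set flags to size the result string exactly, then emit one character per flag. A plain C++ sketch of the scheme (flag set reduced for brevity):

// Two-pass flag stringification, mirroring the macros above: size first,
// then fill. Not V8's full flag set.
#include <string>

enum Flag { kGlobal = 1 << 0, kIgnoreCase = 1 << 1, kMultiline = 1 << 2 };

std::string FlagsToString(int flags) {
  int length = 0;  // Pass 1: one char per set flag (first CASE_FOR_FLAG).
  const int all[] = {kGlobal, kIgnoreCase, kMultiline};
  for (int f : all) {
    if (flags & f) ++length;
  }
  std::string result;
  result.reserve(length);  // Mirrors AllocateSeqOneByteString(var_length).
  // Pass 2: append the character for each set flag, in source order.
  if (flags & kGlobal) result += 'g';
  if (flags & kIgnoreCase) result += 'i';
  if (flags & kMultiline) result += 'm';
  return result;
}
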
@@ -1118,7 +1112,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
// char for each set flag.
{
- Node* const result = AllocateSeqOneByteString(context, var_length);
+ Node* const result = AllocateSeqOneByteString(context, var_length.value());
VARIABLE(var_offset, MachineType::PointerRepresentation(),
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
@@ -1126,7 +1120,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
#define CASE_FOR_FLAG(FLAG, CHAR) \
do { \
Label next(this); \
- GotoIfNot(IsSetWord(var_flags, FLAG), &next); \
+ GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
Node* const value = Int32Constant(CHAR); \
StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
var_offset.value(), value); \
@@ -1384,8 +1378,7 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
Label next(this);
GotoIf(IsUndefined(maybe_flags), &next);
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpFlags);
- TailCallRuntime(Runtime::kThrowTypeError, context, message_id);
+ ThrowTypeError(context, MessageTemplate::kRegExpFlags);
BIND(&next);
}
@@ -1450,12 +1443,8 @@ TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
BIND(&if_isnotprototype);
{
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
- Node* const method_name_str =
- HeapConstant(isolate->factory()->NewStringFromAsciiChecked(
- "RegExp.prototype.source"));
- TailCallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str);
+ ThrowTypeError(context, MessageTemplate::kRegExpNonRegExp,
+ "RegExp.prototype.source");
}
}
}
@@ -1465,7 +1454,7 @@ Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
JSRegExp::Flag flag) {
Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
Node* const mask = SmiConstant(flag);
- return SmiToWord32(SmiAnd(flags, mask));
+ return SmiToInt32(SmiAnd(flags, mask));
}
// Load through the GetProperty stub.
@@ -1533,8 +1522,6 @@ Node* RegExpBuiltinsAssembler::FlagGetter(Node* const context,
void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
JSRegExp::Flag flag, int counter,
const char* method_name) {
- Isolate* isolate = this->isolate();
-
// Check whether we have an unmodified regexp instance.
Label if_isunmodifiedjsregexp(this),
if_isnotunmodifiedjsregexp(this, Label::kDeferred);
@@ -1573,14 +1560,7 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
}
BIND(&if_isnotprototype);
- {
- Node* const message_id = SmiConstant(MessageTemplate::kRegExpNonRegExp);
- Node* const method_name_str = HeapConstant(
- isolate->factory()->NewStringFromAsciiChecked(method_name));
- CallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kRegExpNonRegExp, method_name); }
}
}
@@ -1707,7 +1687,7 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -1795,163 +1775,14 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
return var_result.value();
}
-namespace {
-
-// Utility class implementing a growable fixed array through CSA.
-class GrowableFixedArray {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- public:
- explicit GrowableFixedArray(CodeStubAssembler* a)
- : assembler_(a),
- var_array_(a, MachineRepresentation::kTagged),
- var_length_(a, MachineType::PointerRepresentation()),
- var_capacity_(a, MachineType::PointerRepresentation()) {
- Initialize();
- }
-
- Node* length() const { return var_length_.value(); }
-
- Variable* var_array() { return &var_array_; }
- Variable* var_length() { return &var_length_; }
- Variable* var_capacity() { return &var_capacity_; }
-
- void Push(Node* const value) {
- CodeStubAssembler* a = assembler_;
-
- Node* const length = var_length_.value();
- Node* const capacity = var_capacity_.value();
-
- Label grow(a), store(a);
- a->Branch(a->IntPtrEqual(capacity, length), &grow, &store);
-
- a->BIND(&grow);
- {
- Node* const new_capacity = NewCapacity(a, capacity);
- Node* const new_array = ResizeFixedArray(length, new_capacity);
-
- var_capacity_.Bind(new_capacity);
- var_array_.Bind(new_array);
- a->Goto(&store);
- }
-
- a->BIND(&store);
- {
- Node* const array = var_array_.value();
- a->StoreFixedArrayElement(array, length, value);
-
- Node* const new_length = a->IntPtrAdd(length, a->IntPtrConstant(1));
- var_length_.Bind(new_length);
- }
- }
-
- Node* ToJSArray(Node* const context) {
- CodeStubAssembler* a = assembler_;
-
- const ElementsKind kind = PACKED_ELEMENTS;
-
- Node* const native_context = a->LoadNativeContext(context);
- Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
-
- // Shrink to fit if necessary.
- {
- Label next(a);
-
- Node* const length = var_length_.value();
- Node* const capacity = var_capacity_.value();
-
- a->GotoIf(a->WordEqual(length, capacity), &next);
-
- Node* const array = ResizeFixedArray(length, length);
- var_array_.Bind(array);
- var_capacity_.Bind(length);
- a->Goto(&next);
-
- a->BIND(&next);
- }
-
- Node* const result_length = a->SmiTag(length());
- Node* const result = a->AllocateUninitializedJSArrayWithoutElements(
- array_map, result_length, nullptr);
-
- // Note: We do not currently shrink the fixed array.
-
- a->StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
-
- return result;
- }
-
- private:
- void Initialize() {
- CodeStubAssembler* a = assembler_;
-
- const ElementsKind kind = PACKED_ELEMENTS;
-
- static const int kInitialArraySize = 8;
- Node* const capacity = a->IntPtrConstant(kInitialArraySize);
- Node* const array = a->AllocateFixedArray(kind, capacity);
-
- a->FillFixedArrayWithValue(kind, array, a->IntPtrConstant(0), capacity,
- Heap::kTheHoleValueRootIndex);
-
- var_array_.Bind(array);
- var_capacity_.Bind(capacity);
- var_length_.Bind(a->IntPtrConstant(0));
- }
-
- Node* NewCapacity(CodeStubAssembler* a,
- compiler::SloppyTNode<IntPtrT> current_capacity) {
- CSA_ASSERT(a, a->IntPtrGreaterThan(current_capacity, a->IntPtrConstant(0)));
-
- // Growth rate is analog to JSObject::NewElementsCapacity:
- // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
-
- Node* const new_capacity = a->IntPtrAdd(
- a->IntPtrAdd(current_capacity, a->WordShr(current_capacity, 1)),
- a->IntPtrConstant(16));
-
- return new_capacity;
- }
-
- // Creates a new array with {new_capacity} and copies the first
- // {element_count} elements from the current array.
- Node* ResizeFixedArray(Node* const element_count, Node* const new_capacity) {
- CodeStubAssembler* a = assembler_;
-
- CSA_ASSERT(a, a->IntPtrGreaterThan(element_count, a->IntPtrConstant(0)));
- CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
- CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
-
- Node* const from_array = var_array_.value();
-
- CodeStubAssembler::ExtractFixedArrayFlags flags;
- flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
- Node* to_array = a->ExtractFixedArray(from_array, nullptr, element_count,
- new_capacity, flags);
-
- return to_array;
- }
-
- private:
- CodeStubAssembler* const assembler_;
- Variable var_array_;
- Variable var_length_;
- Variable var_capacity_;
-};
-
-} // namespace
-
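
The removed GrowableFixedArray class now lives in src/builtins/growable-fixed-array-gen.h (included above); its growth policy is unchanged. A quick check of the growth sequence, assuming the formula from the removed code:

// Growth sequence under the formula from the removed code, which matches
// JSObject::NewElementsCapacity: new = current + current/2 + 16.
#include <cstddef>
#include <cstdio>

std::size_t NewCapacity(std::size_t current) {
  return current + (current >> 1) + 16;
}

int main() {
  std::size_t capacity = 8;  // kInitialArraySize in the removed class.
  for (int i = 0; i < 5; ++i) {
    std::printf("%zu ", capacity);  // Prints: 8 28 58 103 170
    capacity = NewCapacity(capacity);
  }
}
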
void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const regexp,
- Node* const string,
+ TNode<String> string,
const bool is_fastpath) {
- CSA_ASSERT(this, IsString(string));
if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
Node* const int_zero = IntPtrConstant(0);
Node* const smi_zero = SmiConstant(0);
-
Node* const is_global =
FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
@@ -1975,7 +1806,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Allocate an array to store the resulting match strings.
- GrowableFixedArray array(this);
+ GrowableFixedArray array(state());
// Loop preparations. Within the loop, collect results from RegExpExec
// and store match strings in the array.
@@ -2001,9 +1832,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const match_to = LoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
- Node* match = SubString(context, string, match_from, match_to);
- var_match.Bind(match);
-
+ var_match.Bind(
+ SubString(string, SmiUntag(match_from), SmiUntag(match_to)));
Goto(&if_didmatch);
} else {
DCHECK(!is_fastpath);
@@ -2052,7 +1882,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Store the match, growing the fixed array if needed.
- array.Push(match);
+ array.Push(CAST(match));
// Advance last index if the match is the empty string.
@@ -2087,7 +1917,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
{
// Wrap the match in a JSArray.
- Node* const result = array.ToJSArray(context);
+ Node* const result = array.ToJSArray(CAST(context));
Return(result);
}
}
@@ -2107,7 +1937,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2126,7 +1956,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
// 2) pattern is a string
TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) {
Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const string = Parameter(Descriptor::kPattern);
+ TNode<String> const string = CAST(Parameter(Descriptor::kPattern));
Node* const context = Parameter(Descriptor::kContext);
RegExpPrototypeMatchBody(context, receiver, string, true);
@@ -2248,7 +2078,7 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -2277,12 +2107,11 @@ TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) {
// JSRegExp, {string} is a String, and {limit} is a Smi.
void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const regexp,
- Node* const string,
+ TNode<String> string,
Node* const limit) {
CSA_ASSERT(this, IsFastRegExp(context, regexp));
CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
CSA_ASSERT(this, TaggedIsSmi(limit));
- CSA_ASSERT(this, IsString(string));
TNode<Smi> const smi_zero = SmiConstant(0);
TNode<IntPtrT> const int_zero = IntPtrConstant(0);
@@ -2343,7 +2172,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// Loop preparations.
- GrowableFixedArray array(this);
+ GrowableFixedArray array(state());
VARIABLE(var_last_matched_until, MachineRepresentation::kTagged);
VARIABLE(var_next_search_from, MachineRepresentation::kTagged);
@@ -2422,10 +2251,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
Node* const from = last_matched_until;
Node* const to = match_from;
-
- Node* const substr = SubString(context, string, from, to);
- array.Push(substr);
-
+ array.Push(SubString(string, SmiUntag(from), SmiUntag(to)));
GotoIf(WordEqual(array.length(), int_limit), &out);
}
@@ -2462,21 +2288,19 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&select_capture);
{
- Node* const substr = SubString(context, string, from, to);
- var_value.Bind(substr);
+ var_value.Bind(SubString(string, SmiUntag(from), SmiUntag(to)));
Goto(&store_value);
}
BIND(&select_undefined);
{
- Node* const undefined = UndefinedConstant();
- var_value.Bind(undefined);
+ var_value.Bind(UndefinedConstant());
Goto(&store_value);
}
BIND(&store_value);
{
- array.Push(var_value.value());
+ array.Push(CAST(var_value.value()));
GotoIf(WordEqual(array.length(), int_limit), &out);
Node* const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
@@ -2499,16 +2323,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
{
Node* const from = var_last_matched_until.value();
Node* const to = string_length;
-
- Node* const substr = SubString(context, string, from, to);
- array.Push(substr);
-
+ array.Push(SubString(string, SmiUntag(from), SmiUntag(to)));
Goto(&out);
}
BIND(&out);
{
- Node* const result = array.ToJSArray(context);
+ Node* const result = array.ToJSArray(CAST(context));
Return(result);
}
@@ -2525,12 +2346,11 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// Helper that skips a few initial checks.
TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const maybe_limit = Parameter(Descriptor::kLimit);
Node* const context = Parameter(Descriptor::kContext);
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(string));
// TODO(jgruber): Even if map checks send us to the fast path, we still need
// to verify the constructor property and jump to the slow path if it has
@@ -2600,7 +2420,7 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
Label stub(this), runtime(this, Label::kDeferred);
BranchIfFastRegExp(context, receiver, &stub, &runtime);
@@ -2700,9 +2520,9 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Goto(&loop);
BIND(&loop);
{
- GotoIfNot(IntPtrLessThan(var_i, end), &create_result);
+ GotoIfNot(IntPtrLessThan(var_i.value(), end), &create_result);
- Node* const elem = LoadFixedArrayElement(res_elems, var_i);
+ Node* const elem = LoadFixedArrayElement(res_elems, var_i.value());
Label if_issmi(this), if_isstring(this), loop_epilogue(this);
Branch(TaggedIsSmi(elem), &if_issmi, &if_isstring);
@@ -2726,9 +2546,10 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&if_isnegativeorzero);
{
- var_i = IntPtrAdd(var_i, int_one);
+ var_i = IntPtrAdd(var_i.value(), int_one);
- Node* const next_elem = LoadFixedArrayElement(res_elems, var_i);
+ Node* const next_elem =
+ LoadFixedArrayElement(res_elems, var_i.value());
var_match_start = SmiSub(next_elem, elem);
Goto(&loop_epilogue);
@@ -2740,13 +2561,14 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
CSA_ASSERT(this, IsString(elem));
Callable call_callable = CodeFactory::Call(isolate);
- TNode<Smi> match_start = var_match_start;
+ TNode<Smi> match_start = var_match_start.value();
Node* const replacement_obj =
CallJS(call_callable, context, replace_callable, undefined, elem,
match_start, string);
- Node* const replacement_str = ToString_Inline(context, replacement_obj);
- StoreFixedArrayElement(res_elems, var_i, replacement_str);
+ TNode<String> const replacement_str =
+ ToString_Inline(context, replacement_obj);
+ StoreFixedArrayElement(res_elems, var_i.value(), replacement_str);
TNode<Smi> const elem_length = LoadStringLengthAsSmi(elem);
var_match_start = SmiAdd(match_start, elem_length);
@@ -2756,7 +2578,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&loop_epilogue);
{
- var_i = IntPtrAdd(var_i, int_one);
+ var_i = IntPtrAdd(var_i.value(), int_one);
Goto(&loop);
}
}
@@ -2795,7 +2617,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// Overwrite the i'th element in the results with the string
// we got back from the callback function.
- Node* const replacement_str =
+ TNode<String> const replacement_str =
ToString_Inline(context, replacement_obj);
StoreFixedArrayElement(res_elems, index, replacement_str);
@@ -2821,20 +2643,19 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
}
Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
- Node* context, Node* regexp, Node* string, Node* replace_string) {
+ Node* context, Node* regexp, TNode<String> string,
+ TNode<String> replace_string) {
// The fast path is reached only if {receiver} is an unmodified
// JSRegExp instance, {replace_value} is non-callable, and
// ToString({replace_value}) does not contain '$', i.e. we're doing a simple
// string replacement.
+ CSA_ASSERT(this, IsFastRegExp(context, regexp));
+
Node* const smi_zero = SmiConstant(0);
const bool kIsFastPath = true;
- CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(replace_string));
- CSA_ASSERT(this, IsString(string));
-
- VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
+ TVARIABLE(String, var_result, EmptyStringConstant());
VARIABLE(var_match_indices, MachineRepresentation::kTagged);
VARIABLE(var_last_match_end, MachineRepresentation::kTagged, smi_zero);
VARIABLE(var_is_unicode, MachineRepresentation::kWord32, Int32Constant(0));
@@ -2871,22 +2692,21 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
{
// TODO(jgruber): We could skip many of the checks that using SubString
// here entails.
- Node* const first_part =
- SubString(context, string, var_last_match_end.value(), match_start);
-
- Node* const result = StringAdd(context, var_result.value(), first_part);
- var_result.Bind(result);
+ TNode<String> const first_part =
+ SubString(string, SmiUntag(var_last_match_end.value()),
+ SmiUntag(match_start));
+ var_result = StringAdd(context, var_result.value(), first_part);
Goto(&loop_end);
}
BIND(&if_replaceisnotempty);
{
- Node* const first_part =
- SubString(context, string, var_last_match_end.value(), match_start);
-
- Node* result = StringAdd(context, var_result.value(), first_part);
- result = StringAdd(context, result, replace_string);
- var_result.Bind(result);
+ TNode<String> const first_part =
+ SubString(string, SmiUntag(var_last_match_end.value()),
+ SmiUntag(match_start));
+ TNode<String> result =
+ StringAdd(context, var_result.value(), first_part);
+ var_result = StringAdd(context, result, replace_string);
Goto(&loop_end);
}
@@ -2910,10 +2730,9 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
BIND(&if_nofurthermatches);
{
TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
- Node* const last_part =
- SubString(context, string, var_last_match_end.value(), string_length);
- Node* const result = StringAdd(context, var_result.value(), last_part);
- var_result.Bind(result);
+ TNode<String> const last_part = SubString(
+ string, SmiUntag(var_last_match_end.value()), SmiUntag(string_length));
+ var_result = StringAdd(context, var_result.value(), last_part);
Goto(&out);
}
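
ReplaceSimpleStringFastPath above concatenates, per match, the unmatched prefix and the literal replacement, then appends the tail after the final match. A plain std::string sketch of that loop shape (literal substring search stands in for the regexp engine):

// Simple-replacement loop mirroring the fast path above: append the
// unmatched first part and the '$'-free replacement per match, then the
// last part after the final match.
#include <string>

std::string ReplaceAll(const std::string& subject, const std::string& pattern,
                       const std::string& replacement) {
  std::string result;
  std::size_t last_end = 0;
  for (std::size_t pos = subject.find(pattern); pos != std::string::npos;
       pos = subject.find(pattern, last_end)) {
    result.append(subject, last_end, pos - last_end);  // First part.
    result += replacement;
    last_end = pos + pattern.size();
    if (pattern.empty()) break;  // Avoid an infinite loop on empty patterns.
  }
  result.append(subject, last_end, std::string::npos);  // Last part.
  return result;
}
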
@@ -2924,12 +2743,11 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// Helper that skips a few initial checks.
TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const replace_value = Parameter(Descriptor::kReplaceValue);
Node* const context = Parameter(Descriptor::kContext);
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(string));
Label checkreplacestring(this), if_iscallable(this),
runtime(this, Label::kDeferred);
@@ -2942,7 +2760,8 @@ TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
// 3. Does ToString({replace_value}) contain '$'?
BIND(&checkreplacestring);
{
- Node* const replace_string = ToString_Inline(context, replace_value);
+ TNode<String> const replace_string =
+ ToString_Inline(context, replace_value);
// ToString(replaceValue) could potentially change the shape of the RegExp
// object. Recheck that we are still on the fast path and bail to runtime
@@ -3028,7 +2847,7 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = ToString_Inline(context, maybe_string);
+ TNode<String> const string = ToString_Inline(context, maybe_string);
// Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
Label stub(this), runtime(this, Label::kDeferred);
@@ -3046,27 +2865,19 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
// Simple string matching functionality for internal use which does not modify
// the last match info.
TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
- Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const string = Parameter(Descriptor::kString);
+ TNode<JSRegExp> const regexp = CAST(Parameter(Descriptor::kRegExp));
+ TNode<String> const string = CAST(Parameter(Descriptor::kString));
Node* const context = Parameter(Descriptor::kContext);
Node* const smi_zero = SmiConstant(0);
-
- CSA_ASSERT(this, IsJSRegExp(regexp));
- CSA_ASSERT(this, IsString(string));
-
Node* const native_context = LoadNativeContext(context);
Node* const internal_match_info = LoadContextElement(
native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
-
Node* const match_indices = RegExpExecInternal(context, regexp, string,
smi_zero, internal_match_info);
-
Node* const null = NullConstant();
- Label if_matched(this), if_didnotmatch(this);
- Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
-
- BIND(&if_didnotmatch);
+ Label if_matched(this);
+ GotoIfNot(WordEqual(match_indices, null), &if_matched);
Return(null);
BIND(&if_matched);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index c8a94b7293..b57b90acf9 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BUILTINS_BUILTINS_REGEXP_H_
-#define V8_BUILTINS_BUILTINS_REGEXP_H_
+#ifndef V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
+#define V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
#include "src/code-stub-assembler.h"
@@ -50,7 +50,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* ConstructNewResultFromMatchInfo(Node* const context, Node* const regexp,
Node* const match_info,
- Node* const string);
+ TNode<String> const string);
Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
Node* const regexp,
@@ -58,7 +58,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Label* if_didnotmatch,
const bool is_fastpath);
Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
- Node* const string, const bool is_fastpath);
+ TNode<String> string, const bool is_fastpath);
Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
MessageTemplate::Template msg_template,
@@ -100,7 +100,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* const is_unicode, bool is_fastpath);
void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
- Node* const string, const bool is_fastpath);
+ TNode<String> const string,
+ const bool is_fastpath);
void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
Node* const string);
@@ -108,15 +109,16 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* const string);
void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
- Node* const string, Node* const limit);
+ TNode<String> const string, Node* const limit);
Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
Node* replace_callable);
- Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp, Node* string,
- Node* replace_string);
+ Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp,
+ TNode<String> string,
+ TNode<String> replace_string);
};
} // namespace internal
} // namespace v8
-#endif // V8_BUILTINS_BUILTINS_REGEXP_H_
+#endif // V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 278a48c68e..2c9f0791da 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -69,9 +69,8 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
BIND(&invalid);
{
- CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
- tagged);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kNotIntegerSharedTypedArray,
+ tagged);
}
BIND(&not_float_or_clamped);
@@ -96,15 +95,12 @@ Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
// The |number_index| output parameter is used only for architectures that
// don't currently have a TF implementation and forward to runtime functions
// instead; they expect the value has already been coerced to an integer.
- *number_index = ToSmiIndex(tagged, context, &range_error);
- var_result.Bind(SmiToWord32(*number_index));
+ *number_index = ToSmiIndex(CAST(tagged), CAST(context), &range_error);
+ var_result.Bind(SmiToInt32(*number_index));
Goto(&done);
BIND(&range_error);
- {
- CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
- Unreachable();
- }
+ { ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); }
BIND(&done);
return var_result.value();
@@ -119,8 +115,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
context, LoadObjectField(array, JSTypedArray::kLengthOffset));
GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);
- CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);
BIND(&check_passed);
}
@@ -169,20 +164,20 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(
- AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+ Return(
+ SmiFromInt32(AtomicLoad(MachineType::Int8(), backing_store, index_word)));
BIND(&u8);
- Return(SmiFromWord32(
+ Return(SmiFromInt32(
AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
BIND(&i16);
- Return(SmiFromWord32(
+ Return(SmiFromInt32(
AtomicLoad(MachineType::Int16(), backing_store, WordShl(index_word, 1))));
BIND(&u16);
- Return(SmiFromWord32(AtomicLoad(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1))));
BIND(&i32);
Return(ChangeInt32ToTagged(
@@ -293,20 +288,20 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(AtomicExchange(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Int8(), backing_store,
+ index_word, value_word32)));
BIND(&u8);
- Return(SmiFromWord32(AtomicExchange(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Uint8(), backing_store,
+ index_word, value_word32)));
BIND(&i16);
- Return(SmiFromWord32(AtomicExchange(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Int16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&u16);
- Return(SmiFromWord32(AtomicExchange(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32(AtomicExchange(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&i32);
Return(ChangeInt32ToTagged(AtomicExchange(MachineType::Int32(), backing_store,
@@ -371,22 +366,22 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32(AtomicCompareExchange(MachineType::Int8(), backing_store,
- index_word, old_value_word32,
- new_value_word32)));
+ Return(SmiFromInt32(AtomicCompareExchange(MachineType::Int8(), backing_store,
+ index_word, old_value_word32,
+ new_value_word32)));
BIND(&u8);
- Return(SmiFromWord32(
- AtomicCompareExchange(MachineType::Uint8(), backing_store, index_word,
- old_value_word32, new_value_word32)));
+ Return(SmiFromInt32(AtomicCompareExchange(MachineType::Uint8(), backing_store,
+ index_word, old_value_word32,
+ new_value_word32)));
BIND(&i16);
- Return(SmiFromWord32(AtomicCompareExchange(
+ Return(SmiFromInt32(AtomicCompareExchange(
MachineType::Int16(), backing_store, WordShl(index_word, 1),
old_value_word32, new_value_word32)));
BIND(&u16);
- Return(SmiFromWord32(AtomicCompareExchange(
+ Return(SmiFromInt32(AtomicCompareExchange(
MachineType::Uint16(), backing_store, WordShl(index_word, 1),
old_value_word32, new_value_word32)));
@@ -468,22 +463,20 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromWord32((this->*function)(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
+ index_word, value_word32)));
BIND(&u8);
- Return(SmiFromWord32((this->*function)(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
+ index_word, value_word32)));
BIND(&i16);
- Return(
- SmiFromWord32((this->*function)(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&u16);
- Return(
- SmiFromWord32((this->*function)(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
+ WordShl(index_word, 1), value_word32)));
BIND(&i32);
Return(ChangeInt32ToTagged(
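The SmiFromWord32 to SmiFromInt32 renames above are mechanical, but the index scaling they surround is worth spelling out: an element index becomes a byte offset by shifting left by log2 of the element size, which is why the 16-bit cases wrap the index in WordShl(index_word, 1). A minimal standalone sketch of that scaling, with an illustrative helper name that is not part of V8:

#include <cstddef>

// Byte offset of element |index| when elements are 1 << |size_log2|
// bytes wide: 0 for Int8/Uint8, 1 for Int16/Uint16, 2 for Int32.
size_t ByteOffsetForIndex(size_t index, size_t size_log2) {
  return index << size_log2;
}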
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 195572de8e..5cc4621b84 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -124,42 +124,6 @@ Node* StringBuiltinsAssembler::PointerToStringDataAtIndex(
return IntPtrAdd(string_data, offset_in_bytes);
}
-void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
- Node* context, Variable* var_start, Node* start, Node* string_length) {
- TNode<Object> const start_int = ToInteger_Inline(
- CAST(context), CAST(start), CodeStubAssembler::kTruncateMinusZero);
- TNode<Smi> const zero = SmiConstant(0);
-
- Label done(this);
- Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
- Branch(TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
-
- BIND(&if_issmi);
- {
- TNode<Smi> const start_int_smi = CAST(start_int);
- var_start->Bind(Select(
- SmiLessThan(start_int_smi, zero),
- [&] { return SmiMax(SmiAdd(string_length, start_int_smi), zero); },
- [&] { return start_int_smi; }, MachineRepresentation::kTagged));
- Goto(&done);
- }
-
- BIND(&if_isheapnumber);
- {
- // If {start} is a heap number, it is definitely out of bounds. If it is
- // negative, {start} = max({string_length} + {start}, 0) = 0. If it is
- // positive, set {start} to {string_length} which ultimately results in
- // returning an empty string.
- TNode<HeapNumber> const start_int_hn = CAST(start_int);
- TNode<Float64T> const float_zero = Float64Constant(0.);
- TNode<Float64T> const start_float = LoadHeapNumberValue(start_int_hn);
- var_start->Bind(SelectTaggedConstant<Smi>(
- Float64LessThan(start_float, float_zero), zero, string_length));
- Goto(&done);
- }
- BIND(&done);
-}
-
void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
Node* right) {
VARIABLE(var_left, MachineRepresentation::kTagged, left);
@@ -300,21 +264,23 @@ void StringBuiltinsAssembler::StringEqual_Loop(
{
// If {offset} equals {end}, no difference was found, so the
// strings are equal.
- GotoIf(WordEqual(var_offset, length), if_equal);
+ GotoIf(WordEqual(var_offset.value(), length), if_equal);
// Load the next characters from {lhs} and {rhs}.
Node* lhs_value =
Load(lhs_type, lhs_data,
- WordShl(var_offset, ElementSizeLog2Of(lhs_type.representation())));
+ WordShl(var_offset.value(),
+ ElementSizeLog2Of(lhs_type.representation())));
Node* rhs_value =
Load(rhs_type, rhs_data,
- WordShl(var_offset, ElementSizeLog2Of(rhs_type.representation())));
+ WordShl(var_offset.value(),
+ ElementSizeLog2Of(rhs_type.representation())));
// Check if the characters match.
GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
// Advance to next character.
- var_offset = IntPtrAdd(var_offset, IntPtrConstant(1));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
Goto(&loop);
}
}
@@ -408,13 +374,13 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
{
// Check if {offset} equals {end}.
Label if_done(this), if_notdone(this);
- Branch(WordEqual(var_offset, end), &if_done, &if_notdone);
+ Branch(WordEqual(var_offset.value(), end), &if_done, &if_notdone);
BIND(&if_notdone);
{
// Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = Load(MachineType::Uint8(), lhs, var_offset);
- Node* rhs_value = Load(MachineType::Uint8(), rhs, var_offset);
+ Node* lhs_value = Load(MachineType::Uint8(), lhs, var_offset.value());
+ Node* rhs_value = Load(MachineType::Uint8(), rhs, var_offset.value());
// Check if the characters match.
Label if_valueissame(this), if_valueisnotsame(this);
@@ -424,7 +390,7 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
BIND(&if_valueissame);
{
// Advance to next character.
- var_offset = IntPtrAdd(var_offset, IntPtrConstant(1));
+ var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(1));
}
Goto(&loop);
@@ -563,20 +529,21 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
Return(result);
}
-TF_BUILTIN(StringCharCodeAt, StringBuiltinsAssembler) {
+TF_BUILTIN(StringCodePointAtUTF16, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
-
+ // TODO(sigurds) Figure out if passing length as argument pays off.
+ TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
// Load the character code at the {position} from the {receiver}.
- TNode<Int32T> code = StringCharCodeAt(receiver, position);
-
+ TNode<Int32T> code =
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromWord32(code);
+ TNode<Smi> result = SmiFromInt32(code);
Return(result);
}
-TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
+TF_BUILTIN(StringCodePointAtUTF32, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
@@ -587,7 +554,7 @@ TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromWord32(code);
+ TNode<Smi> result = SmiFromInt32(code);
Return(result);
}
@@ -648,11 +615,12 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// The {code16} fits into the SeqOneByteString {one_byte_result}.
Node* offset = ElementOffsetFromIndex(
- var_max_index, UINT8_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
+ var_max_index.value(), UINT8_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result,
offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
});
arguments.PopAndReturn(one_byte_result);
@@ -667,16 +635,17 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
// their corresponding positions in the new 16-bit string.
TNode<IntPtrT> zero = IntPtrConstant(0);
CopyStringCharacters(one_byte_result, two_byte_result, zero, zero,
- var_max_index, String::ONE_BYTE_ENCODING,
+ var_max_index.value(), String::ONE_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
// Write the character that caused the 8-bit to 16-bit fault.
- Node* max_index_offset = ElementOffsetFromIndex(
- var_max_index, UINT16_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ Node* max_index_offset =
+ ElementOffsetFromIndex(var_max_index.value(), UINT16_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
max_index_offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
// Resume copying the passed-in arguments from the same place where the
// 8-bit copy stopped, but this time copying over all of the characters
@@ -689,14 +658,14 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
Node* offset = ElementOffsetFromIndex(
- var_max_index, UINT16_ELEMENTS,
+ var_max_index.value(), UINT16_ELEMENTS,
CodeStubAssembler::INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
offset, code16);
- var_max_index = IntPtrAdd(var_max_index, IntPtrConstant(1));
+ var_max_index = IntPtrAdd(var_max_index.value(), IntPtrConstant(1));
},
- var_max_index);
+ var_max_index.value());
arguments.PopAndReturn(two_byte_result);
}
@@ -728,7 +697,7 @@ TF_BUILTIN(StringPrototypeCharCodeAt, StringBuiltinsAssembler) {
[this](TNode<String> receiver, TNode<IntPtrT> length,
TNode<IntPtrT> index) {
Node* value = StringCharCodeAt(receiver, index);
- return SmiFromWord32(value);
+ return SmiFromInt32(value);
});
}
@@ -742,9 +711,11 @@ TF_BUILTIN(StringPrototypeCodePointAt, StringBuiltinsAssembler) {
maybe_position, UndefinedConstant(),
[this](TNode<String> receiver, TNode<IntPtrT> length,
TNode<IntPtrT> index) {
+ // This is always a call to a builtin from JavaScript,
+ // so we need to produce UTF32.
Node* value = LoadSurrogatePairAt(receiver, length, index,
UnicodeEncoding::UTF32);
- return SmiFromWord32(value);
+ return SmiFromInt32(value);
});
}
@@ -1044,8 +1015,8 @@ void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
Branch(IsNullOrUndefined(value), &throw_exception, &out);
BIND(&throw_exception);
- TailCallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
- StringConstant(method_name));
+ ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
+ method_name);
BIND(&out);
}
@@ -1173,8 +1144,8 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution(
CSA_ASSERT(this, TaggedIsPositiveSmi(dollar_index));
Node* const matched =
- CallBuiltin(Builtins::kSubString, context, subject_string,
- match_start_index, match_end_index);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ SmiUntag(match_start_index), SmiUntag(match_end_index));
Node* const replacement_string =
CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
match_start_index, replace_string, dollar_index);
@@ -1242,11 +1213,10 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
BIND(&invalid_count);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidCountValue),
- var_count.value());
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidCountValue,
+ var_count.value());
}
+
BIND(&invalid_string_length);
{
CallRuntime(Runtime::kThrowInvalidStringLength, context);
@@ -1288,7 +1258,7 @@ TF_BUILTIN(StringRepeat, StringBuiltinsAssembler) {
{
{
Label next(this);
- GotoIfNot(SmiToWord32(SmiAnd(var_count.value(), SmiConstant(1))), &next);
+ GotoIfNot(SmiToInt32(SmiAnd(var_count.value(), SmiConstant(1))), &next);
var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
var_temp.value()));
Goto(&next);
@@ -1412,8 +1382,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
GotoIf(SmiEqual(match_start_index, smi_zero), &next);
Node* const prefix =
- CallBuiltin(Builtins::kSubString, context, subject_string, smi_zero,
- match_start_index);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ IntPtrConstant(0), SmiUntag(match_start_index));
var_result.Bind(prefix);
Goto(&next);
@@ -1453,8 +1423,8 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
BIND(&out);
{
Node* const suffix =
- CallBuiltin(Builtins::kSubString, context, subject_string,
- match_end_index, subject_length);
+ CallBuiltin(Builtins::kStringSubstring, context, subject_string,
+ SmiUntag(match_end_index), SmiUntag(subject_length));
Node* const result =
CallStub(stringadd_callable, context, var_result.value(), suffix);
Return(result);
@@ -1587,14 +1557,15 @@ class StringPadAssembler : public StringBuiltinsAssembler {
GotoIf(IsUndefined(fill), &pad);
var_fill_string = ToString_Inline(context, fill);
- var_fill_length = LoadStringLengthAsWord(var_fill_string);
+ var_fill_length = LoadStringLengthAsWord(var_fill_string.value());
- Branch(IntPtrGreaterThan(var_fill_length, IntPtrConstant(0)), &pad,
- &dont_pad);
+ Branch(IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)),
+ &pad, &dont_pad);
}
BIND(&pad);
{
- CSA_ASSERT(this, IntPtrGreaterThan(var_fill_length, IntPtrConstant(0)));
+ CSA_ASSERT(this,
+ IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)));
CSA_ASSERT(this, SmiGreaterThan(max_length, string_length));
Callable stringadd_callable =
@@ -1604,38 +1575,37 @@ class StringPadAssembler : public StringBuiltinsAssembler {
VARIABLE(var_pad, MachineRepresentation::kTagged);
Label single_char_fill(this), multi_char_fill(this), return_result(this);
- Branch(IntPtrEqual(var_fill_length, IntPtrConstant(1)), &single_char_fill,
- &multi_char_fill);
+ Branch(IntPtrEqual(var_fill_length.value(), IntPtrConstant(1)),
+ &single_char_fill, &multi_char_fill);
// Fast path for a single character fill. No need to calculate number of
// repetitions or remainder.
BIND(&single_char_fill);
{
var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
- static_cast<Node*>(var_fill_string),
+ static_cast<Node*>(var_fill_string.value()),
pad_length));
Goto(&return_result);
}
BIND(&multi_char_fill);
{
TNode<Int32T> const fill_length_word32 =
- TruncateWordToWord32(var_fill_length);
- TNode<Int32T> const pad_length_word32 = SmiToWord32(pad_length);
+ TruncateIntPtrToInt32(var_fill_length.value());
+ TNode<Int32T> const pad_length_word32 = SmiToInt32(pad_length);
TNode<Int32T> const repetitions_word32 =
Int32Div(pad_length_word32, fill_length_word32);
TNode<Int32T> const remaining_word32 =
Int32Mod(pad_length_word32, fill_length_word32);
var_pad.Bind(CallBuiltin(Builtins::kStringRepeat, context,
- static_cast<Node*>(var_fill_string),
- SmiFromWord32(repetitions_word32)));
+ var_fill_string.value(),
+ SmiFromInt32(repetitions_word32)));
GotoIfNot(remaining_word32, &return_result);
{
- Node* const remainder_string =
- CallBuiltin(Builtins::kSubString, context,
- static_cast<Node*>(var_fill_string), SmiConstant(0),
- SmiFromWord32(remaining_word32));
+ Node* const remainder_string = CallBuiltin(
+ Builtins::kStringSubstring, context, var_fill_string.value(),
+ IntPtrConstant(0), ChangeInt32ToIntPtr(remaining_word32));
var_pad.Bind(CallStub(stringadd_callable, context, var_pad.value(),
remainder_string));
Goto(&return_result);
@@ -1679,8 +1649,8 @@ TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
// ES6 section 21.1.3.18 String.prototype.slice ( start, end )
TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
Label out(this);
- VARIABLE(var_start, MachineRepresentation::kTagged);
- VARIABLE(var_end, MachineRepresentation::kTagged);
+ TVARIABLE(IntPtrT, var_start);
+ TVARIABLE(IntPtrT, var_end);
const int kStart = 0;
const int kEnd = 1;
@@ -1688,69 +1658,38 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
- Node* const start = args.GetOptionalArgumentValue(kStart);
- TNode<Object> end = CAST(args.GetOptionalArgumentValue(kEnd));
+ TNode<Object> start = args.GetOptionalArgumentValue(kStart);
+ TNode<Object> end = args.GetOptionalArgumentValue(kEnd);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
- TNode<Smi> const smi_zero = SmiConstant(0);
-
// 1. Let O be ? RequireObjectCoercible(this value).
RequireObjectCoercible(context, receiver, "String.prototype.slice");
// 2. Let S be ? ToString(O).
- Node* const subject_string =
- CallBuiltin(Builtins::kToString, context, receiver);
+ TNode<String> const subject_string =
+ CAST(CallBuiltin(Builtins::kToString, context, receiver));
// 3. Let len be the number of elements in S.
- TNode<Smi> const length = LoadStringLengthAsSmi(subject_string);
+ TNode<IntPtrT> const length = LoadStringLengthAsWord(subject_string);
- // Conversions and bounds-checks for {start}.
- ConvertAndBoundsCheckStartArgument(context, &var_start, start, length);
+ // Convert {start} to a relative index.
+ var_start = ConvertToRelativeIndex(context, start, length);
// 5. If end is undefined, let intEnd be len;
- var_end.Bind(length);
+ var_end = length;
GotoIf(IsUndefined(end), &out);
- // else let intEnd be ? ToInteger(end).
- Node* const end_int =
- ToInteger_Inline(context, end, CodeStubAssembler::kTruncateMinusZero);
-
- // 7. If intEnd < 0, let to be max(len + intEnd, 0);
- // otherwise let to be min(intEnd, len).
- Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
- Branch(TaggedIsSmi(end_int), &if_issmi, &if_isheapnumber);
-
- BIND(&if_issmi);
- {
- Node* const length_plus_end = SmiAdd(length, end_int);
- var_end.Bind(Select(SmiLessThan(end_int, smi_zero),
- [&] { return SmiMax(length_plus_end, smi_zero); },
- [&] { return SmiMin(length, end_int); },
- MachineRepresentation::kTagged));
- Goto(&out);
- }
-
- BIND(&if_isheapnumber);
- {
- // If {end} is a heap number, it is definitely out of bounds. If it is
- // negative, {int_end} = max({length} + {int_end}, 0) = 0. If it is
- // positive, set {int_end} to {length} which ultimately results in
- // returning an empty string.
- Node* const float_zero = Float64Constant(0.);
- Node* const end_float = LoadHeapNumberValue(end_int);
- var_end.Bind(SelectTaggedConstant<Smi>(
- Float64LessThan(end_float, float_zero), smi_zero, length));
- Goto(&out);
- }
+ // Convert {end} to a relative index.
+ var_end = ConvertToRelativeIndex(context, end, length);
+ Goto(&out);
Label return_emptystring(this);
BIND(&out);
{
- GotoIf(SmiLessThanOrEqual(var_end.value(), var_start.value()),
+ GotoIf(IntPtrLessThanOrEqual(var_end.value(), var_start.value()),
&return_emptystring);
- Node* const result =
- SubString(context, subject_string, var_start.value(), var_end.value(),
- SubStringFlags::FROM_TO_ARE_BOUNDED);
+ TNode<String> const result =
+ SubString(subject_string, var_start.value(), var_end.value());
args.PopAndReturn(result);
}
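The rewritten slice above folds the old Smi/HeapNumber bounds checks into a single ConvertToRelativeIndex call, which applies the ES6 relative-index rule: negative indices count back from the end, and the result is clamped to [0, length]. A sketch of that rule on plain integers (the helper name echoes the builtin, but this is not V8's implementation):

#include <algorithm>
#include <cstdint>

// If |index| < 0, max(length + index, 0); otherwise min(index, length).
int64_t RelativeIndex(int64_t index, int64_t length) {
  return index < 0 ? std::max<int64_t>(length + index, 0)
                   : std::min<int64_t>(index, length);
}

With length 5, RelativeIndex(-2, 5) is 3 and RelativeIndex(9, 5) is 5, matching "hello".slice(-2) == "lo" and "hello".slice(9) == "".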
@@ -1868,25 +1807,25 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
CodeStubArguments args(this, argc);
Node* const receiver = args.GetReceiver();
- Node* const start = args.GetOptionalArgumentValue(kStartArg);
- TNode<Object> length = CAST(args.GetOptionalArgumentValue(kLengthArg));
+ TNode<Object> start = args.GetOptionalArgumentValue(kStartArg);
+ TNode<Object> length = args.GetOptionalArgumentValue(kLengthArg);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Label out(this);
- TVARIABLE(Smi, var_start);
+ TVARIABLE(IntPtrT, var_start);
TVARIABLE(Number, var_length);
- TNode<Smi> const zero = SmiConstant(0);
+ TNode<IntPtrT> const zero = IntPtrConstant(0);
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string =
+ TNode<String> const string =
ToThisString(context, receiver, "String.prototype.substr");
- TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
+ TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
- // Conversions and bounds-checks for {start}.
- ConvertAndBoundsCheckStartArgument(context, &var_start, start, string_length);
+ // Convert {start} to a relative index.
+ var_start = ConvertToRelativeIndex(context, start, string_length);
// Conversions and bounds-checks for {length}.
Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
@@ -1897,7 +1836,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
Branch(IsUndefined(length), &if_isundefined, &if_isnotundefined);
BIND(&if_isundefined);
- var_length = string_length;
+ var_length = SmiTag(string_length);
Goto(&if_issmi);
BIND(&if_isnotundefined);
@@ -1905,18 +1844,20 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
CodeStubAssembler::kTruncateMinusZero);
}
- TVARIABLE(Smi, var_result_length);
+ TVARIABLE(IntPtrT, var_result_length);
- Branch(TaggedIsSmi(var_length), &if_issmi, &if_isheapnumber);
+ Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
// Set {length} to min(max({length}, 0), {string_length} - {start}
BIND(&if_issmi);
{
- TNode<Smi> const positive_length = SmiMax(CAST(var_length), zero);
- TNode<Smi> const minimal_length = SmiSub(string_length, var_start);
- var_result_length = SmiMin(positive_length, minimal_length);
+ TNode<IntPtrT> const positive_length =
+ IntPtrMax(SmiUntag(CAST(var_length.value())), zero);
+ TNode<IntPtrT> const minimal_length =
+ IntPtrSub(string_length, var_start.value());
+ var_result_length = IntPtrMin(positive_length, minimal_length);
- GotoIfNot(SmiLessThanOrEqual(var_result_length, zero), &out);
+ GotoIfNot(IntPtrLessThanOrEqual(var_result_length.value(), zero), &out);
args.PopAndReturn(EmptyStringConstant());
}
@@ -1926,11 +1867,12 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- CSA_ASSERT(this, IsHeapNumber(var_length));
+ CSA_ASSERT(this, IsHeapNumber(var_length.value()));
Label if_isnegative(this), if_ispositive(this);
TNode<Float64T> const float_zero = Float64Constant(0.);
- TNode<Float64T> const length_float = LoadHeapNumberValue(CAST(var_length));
+ TNode<Float64T> const length_float =
+ LoadHeapNumberValue(CAST(var_length.value()));
Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
&if_ispositive);
@@ -1939,17 +1881,17 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
BIND(&if_ispositive);
{
- var_result_length = SmiSub(string_length, var_start);
- GotoIfNot(SmiLessThanOrEqual(var_result_length, zero), &out);
+ var_result_length = IntPtrSub(string_length, var_start.value());
+ GotoIfNot(IntPtrLessThanOrEqual(var_result_length.value(), zero), &out);
args.PopAndReturn(EmptyStringConstant());
}
}
BIND(&out);
{
- TNode<Smi> const end = SmiAdd(var_start, var_result_length);
- Node* const result = SubString(context, string, var_start, end);
- args.PopAndReturn(result);
+ TNode<IntPtrT> const end =
+ IntPtrAdd(var_start.value(), var_result_length.value());
+ args.PopAndReturn(SubString(string, var_start.value(), end));
}
}
@@ -1959,7 +1901,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
Label out(this);
TVARIABLE(Smi, var_result);
- TNode<Object> const value_int =
+ TNode<Number> const value_int =
ToInteger_Inline(context, value, CodeStubAssembler::kTruncateMinusZero);
Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
@@ -1967,8 +1909,9 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
BIND(&if_issmi);
{
+ TNode<Smi> value_smi = CAST(value_int);
Label if_isinbounds(this), if_isoutofbounds(this, Label::kDeferred);
- Branch(SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
+ Branch(SmiAbove(value_smi, limit), &if_isoutofbounds, &if_isinbounds);
BIND(&if_isinbounds);
{
@@ -1980,7 +1923,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
{
TNode<Smi> const zero = SmiConstant(0);
var_result =
- SelectTaggedConstant(SmiLessThan(value_int, zero), zero, limit);
+ SelectTaggedConstant(SmiLessThan(value_smi, zero), zero, limit);
Goto(&out);
}
}
@@ -1999,16 +1942,15 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
}
BIND(&out);
- return var_result;
+ return var_result.value();
}
-TF_BUILTIN(SubString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* string = Parameter(Descriptor::kString);
- Node* from = Parameter(Descriptor::kFrom);
- Node* to = Parameter(Descriptor::kTo);
+TF_BUILTIN(StringSubstring, CodeStubAssembler) {
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<IntPtrT> from = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrom));
+ TNode<IntPtrT> to = UncheckedCast<IntPtrT>(Parameter(Descriptor::kTo));
- Return(SubString(context, string, from, to));
+ Return(SubString(string, from, to));
}
// ES6 #sec-string.prototype.substring
@@ -2031,7 +1973,7 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
VARIABLE(var_end, MachineRepresentation::kTagged);
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string =
+ TNode<String> const string =
ToThisString(context, receiver, "String.prototype.substring");
Node* const length = LoadStringLengthAsSmi(string);
@@ -2061,9 +2003,8 @@ TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
BIND(&out);
{
- Node* result =
- SubString(context, string, var_start.value(), var_end.value());
- args.PopAndReturn(result);
+ args.PopAndReturn(SubString(string, SmiUntag(var_start.value()),
+ SmiUntag(var_end.value())));
}
}
@@ -2072,14 +2013,14 @@ TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
Generate(String::kTrim, "String.prototype.trim");
}
-// Non-standard WebKit extension
-TF_BUILTIN(StringPrototypeTrimLeft, StringTrimAssembler) {
- Generate(String::kTrimLeft, "String.prototype.trimLeft");
+// https://github.com/tc39/proposal-string-left-right-trim
+TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
+ Generate(String::kTrimStart, "String.prototype.trimLeft");
}
-// Non-standard WebKit extension
-TF_BUILTIN(StringPrototypeTrimRight, StringTrimAssembler) {
- Generate(String::kTrimRight, "String.prototype.trimRight");
+// https://github.com/tc39/proposal-string-left-right-trim
+TF_BUILTIN(StringPrototypeTrimEnd, StringTrimAssembler) {
+ Generate(String::kTrimEnd, "String.prototype.trimRight");
}
void StringTrimAssembler::Generate(String::TrimMode mode,
@@ -2092,7 +2033,7 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
Node* const receiver = arguments.GetReceiver();
// Check that {receiver} is coercible to Object and convert it to a String.
- Node* const string = ToThisString(context, receiver, method_name);
+ TNode<String> const string = ToThisString(context, receiver, method_name);
TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
ToDirectStringAssembler to_direct(state(), string);
@@ -2105,20 +2046,20 @@ void StringTrimAssembler::Generate(String::TrimMode mode,
TVARIABLE(IntPtrT, var_start, IntPtrConstant(0));
TVARIABLE(IntPtrT, var_end, IntPtrSub(string_length, IntPtrConstant(1)));
- if (mode == String::kTrimLeft || mode == String::kTrim) {
+ if (mode == String::kTrimStart || mode == String::kTrim) {
ScanForNonWhiteSpaceOrLineTerminator(string_data, string_data_offset,
is_stringonebyte, &var_start,
string_length, 1, &return_emptystring);
}
- if (mode == String::kTrimRight || mode == String::kTrim) {
+ if (mode == String::kTrimEnd || mode == String::kTrim) {
ScanForNonWhiteSpaceOrLineTerminator(
string_data, string_data_offset, is_stringonebyte, &var_end,
IntPtrConstant(-1), -1, &return_emptystring);
}
- arguments.PopAndReturn(SubString(context, string, SmiTag(var_start),
- SmiAdd(SmiTag(var_end), SmiConstant(1)),
- SubStringFlags::FROM_TO_ARE_BOUNDED));
+ arguments.PopAndReturn(
+ SubString(string, var_start.value(),
+ IntPtrAdd(var_end.value(), IntPtrConstant(1))));
BIND(&if_runtime);
arguments.PopAndReturn(
@@ -2281,21 +2222,21 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
var_result = StringCharCodeAt(string, index);
var_trail = Int32Constant(0);
- GotoIf(Word32NotEqual(Word32And(var_result, Int32Constant(0xFC00)),
+ GotoIf(Word32NotEqual(Word32And(var_result.value(), Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&return_result);
TNode<IntPtrT> next_index = IntPtrAdd(index, IntPtrConstant(1));
GotoIfNot(IntPtrLessThan(next_index, length), &return_result);
var_trail = StringCharCodeAt(string, next_index);
- Branch(Word32Equal(Word32And(var_trail, Int32Constant(0xFC00)),
+ Branch(Word32Equal(Word32And(var_trail.value(), Int32Constant(0xFC00)),
Int32Constant(0xDC00)),
&handle_surrogate_pair, &return_result);
BIND(&handle_surrogate_pair);
{
- TNode<Int32T> lead = var_result;
- TNode<Int32T> trail = var_trail;
+ TNode<Int32T> lead = var_result.value();
+ TNode<Int32T> trail = var_trail.value();
// Check that this path is only taken if a surrogate pair is found
CSA_SLOW_ASSERT(this,
@@ -2332,7 +2273,7 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
}
BIND(&return_result);
- return var_result;
+ return var_result.value();
}
// ES6 #sec-%stringiteratorprototype%.next
@@ -2383,9 +2324,8 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
BIND(&throw_bad_receiver);
{
// The {receiver} is not a valid JSGeneratorObject.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant("String Iterator.prototype.next"), iterator);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant("String Iterator.prototype.next"), iterator);
}
}
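Several hunks in this file route through LoadSurrogatePairAt, whose two encodings differ only in how the lead and trail code units are combined. A standalone sketch of both combinations, assuming the inputs are a valid surrogate pair (names are illustrative, not V8's API):

#include <cstdint>

// UTF32: the real code point, as observed by String.prototype.codePointAt.
uint32_t CombineUTF32(uint16_t lead, uint16_t trail) {
  return 0x10000u + ((lead - 0xD800u) << 10) + (trail - 0xDC00u);
}

// UTF16 (little-endian layout): both code units packed into one word,
// so a caller can re-emit the pair without re-encoding it.
uint32_t CombineUTF16(uint16_t lead, uint16_t trail) {
  return lead | (static_cast<uint32_t>(trail) << 16);
}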
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index b830a8597d..2a4f23b003 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -2,9 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-typedarray-gen.h"
+
+#include "src/builtins/builtins-constructor-gen.h"
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/builtins/growable-fixed-array-gen.h"
#include "src/handles-inl.h"
namespace v8 {
@@ -23,106 +27,22 @@ using TNode = compiler::TNode<T>;
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
-class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- void GenerateTypedArrayPrototypeGetter(Node* context, Node* receiver,
- const char* method_name,
- int object_offset);
- void GenerateTypedArrayPrototypeIterationMethod(Node* context, Node* receiver,
- const char* method_name,
- IterationKind iteration_kind);
-
- void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
- TNode<Number> byte_offset, TNode<Number> byte_length);
- void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
- TNode<Map> map, TNode<Smi> length,
- TNode<Number> byte_offset);
-
- TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
- TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
- TNode<Number> byte_offset);
- Node* LoadDataPtr(Node* typed_array);
- TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
-
- // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
- TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
-
- // Loads the element kind of TypedArray instance.
- TNode<Word32T> LoadElementsKind(TNode<Object> typed_array);
-
- // Returns the byte size of an element for a TypedArray elements kind.
- TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
-
- // Fast path for setting a TypedArray (source) onto another TypedArray
- // (target) at an element offset.
- void SetTypedArraySource(TNode<Context> context, TNode<JSTypedArray> source,
- TNode<JSTypedArray> target, TNode<IntPtrT> offset,
- Label* call_runtime, Label* if_source_too_large);
-
- void SetJSArraySource(TNode<Context> context, TNode<JSArray> source,
- TNode<JSTypedArray> target, TNode<IntPtrT> offset,
- Label* call_runtime, Label* if_source_too_large);
-
- void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
- TNode<IntPtrT> byte_length);
-
- void CallCCopyFastNumberJSArrayElementsToTypedArray(
- TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
- TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
-
- void CallCCopyTypedArrayElementsToTypedArray(TNode<JSTypedArray> source,
- TNode<JSTypedArray> dest,
- TNode<IntPtrT> source_length,
- TNode<IntPtrT> offset);
-};
-
TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
TNode<JSTypedArray> array) {
- Label unreachable(this), done(this);
- Label uint8_elements(this), uint8_clamped_elements(this), int8_elements(this),
- uint16_elements(this), int16_elements(this), uint32_elements(this),
- int32_elements(this), float32_elements(this), float64_elements(this);
- Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_clamped_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements};
- int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
- const size_t kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- 1;
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
-
TVARIABLE(Map, var_typed_map);
-
TNode<Map> array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
- Switch(elements_kind, &unreachable, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
- for (int i = 0; i < static_cast<int>(kTypedElementsKindCount); i++) {
- BIND(elements_kind_labels[i]);
- {
- ElementsKind kind = static_cast<ElementsKind>(elements_kinds[i]);
- ExternalArrayType type =
- isolate()->factory()->GetArrayTypeFromElementsKind(kind);
- Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
- var_typed_map = HeapConstant(map);
- Goto(&done);
- }
- }
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ ExternalArrayType type =
+ isolate()->factory()->GetArrayTypeFromElementsKind(kind);
+ Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
+ var_typed_map = HeapConstant(map);
+ });
- BIND(&unreachable);
- { Unreachable(); }
- BIND(&done);
- return var_typed_map;
+ return var_typed_map.value();
}
// The byte_offset can be higher than Smi range, in which case to perform the
@@ -218,7 +138,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
Label setup_holder(this), allocate_on_heap(this), aligned(this),
allocate_elements(this), allocate_off_heap(this),
allocate_off_heap_no_init(this), attach_buffer(this), done(this);
- VARIABLE(var_total_size, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, var_total_size);
// SmiMul returns a heap number in case of Smi overflow.
TNode<Number> byte_length = SmiMul(length, element_size);
@@ -227,10 +147,12 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
TNode<Map> fixed_typed_map = LoadMapForType(holder);
GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
- GotoIf(
- SmiGreaterThan(byte_length, SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
- &allocate_off_heap);
- TNode<IntPtrT> word_byte_length = SmiToWord(CAST(byte_length));
+ // The goto above ensures that byte_length is a Smi.
+ TNode<Smi> smi_byte_length = CAST(byte_length);
+ GotoIf(SmiGreaterThan(smi_byte_length,
+ SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
+ &allocate_off_heap);
+ TNode<IntPtrT> word_byte_length = SmiToIntPtr(smi_byte_length);
Goto(&allocate_on_heap);
BIND(&allocate_on_heap);
@@ -281,17 +203,18 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
// Fix alignment if needed.
DCHECK_EQ(0, FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask);
- Node* aligned_header_size =
+ TNode<IntPtrT> aligned_header_size =
IntPtrConstant(FixedTypedArrayBase::kHeaderSize + kObjectAlignmentMask);
- Node* size = IntPtrAdd(word_byte_length, aligned_header_size);
- var_total_size.Bind(WordAnd(size, IntPtrConstant(~kObjectAlignmentMask)));
+ TNode<IntPtrT> size = IntPtrAdd(word_byte_length, aligned_header_size);
+ var_total_size = WordAnd(size, IntPtrConstant(~kObjectAlignmentMask));
Goto(&allocate_elements);
}
BIND(&aligned);
{
- Node* header_size = IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
- var_total_size.Bind(IntPtrAdd(word_byte_length, header_size));
+ TNode<IntPtrT> header_size =
+ IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
+ var_total_size = IntPtrAdd(word_byte_length, header_size);
Goto(&allocate_elements);
}
@@ -359,7 +282,8 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
BIND(&attach_buffer);
{
- AttachBuffer(holder, var_buffer, fixed_typed_map, length, byte_offset);
+ AttachBuffer(holder, var_buffer.value(), fixed_typed_map, length,
+ byte_offset);
Goto(&done);
}
@@ -368,49 +292,44 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
}
// ES6 #sec-typedarray-length
-TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- TNode<Object> maybe_length = CAST(Parameter(Descriptor::kLength));
- TNode<Object> element_size = CAST(Parameter(Descriptor::kElementSize));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CSA_ASSERT(this, IsJSTypedArray(holder));
+void TypedArrayBuiltinsAssembler::ConstructByLength(TNode<Context> context,
+ TNode<JSTypedArray> holder,
+ TNode<Object> length,
+ TNode<Smi> element_size) {
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
- Label invalid_length(this);
+ Label invalid_length(this, Label::kDeferred), done(this);
- TNode<Number> length = ToInteger_Inline(
- context, maybe_length, CodeStubAssembler::kTruncateMinusZero);
+ TNode<Number> converted_length =
+ ToInteger_Inline(context, length, CodeStubAssembler::kTruncateMinusZero);
// The maximum length of a TypedArray is MaxSmi().
// Note: this is not per spec, but rather a constraint of our current
- // representation (which uses smi's).
- GotoIf(TaggedIsNotSmi(length), &invalid_length);
- GotoIf(SmiLessThan(length, SmiConstant(0)), &invalid_length);
-
- CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
- element_size, TrueConstant());
- Return(UndefinedConstant());
+ // representation (which uses Smis).
+ GotoIf(TaggedIsNotSmi(converted_length), &invalid_length);
+ // The goto above ensures that converted_length is a Smi.
+ TNode<Smi> smi_converted_length = CAST(converted_length);
+ GotoIf(SmiLessThan(smi_converted_length, SmiConstant(0)), &invalid_length);
+
+ Node* initialize = TrueConstant();
+ CallBuiltin(Builtins::kTypedArrayInitialize, context, holder,
+ converted_length, element_size, initialize);
+ Goto(&done);
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength), length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ converted_length);
}
+
+ BIND(&done);
}
// ES6 #sec-typedarray-buffer-byteoffset-length
-TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* buffer = Parameter(Descriptor::kBuffer);
- TNode<Object> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
- Node* length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- CSA_ASSERT(this, IsJSTypedArray(holder));
- CSA_ASSERT(this, IsJSArrayBuffer(buffer));
+void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSArrayBuffer> buffer, TNode<Object> byte_offset,
+ TNode<Object> length, TNode<Smi> element_size) {
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
VARIABLE(new_byte_length, MachineRepresentation::kTagged, SmiConstant(0));
@@ -421,7 +340,8 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
invalid_offset_error(this, Label::kDeferred);
Label offset_is_smi(this), offset_not_smi(this, Label::kDeferred),
check_length(this), call_init(this), invalid_length(this),
- length_undefined(this), length_defined(this), detached_error(this);
+ length_undefined(this), length_defined(this), detached_error(this),
+ done(this);
GotoIf(IsUndefined(byte_offset), &check_length);
@@ -477,7 +397,7 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&length_defined);
{
- Node* new_length = ToSmiIndex(length, context, &invalid_length);
+ TNode<Smi> new_length = ToSmiIndex(length, context, &invalid_length);
GotoIf(IsDetachedBuffer(buffer), &detached_error);
new_byte_length.Bind(SmiMul(new_length, element_size));
// Reading the byte length must come after the ToIndex operation, which
@@ -495,22 +415,18 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&call_init);
{
- Node* new_length = CallBuiltin(Builtins::kDivide, context,
- new_byte_length.value(), element_size);
+ TNode<Object> raw_length = CallBuiltin(
+ Builtins::kDivide, context, new_byte_length.value(), element_size);
// Force the result into a Smi, or throw a range error if it doesn't fit.
- new_length = ToSmiIndex(new_length, context, &invalid_length);
+ TNode<Smi> new_length = ToSmiIndex(raw_length, context, &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitializeWithBuffer, context, holder,
new_length, buffer, element_size, offset.value());
- Return(UndefinedConstant());
+ Goto(&done);
}
BIND(&invalid_offset_error);
- {
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidOffset), byte_offset);
- Unreachable();
- }
+ { ThrowRangeError(context, MessageTemplate::kInvalidOffset, byte_offset); }
BIND(&start_offset_error);
{
@@ -534,24 +450,84 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength), length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength, length);
}
BIND(&detached_error);
{ ThrowTypeError(context, MessageTemplate::kDetachedOperation, "Construct"); }
+
+ BIND(&done);
+}
+
+void TypedArrayBuiltinsAssembler::ConstructByTypedArray(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSTypedArray> typed_array, TNode<Smi> element_size) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
+
+ TNode<JSFunction> const default_constructor = CAST(LoadContextElement(
+ LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX));
+
+ Label construct(this), if_detached(this), if_notdetached(this),
+ check_for_sab(this), if_buffernotshared(this), check_prototype(this),
+ done(this);
+ TVARIABLE(JSReceiver, buffer_constructor, default_constructor);
+
+ TNode<JSArrayBuffer> source_buffer = LoadObjectField<JSArrayBuffer>(
+ typed_array, JSArrayBufferView::kBufferOffset);
+ Branch(IsDetachedBuffer(source_buffer), &if_detached, &if_notdetached);
+
+ // TODO(petermarshall): Throw on detached typedArray.
+ TVARIABLE(Smi, source_length);
+ BIND(&if_detached);
+ source_length = SmiConstant(0);
+ Goto(&check_for_sab);
+
+ BIND(&if_notdetached);
+ source_length =
+ CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset));
+ Goto(&check_for_sab);
+
+ // The spec requires that constructing a typed array using a SAB-backed typed
+ // array use the ArrayBuffer constructor, not the species constructor. See
+ // https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
+ BIND(&check_for_sab);
+ TNode<Uint32T> bitfield =
+ LoadObjectField<Uint32T>(source_buffer, JSArrayBuffer::kBitFieldOffset);
+ Branch(IsSetWord32<JSArrayBuffer::IsShared>(bitfield), &construct,
+ &if_buffernotshared);
+
+ BIND(&if_buffernotshared);
+ {
+ buffer_constructor =
+ CAST(SpeciesConstructor(context, source_buffer, default_constructor));
+ // TODO(petermarshall): Throw on detached typedArray.
+ GotoIfNot(IsDetachedBuffer(source_buffer), &construct);
+ source_length = SmiConstant(0);
+ Goto(&construct);
+ }
+
+ BIND(&construct);
+ {
+ ConstructByArrayLike(context, holder, typed_array, source_length.value(),
+ element_size);
+ TNode<Object> proto = GetProperty(context, buffer_constructor.value(),
+ PrototypeStringConstant());
+ // TODO(petermarshall): Correct for realm as per 9.1.14 step 4.
+ TNode<JSArrayBuffer> buffer = LoadObjectField<JSArrayBuffer>(
+ holder, JSArrayBufferView::kBufferOffset);
+ CallRuntime(Runtime::kInternalSetPrototype, context, buffer, proto);
+
+ Goto(&done);
+ }
+
+ BIND(&done);
}
Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
CSA_ASSERT(this, IsJSTypedArray(typed_array));
Node* elements = LoadElements(typed_array);
CSA_ASSERT(this, IsFixedTypedArray(elements));
- Node* base_pointer = BitcastTaggedToWord(
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset));
- Node* external_pointer = BitcastTaggedToWord(
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset));
- return IntPtrAdd(base_pointer, external_pointer);
+ return LoadFixedTypedArrayBackingStore(CAST(elements));
}
TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
@@ -574,28 +550,24 @@ TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
Goto(&done);
BIND(&done);
- return is_valid;
+ return is_valid.value();
}
-TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
- Node* holder = Parameter(Descriptor::kHolder);
- Node* array_like = Parameter(Descriptor::kArrayLike);
- Node* initial_length = Parameter(Descriptor::kLength);
- Node* element_size = Parameter(Descriptor::kElementSize);
- CSA_ASSERT(this, TaggedIsSmi(element_size));
- Node* context = Parameter(Descriptor::kContext);
-
+void TypedArrayBuiltinsAssembler::ConstructByArrayLike(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<HeapObject> array_like, TNode<Object> initial_length,
+ TNode<Smi> element_size) {
Node* initialize = FalseConstant();
- Label invalid_length(this), fill(this), fast_copy(this);
+ Label invalid_length(this), fill(this), fast_copy(this), done(this);
// The caller has looked up length on array_like, which is observable.
- Node* length = ToSmiLength(initial_length, context, &invalid_length);
+ TNode<Smi> length = ToSmiLength(initial_length, context, &invalid_length);
CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
element_size, initialize);
GotoIf(SmiNotEqual(length, SmiConstant(0)), &fill);
- Return(UndefinedConstant());
+ Goto(&done);
BIND(&fill);
TNode<Int32T> holder_kind = LoadMapElementsKind(LoadMap(holder));
@@ -605,7 +577,7 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
// Copy using the elements accessor.
CallRuntime(Runtime::kTypedArrayCopyElements, context, holder, array_like,
length);
- Return(UndefinedConstant());
+ Goto(&done);
BIND(&fast_copy);
{
@@ -632,16 +604,117 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::UintPtr(), memcpy,
holder_data_ptr, source_data_ptr, byte_length_intptr);
- Return(UndefinedConstant());
+ Goto(&done);
}
BIND(&invalid_length);
{
- CallRuntime(Runtime::kThrowRangeError, context,
- SmiConstant(MessageTemplate::kInvalidTypedArrayLength),
- initial_length);
- Unreachable();
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ initial_length);
+ }
+
+ BIND(&done);
+}
+
+void TypedArrayBuiltinsAssembler::ConstructByIterable(
+ TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSReceiver> iterable, TNode<Object> iterator_fn,
+ TNode<Smi> element_size) {
+ CSA_ASSERT(this, IsCallable(iterator_fn));
+ Label fast_path(this), slow_path(this), done(this);
+
+ TNode<JSArray> array_like = CAST(
+ CallBuiltin(Builtins::kIterableToList, context, iterable, iterator_fn));
+ TNode<Object> initial_length = LoadJSArrayLength(array_like);
+ ConstructByArrayLike(context, holder, array_like, initial_length,
+ element_size);
+}
+
+TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ // If NewTarget is undefined, throw a TypeError exception.
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* shared = LoadObjectField(target, JSFunction::kSharedFunctionInfoOffset);
+ Node* name = LoadObjectField(shared, SharedFunctionInfo::kNameOffset);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, name);
+}
+
+TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
+ Label if_arg1isbuffer(this), if_arg1istypedarray(this),
+ if_arg1isreceiver(this), if_arg1isnumber(this), done(this);
+
+ TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
+ CSA_ASSERT(this, IsNotUndefined(new_target));
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
+ TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
+ TNode<Object> arg3 = args.GetOptionalArgumentValue(2);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ TNode<JSTypedArray> holder = CAST(
+ constructor_assembler.EmitFastNewObject(context, target, new_target));
+
+ TNode<Smi> element_size =
+ SmiTag(GetTypedArrayElementSize(LoadElementsKind(holder)));
+
+ GotoIf(TaggedIsSmi(arg1), &if_arg1isnumber);
+ GotoIf(IsJSArrayBuffer(arg1), &if_arg1isbuffer);
+ GotoIf(IsJSTypedArray(arg1), &if_arg1istypedarray);
+ GotoIf(IsJSReceiver(arg1), &if_arg1isreceiver);
+ Goto(&if_arg1isnumber);
+
+ BIND(&if_arg1isbuffer);
+ ConstructByArrayBuffer(context, holder, CAST(arg1), arg2, arg3, element_size);
+ Goto(&done);
+
+ BIND(&if_arg1istypedarray);
+ TNode<JSTypedArray> typed_array = CAST(arg1);
+ ConstructByTypedArray(context, holder, typed_array, element_size);
+ Goto(&done);
+
+ BIND(&if_arg1isreceiver);
+ {
+ Label if_iteratorundefined(this), if_iteratornotcallable(this);
+ // Look up the @@iterator method.
+ TNode<Object> iteratorFn =
+ CAST(GetMethod(context, arg1, isolate()->factory()->iterator_symbol(),
+ &if_iteratorundefined));
+ GotoIf(TaggedIsSmi(iteratorFn), &if_iteratornotcallable);
+ GotoIfNot(IsCallable(iteratorFn), &if_iteratornotcallable);
+
+ ConstructByIterable(context, holder, CAST(arg1), iteratorFn, element_size);
+ Goto(&done);
+
+ BIND(&if_iteratorundefined);
+ {
+ TNode<HeapObject> array_like = CAST(arg1);
+ TNode<Object> initial_length =
+ GetProperty(context, arg1, LengthStringConstant());
+
+ ConstructByArrayLike(context, holder, array_like, initial_length,
+ element_size);
+ Goto(&done);
+ }
+
+ BIND(&if_iteratornotcallable);
+ { ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable); }
}
+
+ // First arg was a number or fell through and will be treated as a number.
+ BIND(&if_arg1isnumber);
+ ConstructByLength(context, holder, arg1, element_size);
+ Goto(&done);
+
+ BIND(&done);
+ args.PopAndReturn(holder);
}
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
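The new construct stub above replaces the separate ConstructByLength/ByArrayBuffer/ByArrayLike builtin entry points with a single dispatch over the first argument. A hypothetical sketch of that dispatch order (the enum, struct, and predicates are illustrative only, not V8 types):

enum class ConstructPath {
  kByLength, kByArrayBuffer, kByTypedArray, kByIterable, kByArrayLike
};

struct Arg {
  bool is_smi, is_array_buffer, is_typed_array, is_receiver;
  bool has_callable_iterator;
};

// Mirrors the GotoIf chain in TypedArrayConstructor_ConstructStub.
ConstructPath Dispatch(const Arg& arg1) {
  if (arg1.is_smi) return ConstructPath::kByLength;
  if (arg1.is_array_buffer) return ConstructPath::kByArrayBuffer;
  if (arg1.is_typed_array) return ConstructPath::kByTypedArray;
  if (arg1.is_receiver) {
    return arg1.has_callable_iterator ? ConstructPath::kByIterable
                                      : ConstructPath::kByArrayLike;
  }
  return ConstructPath::kByLength;  // anything else is coerced to a number
}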
@@ -668,9 +741,8 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
BIND(&receiver_is_incompatible);
{
// The {receiver} is not a valid JSTypedArray.
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant(method_name), receiver);
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), receiver);
}
}
@@ -707,57 +779,156 @@ TNode<Word32T> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)));
}
+TNode<Word32T> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
+ TNode<Word32T> kind) {
+ return Word32Or(Word32Equal(kind, Int32Constant(BIGINT64_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(BIGUINT64_ELEMENTS)));
+}
+
TNode<Word32T> TypedArrayBuiltinsAssembler::LoadElementsKind(
- TNode<Object> typed_array) {
- CSA_ASSERT(this, IsJSTypedArray(typed_array));
- return LoadMapElementsKind(LoadMap(CAST(typed_array)));
+ TNode<JSTypedArray> typed_array) {
+ return LoadMapElementsKind(LoadMap(typed_array));
}
TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
TNode<Word32T> elements_kind) {
TVARIABLE(IntPtrT, element_size);
- Label next(this), if_unknown_type(this, Label::kDeferred);
- size_t const kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- 1;
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind el_kind, int size, int typed_array_fun_index) {
+ element_size = IntPtrConstant(size);
+ });
- int32_t elements_kinds[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- };
+ return element_size.value();
+}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- Label if_##type##array(this);
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+TNode<Object> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
+ TNode<Context> context, TNode<JSTypedArray> exemplar) {
+ TVARIABLE(IntPtrT, context_slot);
+ TNode<Word32T> elements_kind = LoadElementsKind(exemplar);
- Label* elements_kind_labels[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- };
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind el_kind, int size, int typed_array_function_index) {
+ context_slot = IntPtrConstant(typed_array_function_index);
+ });
- Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
+ return LoadContextElement(LoadNativeContext(context), context_slot.value());
+}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- BIND(&if_##type##array); \
- { \
- element_size = IntPtrConstant(size); \
- Goto(&next); \
- }
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+TNode<Object> TypedArrayBuiltinsAssembler::TypedArraySpeciesConstructor(
+ TNode<Context> context, TNode<JSTypedArray> exemplar) {
+ TVARIABLE(Object, var_constructor);
+ Label slow(this), done(this);
- BIND(&if_unknown_type);
+ // Let defaultConstructor be the intrinsic object listed in column one of
+ // Table 52 for exemplar.[[TypedArrayName]].
+ TNode<Object> default_constructor = GetDefaultConstructor(context, exemplar);
+
+ var_constructor = default_constructor;
+ Node* map = LoadMap(exemplar);
+ GotoIfNot(IsPrototypeTypedArrayPrototype(context, map), &slow);
+ Branch(IsSpeciesProtectorCellInvalid(), &slow, &done);
+
+ BIND(&slow);
+ var_constructor =
+ CAST(SpeciesConstructor(context, exemplar, default_constructor));
+ Goto(&done);
+
+ BIND(&done);
+ return var_constructor.value();
+}
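
TypedArraySpeciesConstructor only takes the slow SpeciesConstructor path when the exemplar's map or the species protector indicates tampering; otherwise it returns the default constructor directly. A small TypeScript illustration, assuming an untouched environment:

  // Fast path: the default constructor is used without reading @@species.
  const a = new Float64Array(4);
  console.log(a.slice(0, 2).constructor === Float64Array);  // true

  // A custom @@species invalidates the fast path and is honored instead.
  class MyF64 extends Float64Array {
    static get [Symbol.species]() { return Float64Array; }
  }
  console.log(new MyF64(4).slice(0, 2) instanceof MyF64);   // false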
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::SpeciesCreateByArrayBuffer(
+ TNode<Context> context, TNode<JSTypedArray> exemplar,
+ TNode<JSArrayBuffer> buffer, TNode<Number> byte_offset, TNode<Smi> len,
+ const char* method_name) {
+ // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
+ TNode<Object> constructor = TypedArraySpeciesConstructor(context, exemplar);
+
+ // Let newTypedArray be ? Construct(constructor, argumentList).
+ TNode<Object> new_object =
+ CAST(ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
+ buffer, byte_offset, len));
+
+ // Perform ? ValidateTypedArray(newTypedArray).
+ return ValidateTypedArray(context, new_object, method_name);
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::SpeciesCreateByLength(
+ TNode<Context> context, TNode<JSTypedArray> exemplar, TNode<Smi> len,
+ const char* method_name) {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(len));
+
+ // Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
+ TNode<Object> constructor = TypedArraySpeciesConstructor(context, exemplar);
+ CSA_ASSERT(this, IsJSFunction(constructor));
+
+ return CreateByLength(context, constructor, len, method_name);
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::CreateByLength(
+ TNode<Context> context, TNode<Object> constructor, TNode<Smi> len,
+ const char* method_name) {
+ // Let newTypedArray be ? Construct(constructor, argumentList).
+ TNode<Object> new_object = CAST(ConstructJS(CodeFactory::Construct(isolate()),
+ context, constructor, len));
+
+ // Perform ? ValidateTypedArray(newTypedArray).
+ TNode<JSTypedArray> new_typed_array =
+ ValidateTypedArray(context, new_object, method_name);
+
+ // If newTypedArray.[[ArrayLength]] < argumentList[0], throw a TypeError
+ // exception.
+ Label if_length_is_not_short(this);
+ TNode<Smi> new_length =
+ LoadObjectField<Smi>(new_typed_array, JSTypedArray::kLengthOffset);
+ GotoIfNot(SmiLessThan(new_length, len), &if_length_is_not_short);
+ ThrowTypeError(context, MessageTemplate::kTypedArrayTooShort);
+
+ BIND(&if_length_is_not_short);
+ return new_typed_array;
+}
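
CreateByLength enforces the spec's TypedArrayCreate length check: if a user-supplied constructor hands back a shorter array than requested, the builtin throws kTypedArrayTooShort. A sketch in TypeScript (Shrinking is a hypothetical name):

  class Shrinking extends Uint8Array {}
  Object.defineProperty(Shrinking, Symbol.species, {
    // Species "constructor" that ignores the requested length.
    get() { return function () { return new Uint8Array(0); }; },
  });
  try {
    new Shrinking(8).slice(0, 4);  // asks species for 4 elements, receives 0
  } catch (e) {
    console.log(e instanceof TypeError);  // true: kTypedArrayTooShort
  }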
+
+TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::GetBuffer(
+ TNode<Context> context, TNode<JSTypedArray> array) {
+ Label call_runtime(this), done(this);
+ TVARIABLE(Object, var_result);
+
+ TNode<Object> buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &call_runtime);
+ TNode<UintPtrT> backing_store = LoadObjectField<UintPtrT>(
+ CAST(buffer), JSArrayBuffer::kBackingStoreOffset);
+ GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
+ var_result = buffer;
+ Goto(&done);
+
+ BIND(&call_runtime);
{
- element_size = IntPtrConstant(0);
- Goto(&next);
+ var_result = CallRuntime(Runtime::kTypedArrayGetBuffer, context, array);
+ Goto(&done);
}
- BIND(&next);
- return element_size;
+
+ BIND(&done);
+ return CAST(var_result.value());
+}
+
+TNode<JSTypedArray> TypedArrayBuiltinsAssembler::ValidateTypedArray(
+ TNode<Context> context, TNode<Object> obj, const char* method_name) {
+ Label validation_done(this);
+
+ // If it is not a typed array, throw
+ ThrowIfNotInstanceType(context, obj, JS_TYPED_ARRAY_TYPE, method_name);
+
+ // If the typed array's buffer is detached, throw
+ TNode<Object> buffer =
+ LoadObjectField(CAST(obj), JSTypedArray::kBufferOffset);
+ GotoIfNot(IsDetachedBuffer(buffer), &validation_done);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+
+ BIND(&validation_done);
+ return CAST(obj);
}
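
ValidateTypedArray performs the two checks of the spec's ValidateTypedArray abstract operation, both observable from JS:

  const slice = Uint8Array.prototype.slice;
  // 1. The receiver must actually be a typed array.
  try {
    (slice as any).call({}, 0);
  } catch (e) {
    console.log(e instanceof TypeError);  // true: wrong instance type
  }
  // 2. The backing buffer must not be detached; detaching it (for example by
  //    transferring it via postMessage) makes slice() throw kDetachedOperation.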
void TypedArrayBuiltinsAssembler::SetTypedArraySource(
@@ -801,7 +972,7 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
CSA_ASSERT(this,
UintPtrGreaterThanOrEqual(source_byte_length, IntPtrConstant(0)));
- Label call_memmove(this), fast_c_call(this), out(this);
+ Label call_memmove(this), fast_c_call(this), out(this), exception(this);
  // A fast memmove call can be used when the source and target types are
// the same or either Uint8 or Uint8Clamped.
@@ -823,6 +994,10 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
this, UintPtrGreaterThanOrEqual(
IntPtrMul(target_length, target_el_size), IntPtrConstant(0)));
+ GotoIf(Word32NotEqual(IsBigInt64ElementsKind(source_el_kind),
+ IsBigInt64ElementsKind(target_el_kind)),
+ &exception);
+
TNode<IntPtrT> source_length =
LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
CallCCopyTypedArrayElementsToTypedArray(source, target, source_length,
@@ -830,6 +1005,9 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
Goto(&out);
}
+ BIND(&exception);
+ ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
+
BIND(&out);
}
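
The new IsBigInt64ElementsKind guard makes the typed-array-to-typed-array path of %TypedArray%.prototype.set reject copies between BigInt and non-BigInt element kinds, for example:

  const big = new BigInt64Array(2);
  big.set(new BigInt64Array([1n, 2n]));        // OK: both sides are BigInt kinds
  try {
    big.set(new Float64Array([1, 2]) as any);  // BigInt target, Number source
  } catch (e) {
    console.log(e instanceof TypeError);       // true: kBigIntMixedTypes
  }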
@@ -871,6 +1049,7 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource(
}
BIND(&fast_c_call);
+ GotoIf(IsBigInt64ElementsKind(LoadElementsKind(target)), call_runtime);
CallCCopyFastNumberJSArrayElementsToTypedArray(context, source, target,
source_length, offset);
Goto(&out);
@@ -893,6 +1072,7 @@ void TypedArrayBuiltinsAssembler::
TNode<JSTypedArray> dest,
TNode<IntPtrT> source_length,
TNode<IntPtrT> offset) {
+ CSA_ASSERT(this, Word32Not(IsBigInt64ElementsKind(LoadElementsKind(dest))));
TNode<ExternalReference> f = ExternalConstant(
ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
isolate()));
@@ -913,6 +1093,56 @@ void TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsToTypedArray(
offset);
}
+void TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsSlice(
+ TNode<JSTypedArray> source, TNode<JSTypedArray> dest, TNode<IntPtrT> start,
+ TNode<IntPtrT> end) {
+ TNode<ExternalReference> f = ExternalConstant(
+ ExternalReference::copy_typed_array_elements_slice(isolate()));
+ CallCFunction4(MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::UintPtr(),
+ MachineType::UintPtr(), f, source, dest, start, end);
+}
+
+void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
+ TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function) {
+ Label next(this), if_unknown_type(this, Label::kDeferred);
+
+ int32_t elements_kinds[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ Label if_##type##array(this);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ Label* elements_kind_labels[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+ STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
+
+ Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
+ arraysize(elements_kinds));
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ BIND(&if_##type##array); \
+ { \
+ case_function(TYPE##_ELEMENTS, size, Context::TYPE##_ARRAY_FUN_INDEX); \
+ Goto(&next); \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ BIND(&if_unknown_type);
+ Unreachable();
+
+ BIND(&next);
+}
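
DispatchTypedArrayByElementsKind expands the TYPED_ARRAYS macro into a Switch with one label per fixed typed-array kind and invokes case_function with the kind, its element size, and its native-context constructor slot. A rough TypeScript analogue of the control flow (the table entries are illustrative, not the full kind list):

  type SwitchCase = (kind: string, size: number, ctorIndex: number) => void;
  const KINDS: Array<[string, number, number]> = [
    ["UINT8_ELEMENTS", 1, 0],
    ["INT32_ELEMENTS", 4, 1],
    ["FLOAT64_ELEMENTS", 8, 2],  // ... one entry per TYPED_ARRAYS kind
  ];
  function dispatchByElementsKind(kind: string, caseFn: SwitchCase): void {
    const entry = KINDS.find(([k]) => k === kind);
    if (entry === undefined) throw new Error("unreachable elements kind");
    caseFn(...entry);  // each case then jumps to the common join point
  }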
+
// ES #sec-get-%typedarray%.prototype.set
TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
@@ -998,6 +1228,193 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
ThrowTypeError(context, MessageTemplate::kNotTypedArray);
}
+// ES %TypedArray%.prototype.slice
+TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.slice";
+ Label call_c(this), call_memmove(this), if_count_is_not_zero(this),
+ if_typed_array_is_neutered(this, Label::kDeferred),
+ if_bigint_mixed_types(this, Label::kDeferred);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSTypedArray> source =
+ ValidateTypedArray(context, receiver, method_name);
+
+ TNode<Smi> source_length =
+ LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+  // Convert the start offset argument to an integer and calculate the
+  // relative offset.
+ TNode<Object> start = args.GetOptionalArgumentValue(0, SmiConstant(0));
+ TNode<Smi> start_index =
+ SmiTag(ConvertToRelativeIndex(context, start, SmiUntag(source_length)));
+
+  // Convert the end offset argument to an integer and calculate the relative
+  // offset. If the end offset is absent or undefined, use source_length as
+  // the end_index.
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ TNode<Smi> end_index =
+ Select<Smi>(IsUndefined(end), [=] { return source_length; },
+ [=] {
+ return SmiTag(ConvertToRelativeIndex(
+ context, end, SmiUntag(source_length)));
+ },
+ MachineRepresentation::kTagged);
+
+ // Create a result array by invoking TypedArraySpeciesCreate.
+ TNode<Smi> count = SmiMax(SmiSub(end_index, start_index), SmiConstant(0));
+ TNode<JSTypedArray> result_array =
+ SpeciesCreateByLength(context, source, count, method_name);
+
+ // If count is zero, return early.
+ GotoIf(SmiGreaterThan(count, SmiConstant(0)), &if_count_is_not_zero);
+ args.PopAndReturn(result_array);
+
+ BIND(&if_count_is_not_zero);
+  // Check whether the source array is neutered. We don't need to check the
+  // result array, since TypedArraySpeciesCreate already validated it.
+ CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(LoadObjectField(
+ result_array, JSTypedArray::kBufferOffset))));
+ TNode<Object> receiver_buffer =
+ LoadObjectField(CAST(receiver), JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(receiver_buffer), &if_typed_array_is_neutered);
+
+  // result_array could have a different type from source, or share the same
+  // buffer with the source, because of a custom species constructor.
+  // Use memmove only if source and result_array have the same type and do
+  // not share the same buffer.
+ TNode<Word32T> source_el_kind = LoadElementsKind(source);
+ TNode<Word32T> target_el_kind = LoadElementsKind(result_array);
+ GotoIfNot(Word32Equal(source_el_kind, target_el_kind), &call_c);
+
+ TNode<Object> target_buffer =
+ LoadObjectField(result_array, JSTypedArray::kBufferOffset);
+ Branch(WordEqual(receiver_buffer, target_buffer), &call_c, &call_memmove);
+
+ BIND(&call_memmove);
+ {
+ GotoIfForceSlowPath(&call_c);
+
+ TNode<IntPtrT> target_data_ptr =
+ UncheckedCast<IntPtrT>(LoadDataPtr(result_array));
+ TNode<IntPtrT> source_data_ptr =
+ UncheckedCast<IntPtrT>(LoadDataPtr(source));
+
+ TNode<IntPtrT> source_el_size = GetTypedArrayElementSize(source_el_kind);
+ TNode<IntPtrT> source_start_bytes =
+ IntPtrMul(SmiToIntPtr(start_index), source_el_size);
+ TNode<IntPtrT> source_start =
+ IntPtrAdd(source_data_ptr, source_start_bytes);
+
+ TNode<IntPtrT> count_bytes = IntPtrMul(SmiToIntPtr(count), source_el_size);
+
+#ifdef DEBUG
+ TNode<IntPtrT> target_byte_length =
+ LoadAndUntagObjectField(result_array, JSTypedArray::kByteLengthOffset);
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, target_byte_length));
+
+ TNode<IntPtrT> source_byte_length =
+ LoadAndUntagObjectField(source, JSTypedArray::kByteLengthOffset);
+ TNode<IntPtrT> source_size_in_bytes =
+ IntPtrSub(source_byte_length, source_start_bytes);
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(count_bytes, source_size_in_bytes));
+#endif // DEBUG
+
+ CallCMemmove(target_data_ptr, source_start, count_bytes);
+ args.PopAndReturn(result_array);
+ }
+
+ BIND(&call_c);
+ {
+ GotoIf(Word32NotEqual(IsBigInt64ElementsKind(source_el_kind),
+ IsBigInt64ElementsKind(target_el_kind)),
+ &if_bigint_mixed_types);
+
+ CallCCopyTypedArrayElementsSlice(
+ source, result_array, SmiToIntPtr(start_index), SmiToIntPtr(end_index));
+ args.PopAndReturn(result_array);
+ }
+
+ BIND(&if_typed_array_is_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+
+ BIND(&if_bigint_mixed_types);
+ ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
+}
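
The ported slice follows the spec's relative-index handling and copies with memmove when source and result have the same elements kind and distinct buffers; otherwise it falls back to the C copy routine, which refuses to mix BigInt and non-BigInt kinds. In JS terms:

  const src = new Int32Array([1, 2, 3, 4]);
  console.log(src.slice(1, 3));  // Int32Array [2, 3] (memmove fast path)
  console.log(src.slice(-2));    // Int32Array [3, 4] (relative indices)

  class Mixed extends BigInt64Array {}
  Object.defineProperty(Mixed, Symbol.species, {
    get() { return Int32Array; },  // non-BigInt target for a BigInt source
  });
  try {
    new Mixed([1n, 2n]).slice(0, 1);
  } catch (e) {
    console.log(e instanceof TypeError);  // true: kBigIntMixedTypes
  }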
+
+// ES %TypedArray%.prototype.subarray
+TF_BUILTIN(TypedArrayPrototypeSubArray, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.subarray";
+ Label offset_done(this);
+
+ TVARIABLE(Smi, var_begin);
+ TVARIABLE(Smi, var_end);
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ // 1. Let O be the this value.
+ // 3. If O does not have a [[TypedArrayName]] internal slot, throw a TypeError
+ // exception.
+ TNode<Object> receiver = args.GetReceiver();
+ ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, method_name);
+
+ TNode<JSTypedArray> source = CAST(receiver);
+
+ // 5. Let buffer be O.[[ViewedArrayBuffer]].
+ TNode<JSArrayBuffer> buffer = GetBuffer(context, source);
+ // 6. Let srcLength be O.[[ArrayLength]].
+ TNode<Smi> source_length =
+ LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+ // 7. Let relativeBegin be ? ToInteger(begin).
+ // 8. If relativeBegin < 0, let beginIndex be max((srcLength + relativeBegin),
+ // 0); else let beginIndex be min(relativeBegin, srcLength).
+ TNode<Object> begin = args.GetOptionalArgumentValue(0, SmiConstant(0));
+ var_begin =
+ SmiTag(ConvertToRelativeIndex(context, begin, SmiUntag(source_length)));
+
+ TNode<Object> end = args.GetOptionalArgumentValue(1, UndefinedConstant());
+ // 9. If end is undefined, let relativeEnd be srcLength;
+ var_end = source_length;
+ GotoIf(IsUndefined(end), &offset_done);
+
+ // else, let relativeEnd be ? ToInteger(end).
+ // 10. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd), 0);
+ // else let endIndex be min(relativeEnd, srcLength).
+ var_end =
+ SmiTag(ConvertToRelativeIndex(context, end, SmiUntag(source_length)));
+ Goto(&offset_done);
+
+ BIND(&offset_done);
+
+ // 11. Let newLength be max(endIndex - beginIndex, 0).
+ TNode<Smi> new_length =
+ SmiMax(SmiSub(var_end.value(), var_begin.value()), SmiConstant(0));
+
+ // 12. Let constructorName be the String value of O.[[TypedArrayName]].
+ // 13. Let elementSize be the Number value of the Element Size value specified
+ // in Table 52 for constructorName.
+ TNode<Word32T> element_kind = LoadElementsKind(source);
+ TNode<IntPtrT> element_size = GetTypedArrayElementSize(element_kind);
+
+ // 14. Let srcByteOffset be O.[[ByteOffset]].
+ TNode<Number> source_byte_offset =
+ LoadObjectField<Number>(source, JSTypedArray::kByteOffsetOffset);
+
+ // 15. Let beginByteOffset be srcByteOffset + beginIndex × elementSize.
+ TNode<Number> offset = SmiMul(var_begin.value(), SmiFromIntPtr(element_size));
+ TNode<Number> begin_byte_offset = NumberAdd(source_byte_offset, offset);
+
+ // 16. Let argumentsList be « buffer, beginByteOffset, newLength ».
+ // 17. Return ? TypedArraySpeciesCreate(O, argumentsList).
+ args.PopAndReturn(SpeciesCreateByArrayBuffer(
+ context, source, buffer, begin_byte_offset, new_length, method_name));
+}
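
Unlike slice, subarray creates a view over the same buffer rather than a copy, with beginByteOffset = srcByteOffset + beginIndex * elementSize:

  const whole = new Int32Array(new ArrayBuffer(16));  // 4 elements
  const view = whole.subarray(1, 3);                  // 2 elements, same buffer
  view[0] = 42;
  console.log(whole[1]);         // 42: writes through to the shared buffer
  console.log(view.byteOffset);  // 4:  1 * elementSize for Int32Array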
+
// ES #sec-get-%typedarray%.prototype-@@tostringtag
TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1045,7 +1462,6 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Node* context, Node* receiver, const char* method_name,
IterationKind iteration_kind) {
Label throw_bad_receiver(this, Label::kDeferred);
- Label throw_typeerror(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
@@ -1063,22 +1479,11 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Return(CreateArrayIterator(receiver, map, instance_type, context,
iteration_kind));
- VARIABLE(var_message, MachineRepresentation::kTagged);
BIND(&throw_bad_receiver);
- var_message.Bind(SmiConstant(MessageTemplate::kNotTypedArray));
- Goto(&throw_typeerror);
+ ThrowTypeError(context, MessageTemplate::kNotTypedArray, method_name);
BIND(&if_receiverisneutered);
- var_message.Bind(SmiConstant(MessageTemplate::kDetachedOperation));
- Goto(&throw_typeerror);
-
- BIND(&throw_typeerror);
- {
- Node* method_arg = StringConstant(method_name);
- Node* result = CallRuntime(Runtime::kThrowTypeError, context,
- var_message.value(), method_arg);
- Return(result);
- }
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
// ES6 #sec-%typedarray%.prototype.values
@@ -1107,6 +1512,427 @@ TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
context, receiver, "%TypedArray%.prototype.keys()", IterationKind::kKeys);
}
+// ES6 #sec-%typedarray%.of
+TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ // 1. Let len be the actual number of arguments passed to this function.
+ TNode<IntPtrT> length = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ // 2. Let items be the List of arguments passed to this function.
+ CodeStubArguments args(this, length, nullptr, INTPTR_PARAMETERS,
+ CodeStubArguments::ReceiverMode::kHasReceiver);
+
+ Label if_not_constructor(this, Label::kDeferred),
+ if_neutered(this, Label::kDeferred);
+
+ // 3. Let C be the this value.
+ // 4. If IsConstructor(C) is false, throw a TypeError exception.
+ TNode<Object> receiver = args.GetReceiver();
+ GotoIf(TaggedIsSmi(receiver), &if_not_constructor);
+ GotoIfNot(IsConstructor(receiver), &if_not_constructor);
+
+ // 5. Let newObj be ? TypedArrayCreate(C, len).
+ TNode<JSTypedArray> new_typed_array =
+ CreateByLength(context, receiver, SmiTag(length), "%TypedArray%.of");
+
+ TNode<Word32T> elements_kind = LoadElementsKind(new_typed_array);
+
+ // 6. Let k be 0.
+ // 7. Repeat, while k < len
+ // a. Let kValue be items[k].
+ // b. Let Pk be ! ToString(k).
+ // c. Perform ? Set(newObj, Pk, kValue, true).
+ // d. Increase k by 1.
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ TNode<FixedTypedArrayBase> elements =
+ CAST(LoadElements(new_typed_array));
+ BuildFastLoop(
+ IntPtrConstant(0), length,
+ [&](Node* index) {
+ TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS);
+ TNode<IntPtrT> intptr_index = UncheckedCast<IntPtrT>(index);
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ EmitBigTypedArrayElementStore(new_typed_array, elements,
+ intptr_index, item, context,
+ &if_neutered);
+ } else {
+ Node* value =
+ PrepareValueForWriteToTypedArray(item, kind, context);
+
+ // ToNumber may execute JavaScript code, which could neuter
+ // the array's buffer.
+ Node* buffer = LoadObjectField(new_typed_array,
+ JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_neutered);
+
+            // GC may move the backing store in ToNumber, so reload the
+            // backing store on every iteration of this loop.
+ TNode<RawPtrT> backing_store =
+ LoadFixedTypedArrayBackingStore(elements);
+ StoreElement(backing_store, kind, index, value,
+ INTPTR_PARAMETERS);
+ }
+ },
+ 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ });
+
+ // 8. Return newObj.
+ args.PopAndReturn(new_typed_array);
+
+ BIND(&if_not_constructor);
+ ThrowTypeError(context, MessageTemplate::kNotConstructor, receiver);
+
+ BIND(&if_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ "%TypedArray%.of");
+}
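
%TypedArray%.of creates the result via TypedArrayCreate on the receiver and stores each argument with the appropriate per-kind write, including the BigInt element store added above:

  console.log(Uint8Array.of(1, 2, 3));    // Uint8Array [1, 2, 3]
  console.log(BigInt64Array.of(1n, 2n));  // BigInt64Array [1n, 2n]
  // A non-constructor receiver fails the IsConstructor check (step 4):
  // (Uint8Array.of as any).call({}, 1);  // TypeError: kNotConstructor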
+
+TF_BUILTIN(IterableToList, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+
+ Label fast_path(this), slow_path(this), done(this);
+
+ TVARIABLE(JSArray, created_list);
+
+  // Fast path for when iteration of the iterable can be elided entirely.
+ // TODO(petermarshall): Port to CSA.
+ Node* elided =
+ CallRuntime(Runtime::kIterableToListCanBeElided, context, iterable);
+ CSA_ASSERT(this, IsBoolean(elided));
+ Branch(IsTrue(elided), &fast_path, &slow_path);
+
+ BIND(&fast_path);
+ {
+ created_list = CAST(iterable);
+ Goto(&done);
+ }
+
+ BIND(&slow_path);
+ {
+ IteratorBuiltinsAssembler iterator_assembler(state());
+
+ // 1. Let iteratorRecord be ? GetIterator(items, method).
+ IteratorRecord iterator_record =
+ iterator_assembler.GetIterator(context, iterable, iterator_fn);
+
+ // 2. Let values be a new empty List.
+ GrowableFixedArray values(state());
+
+ Variable* vars[] = {values.var_array(), values.var_length(),
+ values.var_capacity()};
+ Label loop_start(this, 3, vars), loop_end(this);
+ Goto(&loop_start);
+ // 3. Let next be true.
+ // 4. Repeat, while next is not false
+ BIND(&loop_start);
+ {
+ // a. Set next to ? IteratorStep(iteratorRecord).
+ TNode<Object> next = CAST(
+ iterator_assembler.IteratorStep(context, iterator_record, &loop_end));
+ // b. If next is not false, then
+ // i. Let nextValue be ? IteratorValue(next).
+ TNode<Object> next_value =
+ CAST(iterator_assembler.IteratorValue(context, next));
+ // ii. Append nextValue to the end of the List values.
+ values.Push(next_value);
+ Goto(&loop_start);
+ }
+ BIND(&loop_end);
+
+ // 5. Return values.
+ TNode<JSArray> js_array_values = values.ToJSArray(context);
+ created_list = js_array_values;
+ Goto(&done);
+ }
+
+ BIND(&done);
+ Return(created_list.value());
+}
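
IterableToList materializes the iterable into a JSArray, skipping the iterator protocol when the runtime says it can be elided. A behavioral sketch in TypeScript, under the assumption that elision only applies to plain arrays with an untampered iterator:

  function iterableToList<T>(iterable: Iterable<T>, canElide: boolean): T[] {
    if (canElide && Array.isArray(iterable)) return iterable;  // fast path
    const values: T[] = [];
    for (const value of iterable) values.push(value);  // IteratorStep/Value loop
    return values;
  }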
+
+// ES6 #sec-%typedarray%.from
+TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+
+ Label check_iterator(this), from_array_like(this), fast_path(this),
+ slow_path(this), create_typed_array(this),
+ if_not_constructor(this, Label::kDeferred),
+ if_map_fn_not_callable(this, Label::kDeferred),
+ if_iterator_fn_not_callable(this, Label::kDeferred),
+ if_neutered(this, Label::kDeferred);
+
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+ TNode<Object> source = args.GetOptionalArgumentValue(0);
+
+ // 5. If thisArg is present, let T be thisArg; else let T be undefined.
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(2);
+
+ // 1. Let C be the this value.
+ // 2. If IsConstructor(C) is false, throw a TypeError exception.
+ TNode<Object> receiver = args.GetReceiver();
+ GotoIf(TaggedIsSmi(receiver), &if_not_constructor);
+ GotoIfNot(IsConstructor(receiver), &if_not_constructor);
+
+ // 3. If mapfn is present and mapfn is not undefined, then
+ TNode<Object> map_fn = args.GetOptionalArgumentValue(1);
+ TVARIABLE(BoolT, mapping, Int32FalseConstant());
+ GotoIf(IsUndefined(map_fn), &check_iterator);
+
+ // a. If IsCallable(mapfn) is false, throw a TypeError exception.
+ // b. Let mapping be true.
+ // 4. Else, let mapping be false.
+ GotoIf(TaggedIsSmi(map_fn), &if_map_fn_not_callable);
+ GotoIfNot(IsCallable(map_fn), &if_map_fn_not_callable);
+ mapping = Int32TrueConstant();
+ Goto(&check_iterator);
+
+ TVARIABLE(Object, final_source);
+ TVARIABLE(Smi, final_length);
+
+  // We split up this builtin differently from the way it is written in the
+  // spec. The elements accessor already has efficient code for copying from a
+  // JSArray into a TypedArray, so we use it whenever possible. We only avoid
+  // the elements accessor when a mapping function is present, because it
+  // cannot handle that case; the presence of a mapping function is the slow
+  // path here. We also combine the two loops in the specification (starting
+  // at steps 7.e and 13) because they are essentially identical, which saves
+  // code size.
+
+ BIND(&check_iterator);
+ {
+ // 6. Let usingIterator be ? GetMethod(source, @@iterator).
+ TNode<Object> iterator_fn =
+ CAST(GetMethod(context, source, isolate()->factory()->iterator_symbol(),
+ &from_array_like));
+ GotoIf(TaggedIsSmi(iterator_fn), &if_iterator_fn_not_callable);
+ GotoIfNot(IsCallable(iterator_fn), &if_iterator_fn_not_callable);
+
+ // We are using the iterator.
+ Label if_length_not_smi(this, Label::kDeferred);
+ // 7. If usingIterator is not undefined, then
+ // a. Let values be ? IterableToList(source, usingIterator).
+ // b. Let len be the number of elements in values.
+ TNode<JSArray> values = CAST(
+ CallBuiltin(Builtins::kIterableToList, context, source, iterator_fn));
+
+ // This is not a spec'd limit, so it doesn't particularly matter when we
+ // throw the range error for typed array length > MaxSmi.
+ TNode<Object> raw_length = LoadJSArrayLength(values);
+ GotoIfNot(TaggedIsSmi(raw_length), &if_length_not_smi);
+
+ final_length = CAST(raw_length);
+ final_source = values;
+ Goto(&create_typed_array);
+
+ BIND(&if_length_not_smi);
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ raw_length);
+ }
+
+ BIND(&from_array_like);
+ {
+ Label if_length_not_smi(this, Label::kDeferred);
+ final_source = source;
+
+ // 10. Let len be ? ToLength(? Get(arrayLike, "length")).
+ TNode<Object> raw_length =
+ GetProperty(context, final_source.value(), LengthStringConstant());
+ final_length = ToSmiLength(raw_length, context, &if_length_not_smi);
+ Goto(&create_typed_array);
+
+ BIND(&if_length_not_smi);
+ ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
+ raw_length);
+ }
+
+ TVARIABLE(JSTypedArray, target_obj);
+
+ BIND(&create_typed_array);
+ {
+ // 7c/11. Let targetObj be ? TypedArrayCreate(C, «len»).
+ target_obj = CreateByLength(context, receiver, final_length.value(),
+ "%TypedArray%.from");
+
+ Branch(mapping.value(), &slow_path, &fast_path);
+ }
+
+ BIND(&fast_path);
+ {
+ Label done(this);
+ GotoIf(SmiEqual(final_length.value(), SmiConstant(0)), &done);
+
+ CallRuntime(Runtime::kTypedArrayCopyElements, context, target_obj.value(),
+ final_source.value(), final_length.value());
+ Goto(&done);
+
+ BIND(&done);
+ args.PopAndReturn(target_obj.value());
+ }
+
+ BIND(&slow_path);
+ TNode<Word32T> elements_kind = LoadElementsKind(target_obj.value());
+
+  // Steps 7.e/13: Copy the elements.
+ TNode<FixedTypedArrayBase> elements = CAST(LoadElements(target_obj.value()));
+ BuildFastLoop(
+ SmiConstant(0), final_length.value(),
+ [&](Node* index) {
+ TNode<Object> const k_value =
+ GetProperty(context, final_source.value(), index);
+
+ TNode<Object> const mapped_value =
+ CAST(CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg,
+ k_value, index));
+
+ TNode<IntPtrT> intptr_index = SmiUntag(index);
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ EmitBigTypedArrayElementStore(target_obj.value(), elements,
+ intptr_index, mapped_value,
+ context, &if_neutered);
+ } else {
+ Node* const final_value = PrepareValueForWriteToTypedArray(
+ mapped_value, kind, context);
+
+ // ToNumber may execute JavaScript code, which could neuter
+ // the array's buffer.
+ Node* buffer = LoadObjectField(target_obj.value(),
+ JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_neutered);
+
+              // GC may move the backing store in map_fn, so reload the
+              // backing store on each iteration of this loop.
+ TNode<RawPtrT> backing_store =
+ LoadFixedTypedArrayBackingStore(elements);
+ StoreElement(backing_store, kind, index, final_value,
+ SMI_PARAMETERS);
+ }
+ });
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ args.PopAndReturn(target_obj.value());
+
+ BIND(&if_not_constructor);
+ ThrowTypeError(context, MessageTemplate::kNotConstructor, receiver);
+
+ BIND(&if_map_fn_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, map_fn);
+
+ BIND(&if_iterator_fn_not_callable);
+ ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
+
+ BIND(&if_neutered);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation,
+ "%TypedArray%.from");
+}
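
The resulting %TypedArray%.from behavior, including the iterator path for strings and the early mapfn check:

  console.log(Uint16Array.from([1, 2, 3], x => x * 2));  // Uint16Array [2, 4, 6]
  console.log(Int8Array.from("123", Number));            // Int8Array [1, 2, 3]
  // A non-callable mapfn throws before any iteration (step 3.a):
  // Uint8Array.from([1], 42 as any);  // TypeError: kCalledNonCallable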
+
+// ES %TypedArray%.prototype.filter
+TF_BUILTIN(TypedArrayPrototypeFilter, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.filter";
+
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ CodeStubArguments args(
+ this, ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)));
+
+ Label if_callback_not_callable(this, Label::kDeferred),
+ detached(this, Label::kDeferred);
+
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ TNode<Object> receiver = args.GetReceiver();
+ TNode<JSTypedArray> source =
+ ValidateTypedArray(context, receiver, method_name);
+
+ // 3. Let len be O.[[ArrayLength]].
+ TNode<Smi> length = LoadObjectField<Smi>(source, JSTypedArray::kLengthOffset);
+
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
+ TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
+ GotoIf(TaggedIsSmi(callbackfn), &if_callback_not_callable);
+ GotoIfNot(IsCallable(callbackfn), &if_callback_not_callable);
+
+ // 5. If thisArg is present, let T be thisArg; else let T be undefined.
+ TNode<Object> this_arg = args.GetOptionalArgumentValue(1);
+
+ TNode<JSArrayBuffer> source_buffer =
+ LoadObjectField<JSArrayBuffer>(source, JSArrayBufferView::kBufferOffset);
+ TNode<Word32T> elements_kind = LoadElementsKind(source);
+ GrowableFixedArray values(state());
+ VariableList vars(
+ {values.var_array(), values.var_length(), values.var_capacity()}, zone());
+
+ // 6. Let kept be a new empty List.
+ // 7. Let k be 0.
+ // 8. Let captured be 0.
+ // 9. Repeat, while k < len
+ BuildFastLoop(
+ vars, SmiConstant(0), length,
+ [&](Node* index) {
+ GotoIf(IsDetachedBuffer(source_buffer), &detached);
+
+ TVARIABLE(Numeric, value);
+ // a. Let Pk be ! ToString(k).
+ // b. Let kValue be ? Get(O, Pk).
+ DispatchTypedArrayByElementsKind(
+ elements_kind,
+ [&](ElementsKind kind, int size, int typed_array_fun_index) {
+ TNode<IntPtrT> backing_store =
+ UncheckedCast<IntPtrT>(LoadDataPtr(source));
+ value = CAST(LoadFixedTypedArrayElementAsTagged(
+ backing_store, index, kind, ParameterMode::SMI_PARAMETERS));
+ });
+
+ // c. Let selected be ToBoolean(Call(callbackfn, T, kValue, k, O))
+ Node* selected =
+ CallJS(CodeFactory::Call(isolate()), context, callbackfn, this_arg,
+ value.value(), index, source);
+
+ Label true_continue(this), false_continue(this);
+ BranchIfToBooleanIsTrue(selected, &true_continue, &false_continue);
+
+ BIND(&true_continue);
+ // d. If selected is true, then
+ // i. Append kValue to the end of kept.
+ // ii. Increase captured by 1.
+ values.Push(value.value());
+ Goto(&false_continue);
+
+ BIND(&false_continue);
+ },
+ 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+
+ TNode<JSArray> values_array = values.ToJSArray(context);
+ TNode<Smi> captured = LoadFastJSArrayLength(values_array);
+
+ // 10. Let A be ? TypedArraySpeciesCreate(O, captured).
+ TNode<JSTypedArray> result_array =
+ SpeciesCreateByLength(context, source, captured, method_name);
+
+ // 11. Let n be 0.
+ // 12. For each element e of kept, do
+ // a. Perform ! Set(A, ! ToString(n), e, true).
+ // b. Increment n by 1.
+ CallRuntime(Runtime::kTypedArrayCopyElements, context, result_array,
+ values_array, captured);
+
+ // 13. Return A.
+ args.PopAndReturn(result_array);
+
+ BIND(&if_callback_not_callable);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, callbackfn);
+
+ BIND(&detached);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+}
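
The filter port collects kept elements into a growable list, then sizes the result via TypedArraySpeciesCreate with the captured count, so @@species applies to the result:

  const evens = new Int32Array([1, 2, 3, 4]).filter(x => x % 2 === 0);
  console.log(evens);  // Int32Array [2, 4], length equals the kept count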
+
#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.h b/deps/v8/src/builtins/builtins-typedarray-gen.h
new file mode 100644
index 0000000000..37f923dea6
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.h
@@ -0,0 +1,133 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
+#define V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<JSTypedArray> SpeciesCreateByLength(TNode<Context> context,
+ TNode<JSTypedArray> exemplar,
+ TNode<Smi> len,
+ const char* method_name);
+
+ protected:
+ void GenerateTypedArrayPrototypeGetter(Node* context, Node* receiver,
+ const char* method_name,
+ int object_offset);
+ void GenerateTypedArrayPrototypeIterationMethod(Node* context, Node* receiver,
+ const char* method_name,
+ IterationKind iteration_kind);
+
+ void ConstructByLength(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<Object> length, TNode<Smi> element_size);
+ void ConstructByArrayBuffer(TNode<Context> context,
+ TNode<JSTypedArray> holder,
+ TNode<JSArrayBuffer> buffer,
+ TNode<Object> byte_offset, TNode<Object> length,
+ TNode<Smi> element_size);
+ void ConstructByTypedArray(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSTypedArray> typed_array,
+ TNode<Smi> element_size);
+ void ConstructByArrayLike(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<HeapObject> array_like,
+ TNode<Object> initial_length,
+ TNode<Smi> element_size);
+ void ConstructByIterable(TNode<Context> context, TNode<JSTypedArray> holder,
+ TNode<JSReceiver> iterable,
+ TNode<Object> iterator_fn, TNode<Smi> element_size);
+
+ void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
+ TNode<Number> byte_offset, TNode<Number> byte_length);
+ void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
+ TNode<Map> map, TNode<Smi> length,
+ TNode<Number> byte_offset);
+
+ TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
+ TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
+ TNode<Number> byte_offset);
+ Node* LoadDataPtr(Node* typed_array);
+ TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
+
+ // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
+ TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
+
+ // Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS.
+ TNode<Word32T> IsBigInt64ElementsKind(TNode<Word32T> kind);
+
+  // Loads the elements kind of the given TypedArray instance.
+ TNode<Word32T> LoadElementsKind(TNode<JSTypedArray> typed_array);
+
+ // Returns the byte size of an element for a TypedArray elements kind.
+ TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
+
+ TNode<Object> GetDefaultConstructor(TNode<Context> context,
+ TNode<JSTypedArray> exemplar);
+
+ TNode<Object> TypedArraySpeciesConstructor(TNode<Context> context,
+ TNode<JSTypedArray> exemplar);
+
+ TNode<JSTypedArray> SpeciesCreateByArrayBuffer(TNode<Context> context,
+ TNode<JSTypedArray> exemplar,
+ TNode<JSArrayBuffer> buffer,
+ TNode<Number> byte_offset,
+ TNode<Smi> len,
+ const char* method_name);
+
+ TNode<JSTypedArray> CreateByLength(TNode<Context> context,
+ TNode<Object> constructor, TNode<Smi> len,
+ const char* method_name);
+
+ TNode<JSArrayBuffer> GetBuffer(TNode<Context> context,
+ TNode<JSTypedArray> array);
+
+ TNode<JSTypedArray> ValidateTypedArray(TNode<Context> context,
+ TNode<Object> obj,
+ const char* method_name);
+
+ // Fast path for setting a TypedArray (source) onto another TypedArray
+ // (target) at an element offset.
+ void SetTypedArraySource(TNode<Context> context, TNode<JSTypedArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset,
+ Label* call_runtime, Label* if_source_too_large);
+
+ void SetJSArraySource(TNode<Context> context, TNode<JSArray> source,
+ TNode<JSTypedArray> target, TNode<IntPtrT> offset,
+ Label* call_runtime, Label* if_source_too_large);
+
+ void CallCMemmove(TNode<IntPtrT> dest_ptr, TNode<IntPtrT> src_ptr,
+ TNode<IntPtrT> byte_length);
+
+ void CallCCopyFastNumberJSArrayElementsToTypedArray(
+ TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length, TNode<IntPtrT> offset);
+
+ void CallCCopyTypedArrayElementsToTypedArray(TNode<JSTypedArray> source,
+ TNode<JSTypedArray> dest,
+ TNode<IntPtrT> source_length,
+ TNode<IntPtrT> offset);
+
+ void CallCCopyTypedArrayElementsSlice(TNode<JSTypedArray> source,
+ TNode<JSTypedArray> dest,
+ TNode<IntPtrT> start,
+ TNode<IntPtrT> end);
+
+ typedef std::function<void(ElementsKind, int, int)> TypedArraySwitchCase;
+
+ void DispatchTypedArrayByElementsKind(
+ TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_TYPEDARRAY_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 18625c8d90..6fcc279c66 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -42,16 +42,6 @@ int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
: std::min<int64_t>(relative, maximum);
}
-MaybeHandle<JSTypedArray> TypedArraySpeciesCreateByLength(
- Isolate* isolate, Handle<JSTypedArray> exemplar, const char* method_name,
- int64_t length) {
- const int argc = 1;
- ScopedVector<Handle<Object>> argv(argc);
- argv[0] = isolate->factory()->NewNumberFromInt64(length);
- return JSTypedArray::SpeciesCreate(isolate, exemplar, argc, argv.start(),
- method_name);
-}
-
} // namespace
BUILTIN(TypedArrayPrototypeCopyWithin) {
@@ -124,10 +114,16 @@ BUILTIN(TypedArrayPrototypeFill) {
const char* method = "%TypedArray%.prototype.fill";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ ElementsKind kind = array->GetElementsKind();
Handle<Object> obj_value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj_value, Object::ToNumber(obj_value));
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
+ BigInt::FromObject(isolate, obj_value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value,
+ Object::ToNumber(obj_value));
+ }
int64_t len = array->length_value();
int64_t start = 0;
@@ -161,9 +157,9 @@ BUILTIN(TypedArrayPrototypeFill) {
DCHECK_LE(end, len);
DCHECK_LE(count, len);
- return array->GetElementsAccessor()->Fill(isolate, array, obj_value,
- static_cast<uint32_t>(start),
- static_cast<uint32_t>(end));
+ return ElementsAccessor::ForKind(kind)->Fill(isolate, array, obj_value,
+ static_cast<uint32_t>(start),
+ static_cast<uint32_t>(end));
}
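
With the BigInt element kinds, the fill value is converted with BigInt::FromObject instead of ToNumber, so plain Numbers are rejected:

  new BigInt64Array(3).fill(1n);            // BigInt64Array [1n, 1n, 1n]
  try {
    (new BigInt64Array(3) as any).fill(1);  // ToBigInt throws on a Number
  } catch (e) {
    console.log(e instanceof TypeError);    // true
  }
  new Float64Array(3).fill(1);              // non-BigInt kinds still use ToNumber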
BUILTIN(TypedArrayPrototypeIncludes) {
@@ -277,49 +273,5 @@ BUILTIN(TypedArrayPrototypeReverse) {
return *array;
}
-BUILTIN(TypedArrayPrototypeSlice) {
- HandleScope scope(isolate);
-
- Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.slice";
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
-
- int64_t len = array->length_value();
- int64_t start = 0;
- int64_t end = len;
- {
- Handle<Object> num = args.atOrUndefined(isolate, 1);
- if (!num->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, num,
- Object::ToInteger(isolate, num));
- start = CapRelativeIndex(num, 0, len);
-
- num = args.atOrUndefined(isolate, 2);
- if (!num->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, num,
- Object::ToInteger(isolate, num));
- end = CapRelativeIndex(num, 0, len);
- }
- }
- }
-
- int64_t count = std::max<int64_t>(end - start, 0);
-
- Handle<JSTypedArray> result_array;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_array,
- TypedArraySpeciesCreateByLength(isolate, array, method, count));
-
- // TODO(cwhan.tunz): should throw.
- if (V8_UNLIKELY(array->WasNeutered())) return *result_array;
-
- if (count == 0) return *result_array;
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- return *accessor->Slice(array, static_cast<uint32_t>(start),
- static_cast<uint32_t>(end), result_array);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index dc175e50b7..ad1763a292 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -86,8 +86,9 @@ Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
void Builtins::TearDown() { initialized_ = false; }
void Builtins::IterateBuiltins(RootVisitor* v) {
- v->VisitRootPointers(Root::kBuiltins, &builtins_[0],
- &builtins_[0] + builtin_count);
+ for (int i = 0; i < builtin_count; i++) {
+ v->VisitRootPointer(Root::kBuiltins, name(i), &builtins_[i]);
+ }
}
const char* Builtins::Lookup(byte* pc) {
@@ -170,30 +171,11 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN)
#undef CASE_OTHER
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFilterLoopLazyDeoptContinuation:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayEveryLoopLazyDeoptContinuation:
- case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEach:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayMapLoopLazyDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kArraySomeLoopLazyDeoptContinuation:
- case kConsoleAssert:
- return Callable(code, BuiltinDescriptor(isolate));
default:
+ Builtins::Kind kind = Builtins::KindOf(name);
+ if (kind == TFJ || kind == CPP) {
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
UNREACHABLE();
}
CallInterfaceDescriptor descriptor(isolate, key);
@@ -213,6 +195,22 @@ Address Builtins::CppEntryOf(int index) {
}
// static
+bool Builtins::IsBuiltin(Code* code) {
+ return Builtins::IsBuiltinId(code->builtin_index());
+}
+
+// static
+bool Builtins::IsOffHeapBuiltin(Code* code) {
+#ifdef V8_EMBEDDED_BUILTINS
+ return FLAG_stress_off_heap_code &&
+ Builtins::IsBuiltinId(code->builtin_index()) &&
+ Builtins::IsOffHeapSafe(code->builtin_index());
+#else
+ return false;
+#endif
+}
+
+// static
bool Builtins::IsLazy(int index) {
DCHECK(IsBuiltinId(index));
// There are a couple of reasons that builtins can require eager-loading,
@@ -245,12 +243,16 @@ bool Builtins::IsLazy(int index) {
case kArrayEveryLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayFilterLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReducePreLoopEagerDeoptContinuation:
case kArrayReduceLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArrayReduceLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
case kArrayReduceRightLoopEagerDeoptContinuation:
case kArrayReduceRightLoopLazyDeoptContinuation:
case kArraySomeLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
case kArraySomeLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kAsyncGeneratorAwaitCaught: // https://crbug.com/v8/6786.
+ case kAsyncGeneratorAwaitUncaught: // https://crbug.com/v8/6786.
case kCheckOptimizationMarker:
case kCompileLazy:
case kDeserializeLazy:
@@ -261,9 +263,11 @@ bool Builtins::IsLazy(int index) {
case kInterpreterEnterBytecodeDispatch:
case kInterpreterEntryTrampoline:
case kObjectConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kPromiseConstructorLazyDeoptContinuation: // crbug/v8/6786.
case kProxyConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kNumberConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kStringConstructor_ConstructStub: // https://crbug.com/v8/6787.
+ case kTypedArrayConstructor_ConstructStub: // https://crbug.com/v8/6787.
case kProxyConstructor: // https://crbug.com/v8/6787.
case kRecordWrite: // https://crbug.com/chromium/765301.
case kThrowWasmTrapDivByZero: // Required by wasm.
@@ -286,6 +290,1090 @@ bool Builtins::IsLazy(int index) {
}
// static
+bool Builtins::IsIsolateIndependent(int index) {
+ DCHECK(IsBuiltinId(index));
+ switch (index) {
+#ifdef DEBUG
+ case kAbortJS:
+ case kAllocateHeapNumber:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDivide:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kIncrement:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLoadField:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kMapPrototypeEntries:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberPrototypeValueOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeValueOf:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromiseResolveTrampoline:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kReflectHas:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kReturnReceiver:
+ case kSetPrototypeEntries:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+#endif
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToInteger:
+ case kTypedArrayConstructor:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#else
+ case kAbortJS:
+ case kAdd:
+ case kAllocateHeapNumber:
+ case kArrayEvery:
+ case kArrayEveryLoopContinuation:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindIndexLoopContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEach:
+ case kArrayForEachLoopContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayFrom:
+ case kArrayIncludes:
+ case kArrayIndexOf:
+ case kArrayIsArray:
+ case kArrayMapLoopContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayOf:
+ case kArrayPrototypeEntries:
+ case kArrayPrototypeFind:
+ case kArrayPrototypeFindIndex:
+ case kArrayPrototypeKeys:
+ case kArrayPrototypeSlice:
+ case kArrayPrototypeValues:
+ case kArrayReduce:
+ case kArrayReduceLoopContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRight:
+ case kArrayReduceRightLoopContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySome:
+ case kArraySomeLoopContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
+ case kAsyncFromSyncIteratorPrototypeNext:
+ case kAsyncFromSyncIteratorPrototypeReturn:
+ case kAsyncFromSyncIteratorPrototypeThrow:
+ case kAsyncFunctionAwaitFulfill:
+ case kAsyncFunctionAwaitReject:
+ case kAsyncFunctionPromiseCreate:
+ case kAsyncFunctionPromiseRelease:
+ case kAsyncGeneratorAwaitFulfill:
+ case kAsyncGeneratorAwaitReject:
+ case kAsyncGeneratorResumeNext:
+ case kAsyncGeneratorReturnClosedFulfill:
+ case kAsyncGeneratorReturnClosedReject:
+ case kAsyncGeneratorReturnFulfill:
+ case kAsyncGeneratorYieldFulfill:
+ case kAsyncIteratorValueUnwrap:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kCallProxy:
+ case kConstructFunction:
+ case kConstructProxy:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kCreateGeneratorObject:
+ case kCreateIterResultObject:
+ case kCreateRegExpLiteral:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDeleteProperty:
+ case kDivide:
+ case kEqual:
+ case kFastConsoleAssert:
+ case kFastNewClosure:
+ case kFastNewFunctionContextEval:
+ case kFastNewFunctionContextFunction:
+ case kFastNewObject:
+ case kFindOrderedHashMapEntry:
+ case kForInEnumerate:
+ case kForInFilter:
+ case kFunctionPrototypeHasInstance:
+ case kGeneratorPrototypeNext:
+ case kGeneratorPrototypeReturn:
+ case kGeneratorPrototypeThrow:
+ case kGetSuperConstructor:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ case kHasProperty:
+ case kIncrement:
+ case kInstanceOf:
+ case kKeyedLoadIC_Megamorphic:
+ case kKeyedLoadIC_PolymorphicName:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLessThan:
+ case kLessThanOrEqual:
+ case kLoadField:
+ case kLoadGlobalIC:
+ case kLoadGlobalICInsideTypeof:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC:
+ case kLoadIC_FunctionPrototype:
+ case kLoadIC_Noninlined:
+ case kLoadIC_Slow:
+ case kLoadIC_StringLength:
+ case kLoadIC_StringWrapperLength:
+ case kLoadICTrampoline:
+ case kLoadIC_Uninitialized:
+ case kMapPrototypeEntries:
+ case kMapPrototypeForEach:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNegate:
+ case kNewArgumentsElements:
+ case kNonNumberToNumber:
+ case kNonNumberToNumeric:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberConstructor:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberParseFloat:
+ case kNumberPrototypeValueOf:
+ case kNumberToString:
+ case kObjectConstructor:
+ case kObjectConstructor_ConstructStub:
+ case kObjectCreate:
+ case kObjectIs:
+ case kObjectKeys:
+ case kObjectPrototypeHasOwnProperty:
+ case kObjectPrototypeIsPrototypeOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeToString:
+ case kObjectPrototypeValueOf:
+ case kOrderedHashTableHealIndex:
+ case kOrdinaryHasInstance:
+ case kOrdinaryToPrimitive_Number:
+ case kOrdinaryToPrimitive_String:
+ case kPromiseAll:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseCatchFinally:
+ case kPromiseConstructor:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseFulfillReactionJob:
+ case kPromiseInternalConstructor:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromisePrototypeCatch:
+ case kPromisePrototypeFinally:
+ case kPromiseRace:
+ case kPromiseReject:
+ case kPromiseRejectReactionJob:
+ case kPromiseResolve:
+ case kPromiseResolveThenableJob:
+ case kPromiseResolveTrampoline:
+ case kPromiseThenFinally:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kProxyGetProperty:
+ case kProxyHasProperty:
+ case kProxySetProperty:
+ case kRecordWrite:
+ case kReflectHas:
+ case kRegExpConstructor:
+ case kRegExpPrototypeCompile:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeFlagsGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeReplace:
+ case kRegExpPrototypeSearch:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeSplit:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kResolvePromise:
+ case kReturnReceiver:
+ case kRunMicrotasks:
+ case kSameValue:
+ case kSetPrototypeEntries:
+ case kSetPrototypeForEach:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeHas:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStrictEqual:
+ case kStringCodePointAtUTF16:
+ case kStringCodePointAtUTF32:
+ case kStringConstructor:
+ case kStringEqual:
+ case kStringGreaterThan:
+ case kStringGreaterThanOrEqual:
+ case kStringIndexOf:
+ case kStringLessThan:
+ case kStringLessThanOrEqual:
+ case kStringPrototypeAnchor:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeCharCodeAt:
+ case kStringPrototypeCodePointAt:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeFontcolor:
+ case kStringPrototypeFontsize:
+ case kStringPrototypeIncludes:
+ case kStringPrototypeIndexOf:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeLink:
+ case kStringPrototypeMatch:
+ case kStringPrototypePadEnd:
+ case kStringPrototypePadStart:
+ case kStringPrototypeRepeat:
+ case kStringPrototypeReplace:
+ case kStringPrototypeSearch:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+ case kStringToLowerCaseIntl:
+#endif
+ case kStringPrototypeToString:
+ case kStringPrototypeValueOf:
+ case kStringRepeat:
+ case kStringToNumber:
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToBoolean:
+ case kToBooleanLazyDeoptContinuation:
+ case kToInteger:
+ case kToInteger_TruncateMinusZero:
+ case kToName:
+ case kToNumber:
+ case kToNumeric:
+ case kToString:
+ case kTypedArrayConstructor:
+ case kTypedArrayConstructor_ConstructStub:
+ case kTypedArrayPrototypeByteLength:
+ case kTypedArrayPrototypeByteOffset:
+ case kTypedArrayPrototypeEntries:
+ case kTypedArrayPrototypeEvery:
+ case kTypedArrayPrototypeFind:
+ case kTypedArrayPrototypeFindIndex:
+ case kTypedArrayPrototypeForEach:
+ case kTypedArrayPrototypeKeys:
+ case kTypedArrayPrototypeLength:
+ case kTypedArrayPrototypeReduce:
+ case kTypedArrayPrototypeReduceRight:
+ case kTypedArrayPrototypeSet:
+ case kTypedArrayPrototypeSlice:
+ case kTypedArrayPrototypeSome:
+ case kTypedArrayPrototypeSubArray:
+ case kTypedArrayPrototypeToStringTag:
+ case kTypedArrayPrototypeValues:
+ case kTypeof:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapLookupHashIndex:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#endif
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+// static
+bool Builtins::IsOffHeapSafe(int index) {
+#ifndef V8_EMBEDDED_BUILTINS
+ return false;
+#else
+ DCHECK(IsBuiltinId(index));
+ if (IsTooShortForOffHeapTrampoline(index)) return false;
+ switch (index) {
+#ifdef DEBUG
+ case kAbortJS:
+ case kAllocateHeapNumber:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDivide:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kIncrement:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLoadField:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kMapPrototypeEntries:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberPrototypeValueOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeValueOf:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromiseResolveTrampoline:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kReflectHas:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kReturnReceiver:
+ case kSetPrototypeEntries:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+#endif
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToInteger:
+ case kTypedArrayConstructor:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#else
+ case kAbortJS:
+ case kAdd:
+ case kAllocateHeapNumber:
+ case kArrayEvery:
+ case kArrayEveryLoopContinuation:
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindIndexLoopContinuation:
+ case kArrayFindIndexLoopEagerDeoptContinuation:
+ case kArrayFindIndexLoopLazyDeoptContinuation:
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEach:
+ case kArrayForEachLoopContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayFrom:
+ case kArrayIncludes:
+ case kArrayIndexOf:
+ case kArrayIsArray:
+ case kArrayMapLoopContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayOf:
+ case kArrayPrototypeEntries:
+ case kArrayPrototypeFind:
+ case kArrayPrototypeFindIndex:
+ case kArrayPrototypeKeys:
+ case kArrayPrototypeSlice:
+ case kArrayPrototypeValues:
+ case kArrayReduce:
+ case kArrayReduceLoopContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
+ case kArrayReducePreLoopEagerDeoptContinuation:
+ case kArrayReduceRight:
+ case kArrayReduceRightLoopContinuation:
+ case kArrayReduceRightLoopEagerDeoptContinuation:
+ case kArrayReduceRightLoopLazyDeoptContinuation:
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySome:
+ case kArraySomeLoopContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
+ case kAsyncFromSyncIteratorPrototypeNext:
+ case kAsyncFromSyncIteratorPrototypeReturn:
+ case kAsyncFromSyncIteratorPrototypeThrow:
+ case kAsyncFunctionAwaitFulfill:
+ case kAsyncFunctionAwaitReject:
+ case kAsyncFunctionPromiseCreate:
+ case kAsyncFunctionPromiseRelease:
+ case kAsyncGeneratorAwaitFulfill:
+ case kAsyncGeneratorAwaitReject:
+ case kAsyncGeneratorResumeNext:
+ case kAsyncGeneratorReturnClosedFulfill:
+ case kAsyncGeneratorReturnClosedReject:
+ case kAsyncGeneratorReturnFulfill:
+ case kAsyncGeneratorYieldFulfill:
+ case kAsyncIteratorValueUnwrap:
+ case kBitwiseNot:
+ case kBooleanPrototypeToString:
+ case kBooleanPrototypeValueOf:
+ case kCallProxy:
+ case kConstructFunction:
+ case kConstructProxy:
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kCreateGeneratorObject:
+ case kCreateIterResultObject:
+ case kCreateRegExpLiteral:
+ case kDatePrototypeGetDate:
+ case kDatePrototypeGetDay:
+ case kDatePrototypeGetFullYear:
+ case kDatePrototypeGetHours:
+ case kDatePrototypeGetMilliseconds:
+ case kDatePrototypeGetMinutes:
+ case kDatePrototypeGetMonth:
+ case kDatePrototypeGetSeconds:
+ case kDatePrototypeGetTime:
+ case kDatePrototypeGetTimezoneOffset:
+ case kDatePrototypeGetUTCDate:
+ case kDatePrototypeGetUTCDay:
+ case kDatePrototypeGetUTCFullYear:
+ case kDatePrototypeGetUTCHours:
+ case kDatePrototypeGetUTCMilliseconds:
+ case kDatePrototypeGetUTCMinutes:
+ case kDatePrototypeGetUTCMonth:
+ case kDatePrototypeGetUTCSeconds:
+ case kDatePrototypeToPrimitive:
+ case kDatePrototypeValueOf:
+ case kDecrement:
+ case kDeleteProperty:
+ case kDivide:
+ case kEqual:
+ case kFastConsoleAssert:
+ case kFastNewClosure:
+ case kFastNewFunctionContextEval:
+ case kFastNewFunctionContextFunction:
+ case kFastNewObject:
+ case kFindOrderedHashMapEntry:
+ case kForInEnumerate:
+ case kForInFilter:
+ case kFunctionPrototypeHasInstance:
+ case kGeneratorPrototypeNext:
+ case kGeneratorPrototypeReturn:
+ case kGeneratorPrototypeThrow:
+ case kGetSuperConstructor:
+ case kGlobalIsFinite:
+ case kGlobalIsNaN:
+ case kGreaterThan:
+ case kGreaterThanOrEqual:
+ case kHasProperty:
+ case kIncrement:
+ case kInstanceOf:
+ case kKeyedLoadIC_Megamorphic:
+ case kKeyedLoadIC_PolymorphicName:
+ case kKeyedLoadIC_Slow:
+ case kKeyedLoadICTrampoline:
+ case kKeyedStoreIC_Slow:
+ case kKeyedStoreICTrampoline:
+ case kLessThan:
+ case kLessThanOrEqual:
+ case kLoadField:
+ case kLoadGlobalIC:
+ case kLoadGlobalICInsideTypeof:
+ case kLoadGlobalICInsideTypeofTrampoline:
+ case kLoadGlobalIC_Slow:
+ case kLoadGlobalICTrampoline:
+ case kLoadIC:
+ case kLoadIC_FunctionPrototype:
+ case kLoadIC_Noninlined:
+ case kLoadIC_Slow:
+ case kLoadICTrampoline:
+ case kLoadIC_Uninitialized:
+ case kMapPrototypeEntries:
+ case kMapPrototypeForEach:
+ case kMapPrototypeGet:
+ case kMapPrototypeGetSize:
+ case kMapPrototypeHas:
+ case kMapPrototypeKeys:
+ case kMapPrototypeValues:
+ case kMathAcos:
+ case kMathAcosh:
+ case kMathAsin:
+ case kMathAsinh:
+ case kMathAtan:
+ case kMathAtan2:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCeil:
+ case kMathCos:
+ case kMathCosh:
+ case kMathExp:
+ case kMathExpm1:
+ case kMathFloor:
+ case kMathFround:
+ case kMathLog:
+ case kMathLog10:
+ case kMathLog1p:
+ case kMathLog2:
+ case kMathMax:
+ case kMathMin:
+ case kMathRound:
+ case kMathSign:
+ case kMathSin:
+ case kMathSinh:
+ case kMathSqrt:
+ case kMathTan:
+ case kMathTanh:
+ case kMathTrunc:
+ case kModulus:
+ case kMultiply:
+ case kNegate:
+ case kNewArgumentsElements:
+ case kNonNumberToNumber:
+ case kNonNumberToNumeric:
+ case kNonPrimitiveToPrimitive_Default:
+ case kNonPrimitiveToPrimitive_Number:
+ case kNonPrimitiveToPrimitive_String:
+ case kNumberConstructor:
+ case kNumberIsFinite:
+ case kNumberIsInteger:
+ case kNumberIsNaN:
+ case kNumberIsSafeInteger:
+ case kNumberParseFloat:
+ case kNumberPrototypeValueOf:
+ case kNumberToString:
+ case kObjectConstructor:
+ case kObjectConstructor_ConstructStub:
+ case kObjectCreate:
+ case kObjectIs:
+ case kObjectKeys:
+ case kObjectPrototypeHasOwnProperty:
+ case kObjectPrototypeIsPrototypeOf:
+ case kObjectPrototypeToLocaleString:
+ case kObjectPrototypeToString:
+ case kObjectPrototypeValueOf:
+ case kOrderedHashTableHealIndex:
+ case kOrdinaryHasInstance:
+ case kOrdinaryToPrimitive_Number:
+ case kOrdinaryToPrimitive_String:
+ case kPromiseAll:
+ case kPromiseCapabilityDefaultReject:
+ case kPromiseCapabilityDefaultResolve:
+ case kPromiseCatchFinally:
+ case kPromiseConstructor:
+ case kPromiseConstructorLazyDeoptContinuation:
+ case kPromiseFulfillReactionJob:
+ case kPromiseInternalConstructor:
+ case kPromiseInternalReject:
+ case kPromiseInternalResolve:
+ case kPromisePrototypeCatch:
+ case kPromisePrototypeFinally:
+ case kPromiseRace:
+ case kPromiseReject:
+ case kPromiseRejectReactionJob:
+ case kPromiseResolve:
+ case kPromiseResolveThenableJob:
+ case kPromiseResolveTrampoline:
+ case kPromiseThenFinally:
+ case kPromiseThrowerFinally:
+ case kPromiseValueThunkFinally:
+ case kProxyConstructor:
+ case kProxyGetProperty:
+ case kProxyHasProperty:
+ case kProxySetProperty:
+ case kRecordWrite:
+ case kReflectHas:
+ case kRegExpConstructor:
+ case kRegExpPrototypeCompile:
+ case kRegExpPrototypeDotAllGetter:
+ case kRegExpPrototypeFlagsGetter:
+ case kRegExpPrototypeGlobalGetter:
+ case kRegExpPrototypeIgnoreCaseGetter:
+ case kRegExpPrototypeMultilineGetter:
+ case kRegExpPrototypeReplace:
+ case kRegExpPrototypeSearch:
+ case kRegExpPrototypeSourceGetter:
+ case kRegExpPrototypeSplit:
+ case kRegExpPrototypeStickyGetter:
+ case kRegExpPrototypeUnicodeGetter:
+ case kResolvePromise:
+ case kReturnReceiver:
+ case kRunMicrotasks:
+ case kSameValue:
+ case kSetPrototypeEntries:
+ case kSetPrototypeForEach:
+ case kSetPrototypeGetSize:
+ case kSetPrototypeHas:
+ case kSetPrototypeValues:
+ case kStoreGlobalIC_Slow:
+ case kStoreGlobalICTrampoline:
+ case kStoreICTrampoline:
+ case kStrictEqual:
+ case kStringCodePointAtUTF16:
+ case kStringCodePointAtUTF32:
+ case kStringConstructor:
+ case kStringEqual:
+ case kStringGreaterThan:
+ case kStringGreaterThanOrEqual:
+ case kStringIndexOf:
+ case kStringLessThan:
+ case kStringLessThanOrEqual:
+ case kStringPrototypeAnchor:
+ case kStringPrototypeBig:
+ case kStringPrototypeBlink:
+ case kStringPrototypeBold:
+ case kStringPrototypeCharCodeAt:
+ case kStringPrototypeCodePointAt:
+ case kStringPrototypeConcat:
+ case kStringPrototypeFixed:
+ case kStringPrototypeFontcolor:
+ case kStringPrototypeFontsize:
+ case kStringPrototypeIncludes:
+ case kStringPrototypeIndexOf:
+ case kStringPrototypeItalics:
+ case kStringPrototypeIterator:
+ case kStringPrototypeLink:
+ case kStringPrototypeMatch:
+ case kStringPrototypePadEnd:
+ case kStringPrototypePadStart:
+ case kStringPrototypeRepeat:
+ case kStringPrototypeReplace:
+ case kStringPrototypeSearch:
+ case kStringPrototypeSmall:
+ case kStringPrototypeStrike:
+ case kStringPrototypeSub:
+ case kStringPrototypeSup:
+#ifdef V8_INTL_SUPPORT
+ case kStringPrototypeToLowerCaseIntl:
+ case kStringToLowerCaseIntl:
+#endif
+ case kStringPrototypeToString:
+ case kStringPrototypeValueOf:
+ case kStringRepeat:
+ case kStringToNumber:
+ case kSubtract:
+ case kSymbolPrototypeToPrimitive:
+ case kSymbolPrototypeToString:
+ case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapDivByZero:
+ case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapFloatUnrepresentable:
+ case kThrowWasmTrapFuncInvalid:
+ case kThrowWasmTrapFuncSigMismatch:
+ case kThrowWasmTrapMemOutOfBounds:
+ case kThrowWasmTrapRemByZero:
+ case kThrowWasmTrapUnreachable:
+ case kToBoolean:
+ case kToBooleanLazyDeoptContinuation:
+ case kToInteger:
+ case kToInteger_TruncateMinusZero:
+ case kToName:
+ case kToNumber:
+ case kToNumeric:
+ case kToString:
+ case kTypedArrayConstructor:
+ case kTypedArrayConstructor_ConstructStub:
+ case kTypedArrayPrototypeByteLength:
+ case kTypedArrayPrototypeByteOffset:
+ case kTypedArrayPrototypeEntries:
+ case kTypedArrayPrototypeEvery:
+ case kTypedArrayPrototypeFind:
+ case kTypedArrayPrototypeFindIndex:
+ case kTypedArrayPrototypeForEach:
+ case kTypedArrayPrototypeKeys:
+ case kTypedArrayPrototypeLength:
+ case kTypedArrayPrototypeReduce:
+ case kTypedArrayPrototypeReduceRight:
+ case kTypedArrayPrototypeSet:
+ case kTypedArrayPrototypeSlice:
+ case kTypedArrayPrototypeSome:
+ case kTypedArrayPrototypeSubArray:
+ case kTypedArrayPrototypeToStringTag:
+ case kTypedArrayPrototypeValues:
+ case kTypeof:
+ case kWasmStackGuard:
+ case kWeakMapGet:
+ case kWeakMapHas:
+ case kWeakMapLookupHashIndex:
+ case kWeakMapPrototypeDelete:
+ case kWeakMapPrototypeSet:
+ case kWeakSetHas:
+ case kWeakSetPrototypeAdd:
+ case kWeakSetPrototypeDelete:
+#endif // !DEBUG
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+#endif // V8_EMBEDDED_BUILTINS
+}
+
+// static
+bool Builtins::IsTooShortForOffHeapTrampoline(int index) {
+ switch (index) {
+ case kLoadIC_StringLength:
+ case kLoadIC_StringWrapperLength:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
Builtins::Kind Builtins::KindOf(int index) {
DCHECK(IsBuiltinId(index));
return builtin_metadata[index].kind;
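To make the new predicates concrete, a minimal sketch of how a caller might gate the --stress-off-heap-code copy step on them; CopyBuiltinOffHeap is a hypothetical helper, and the builtin_count sentinel is assumed from V8's builtins enum:

  // Sketch only, not part of the patch: select builtins for the off-heap copy.
  for (int i = 0; i < Builtins::builtin_count; ++i) {
    // IsOffHeapSafe() already returns false for builtins that are too short
    // for the trampoline, via IsTooShortForOffHeapTrampoline().
    if (!Builtins::IsOffHeapSafe(i)) continue;
    CopyBuiltinOffHeap(isolate, i);  // hypothetical helper
  }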
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index d9090dc67e..bf96469d19 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -109,11 +109,33 @@ class Builtins {
static bool IsCpp(int index);
static bool HasCppImplementation(int index);
+ // True iff the given code object is a builtin. Note that this does not
+ // necessarily mean that its kind is Code::BUILTIN.
+ static bool IsBuiltin(Code* code);
+
+ // True iff the given code object is a builtin with off-heap code.
+ static bool IsOffHeapBuiltin(Code* code);
+
// Returns true iff the given builtin can be lazy-loaded from the snapshot.
// This is true in general for most builtins with the exception of a few
// special cases such as CompileLazy and DeserializeLazy.
static bool IsLazy(int index);
+ // Helper methods used for testing isolate-independent builtins.
+ // TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
+ static bool IsIsolateIndependent(int index);
+
+ // This is the condition we currently use to determine whether a builtin is
+ // copied off-heap when --stress-off-heap-code is passed. Such builtins do not
+ // need to be isolate-independent, e.g. they can contain external references
+ // that point to one specific isolate. A further restriction is that there
+ // must be enough space for the trampoline.
+ static bool IsOffHeapSafe(int index);
+
+ // The off-heap trampoline is short but requires a certain minimal instruction
+ // size. This function states whether a given builtin is too short for it.
+ static bool IsTooShortForOffHeapTrampoline(int index);
+
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
new file mode 100644
index 0000000000..a4117bd5a2
--- /dev/null
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -0,0 +1,83 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/constants-table-builder.h"
+
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+BuiltinsConstantsTableBuilder::BuiltinsConstantsTableBuilder(Isolate* isolate)
+ : isolate_(isolate), map_(isolate->heap()) {
+ // Ensure this is only called once per Isolate.
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+
+ // And that the initial value of the builtins constants table can be treated
+ // as a constant, which means that codegen will load it using the root
+ // register.
+ DCHECK(isolate_->heap()->RootCanBeTreatedAsConstant(
+ Heap::kEmptyFixedArrayRootIndex));
+}
+
+uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
+#ifdef DEBUG
+ // Roots must not be inserted into the constants table as they are already
+ // accessible from the root list.
+ Heap::RootListIndex root_list_index;
+ DCHECK(!isolate_->heap()->IsRootHandle(object, &root_list_index));
+
+ // Not yet finalized.
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+#endif
+
+ uint32_t* maybe_key = map_.Find(object);
+ if (maybe_key == nullptr) {
+ uint32_t index = map_.size();
+ map_.Set(object, index);
+ return index;
+ } else {
+ return *maybe_key;
+ }
+}
+
+void BuiltinsConstantsTableBuilder::Finalize() {
+ HandleScope handle_scope(isolate_);
+
+ DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
+ isolate_->heap()->builtins_constants_table());
+
+ DCHECK_LT(0, map_.size());
+ Handle<FixedArray> table =
+ isolate_->factory()->NewFixedArray(map_.size(), TENURED);
+
+ Builtins* builtins = isolate_->builtins();
+ ConstantsMap::IteratableScope it_scope(&map_);
+ for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
+ uint32_t index = *it.entry();
+ Object* value = it.key();
+ if (value->IsCode() && Code::cast(value)->kind() == Code::BUILTIN) {
+ // Replace placeholder code objects with the real builtin.
+ // See also: SetupIsolateDelegate::PopulateWithPlaceholders.
+ // TODO(jgruber): Deduplicate placeholders and their corresponding
+ // builtin.
+ value = builtins->builtin(Code::cast(value)->builtin_index());
+ }
+ table->set(index, value);
+ }
+
+#ifdef DEBUG
+ for (int i = 0; i < map_.size(); i++) {
+ DCHECK(table->get(i)->IsHeapObject());
+ DCHECK_NE(isolate_->heap()->undefined_value(), table->get(i));
+ }
+#endif
+
+ isolate_->heap()->SetBuiltinsConstantsTable(*table);
+}
+
+} // namespace internal
+} // namespace v8
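A brief usage sketch of the builder's intended lifecycle, assuming an isolate and a handle `constant` in scope (the real call sites are in the code generator and isolate setup code, not shown in this hunk):

  BuiltinsConstantsTableBuilder builder(isolate);
  // During builtin generation: objects are deduplicated, so calling
  // AddObject() twice with the same object yields the same index.
  uint32_t index = builder.AddObject(constant);
  // After all builtins and bytecode handlers have been generated:
  builder.Finalize();  // installs the FixedArray on the isolate's root list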
diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h
new file mode 100644
index 0000000000..d251d5849b
--- /dev/null
+++ b/deps/v8/src/builtins/constants-table-builder.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
+#define V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
+
+#include "src/allocation.h"
+#include "src/base/macros.h"
+#include "src/handles.h"
+#include "src/identity-map.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Object;
+
+// Utility class to build the builtins constants table and store it on the root
+// list. The constants table contains constants used by builtins, and is there
+// to avoid directly embedding them into code objects, which would not be
+// possible for off-heap (and thus immutable) code objects.
+class BuiltinsConstantsTableBuilder final {
+ public:
+ explicit BuiltinsConstantsTableBuilder(Isolate* isolate);
+
+ // Returns the index within the builtins constants list for the given object,
+ // possibly adding the object to the cache. Objects are deduplicated.
+ uint32_t AddObject(Handle<Object> object);
+
+ // Should be called after all affected code (e.g. builtins and bytecode
+ // handlers) has been generated.
+ void Finalize();
+
+ private:
+ Isolate* isolate_;
+
+ // Maps objects to corresponding indices within the constants list.
+ typedef IdentityMap<uint32_t, FreeStoreAllocationPolicy> ConstantsMap;
+ ConstantsMap map_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuiltinsConstantsTableBuilder);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
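On the consumer side, the indirection the header comment describes amounts to the following pseudocode, sketched under the assumption that the table sits on the root list and is reached through the root register:

  // table  = roots[kBuiltinsConstantsTableRootIndex]  // load via the root register
  // object = table[index]                             // index from AddObject()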
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
new file mode 100644
index 0000000000..3a155e26f9
--- /dev/null
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/growable-fixed-array-gen.h"
+
+#include "src/compiler/code-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void GrowableFixedArray::Push(TNode<Object> const value) {
+ TNode<IntPtrT> const length = var_length_.value();
+ TNode<IntPtrT> const capacity = var_capacity_.value();
+
+ Label grow(this), store(this);
+ Branch(IntPtrEqual(capacity, length), &grow, &store);
+
+ BIND(&grow);
+ {
+ var_capacity_ = NewCapacity(capacity);
+ var_array_ = ResizeFixedArray(length, var_capacity_.value());
+
+ Goto(&store);
+ }
+
+ BIND(&store);
+ {
+ TNode<FixedArray> const array = var_array_.value();
+ StoreFixedArrayElement(array, length, value);
+
+ var_length_ = IntPtrAdd(length, IntPtrConstant(1));
+ }
+}
+
+TNode<JSArray> GrowableFixedArray::ToJSArray(TNode<Context> const context) {
+ const ElementsKind kind = PACKED_ELEMENTS;
+
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Map> const array_map = LoadJSArrayElementsMap(kind, native_context);
+
+ // Shrink to fit if necessary.
+ {
+ Label next(this);
+
+ TNode<IntPtrT> const length = var_length_.value();
+ TNode<IntPtrT> const capacity = var_capacity_.value();
+
+ GotoIf(WordEqual(length, capacity), &next);
+
+ var_array_ = ResizeFixedArray(length, length);
+ var_capacity_ = length;
+ Goto(&next);
+
+ BIND(&next);
+ }
+
+ TNode<Smi> const result_length = SmiTag(length());
+ TNode<JSArray> const result =
+ CAST(AllocateUninitializedJSArrayWithoutElements(array_map, result_length,
+ nullptr));
+
+ StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
+
+ return result;
+}
+
+TNode<IntPtrT> GrowableFixedArray::NewCapacity(
+ TNode<IntPtrT> current_capacity) {
+ CSA_ASSERT(this,
+ IntPtrGreaterThanOrEqual(current_capacity, IntPtrConstant(0)));
+
+ // Growth rate is analogous to JSObject::NewElementsCapacity:
+ // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
+
+ TNode<IntPtrT> const new_capacity =
+ IntPtrAdd(IntPtrAdd(current_capacity, WordShr(current_capacity, 1)),
+ IntPtrConstant(16));
+
+ return new_capacity;
+}
+
+TNode<FixedArray> GrowableFixedArray::ResizeFixedArray(
+ TNode<IntPtrT> const element_count, TNode<IntPtrT> const new_capacity) {
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(element_count, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, element_count));
+
+ TNode<FixedArray> const from_array = var_array_.value();
+
+ CodeStubAssembler::ExtractFixedArrayFlags flags;
+ flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
+ TNode<FixedArray> to_array = CAST(ExtractFixedArray(
+ from_array, nullptr, element_count, new_capacity, flags));
+
+ return to_array;
+}
+
+} // namespace internal
+} // namespace v8
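As a worked example of the growth rule above, written as plain C++ for reference (the TNode arithmetic computes exactly this):

  intptr_t NewCapacity(intptr_t c) { return c + (c >> 1) + 16; }
  // NewCapacity(0) == 16, NewCapacity(16) == 40, NewCapacity(40) == 76.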
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.h b/deps/v8/src/builtins/growable-fixed-array-gen.h
new file mode 100644
index 0000000000..f720659dee
--- /dev/null
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.h
@@ -0,0 +1,56 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
+#define V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+template <class T>
+using TNode = compiler::TNode<T>;
+
+// Utility class implementing a growable fixed array through CSA.
+class GrowableFixedArray : public CodeStubAssembler {
+ public:
+ explicit GrowableFixedArray(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state),
+ var_array_(this),
+ var_length_(this),
+ var_capacity_(this) {
+ var_array_ = EmptyFixedArrayConstant();
+ var_capacity_ = IntPtrConstant(0);
+ var_length_ = IntPtrConstant(0);
+ }
+
+ TNode<IntPtrT> length() const { return var_length_.value(); }
+
+ TVariable<FixedArray>* var_array() { return &var_array_; }
+ TVariable<IntPtrT>* var_length() { return &var_length_; }
+ TVariable<IntPtrT>* var_capacity() { return &var_capacity_; }
+
+ void Push(TNode<Object> const value);
+
+ TNode<JSArray> ToJSArray(TNode<Context> const context);
+
+ private:
+ TNode<IntPtrT> NewCapacity(TNode<IntPtrT> current_capacity);
+
+ // Creates a new array with {new_capacity} and copies the first
+ // {element_count} elements from the current array.
+ TNode<FixedArray> ResizeFixedArray(TNode<IntPtrT> const element_count,
+ TNode<IntPtrT> const new_capacity);
+
+ private:
+ TVariable<FixedArray> var_array_;
+ TVariable<IntPtrT> var_length_;
+ TVariable<IntPtrT> var_capacity_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
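A minimal sketch of how a CSA builtin might use the class, assuming an assembler state and input nodes `value` and `context` (the class is factored out for reuse by CSA builtins; the exact call sites are elsewhere in this patch):

  GrowableFixedArray array(state());                 // starts empty, capacity 0
  array.Push(value);                                 // grows the backing store on demand
  TNode<JSArray> result = array.ToJSArray(context);  // shrinks to fit, wraps in a JSArray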
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 368e6670c1..3319dd0c51 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -101,7 +101,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ push(edi);
__ CallRuntime(function_id, 1);
- __ mov(ebx, eax);
+ __ mov(ecx, eax);
// Restore target function and new target.
__ pop(edx);
@@ -110,15 +110,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(eax);
}
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-}
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
namespace {
@@ -224,7 +218,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorMask));
+ Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -345,7 +339,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
__ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -590,6 +584,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -748,10 +743,12 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
edx, eax, feedback_vector);
- __ add(optimized_code_entry, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ Move(ecx, optimized_code_entry);
+ __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ pop(edx);
__ pop(eax);
- __ jmp(optimized_code_entry);
+ __ jmp(ecx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -767,10 +764,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -780,11 +780,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
- __ j(above, &load_size, Label::kNear);
+ __ j(above, &process_bytecode, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
@@ -792,7 +792,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
+ __ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -801,8 +801,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ add(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpb(bytecode, \
+ Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ j(equal, if_return, Label::kNear);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
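RETURN_BYTECODE_LIST enumerates the interpreter's frame-exiting bytecodes (defined in src/interpreter/bytecodes.h), so the macro above expands to one compare-and-branch per entry; roughly, and assuming kReturn is in the list:

  // __ cmpb(bytecode, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
  // __ j(equal, if_return, Label::kNear);
  // ...one such pair per bytecode in RETURN_BYTECODE_LIST...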
@@ -828,7 +837,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -931,9 +940,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ call(ebx);
+ __ mov(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -946,16 +956,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ cmpb(ebx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ j(equal, &do_return, Label::kNear);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, edx);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1262,9 +1269,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
- times_pointer_size, 0));
- __ jmp(ebx);
+ __ mov(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, ebx, times_pointer_size, 0));
+ __ jmp(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1280,8 +1288,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, ebx, edx);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, ebx, ecx,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ mov(ebx, kInterpreterBytecodeOffsetRegister);
@@ -1289,6 +1299,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx);
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1306,7 +1320,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = ebx;
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1319,7 +1333,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1348,7 +1366,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1531,6 +1549,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
 // which has been reset to the compile lazy builtin.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
@@ -2093,7 +2112,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2566,6 +2585,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// eax : expected number of arguments
// edx : new target (passed through to callee)
// edi : function (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
@@ -2581,6 +2601,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 // Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(ecx);
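The recurring change in this file is that indirect jumps into JSFunction code now go through the fixed kJavaScriptCallCodeStartRegister (ecx on ia32; a2 in the MIPS files below), with a static_assert documenting the ABI at each site. The repeated ia32 sequence, taken verbatim from the hunks above:

  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
  __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
  __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ jmp(ecx);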
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/src/builtins/mips/OWNERS
index 978563cab5..cf2df277c9 100644
--- a/deps/v8/src/builtins/mips/OWNERS
+++ b/deps/v8/src/builtins/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 7af02bb32e..e2d4421f86 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -154,12 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Jump(at, a2, Code::kHeaderSize - kHeapObjectTag);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -181,7 +175,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
- __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
namespace {
@@ -285,7 +281,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
@@ -406,7 +402,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -656,8 +652,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(a3, a1);
__ Move(a1, t0);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
__ bind(&prepare_step_in_if_stepping);
@@ -807,7 +805,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Jump(optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
 // closure's code.
@@ -821,10 +821,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -834,10 +837,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
- __ Branch(&load_size, hi, bytecode, Operand(1));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(1));
__ Branch(&extra_wide, eq, bytecode, Operand(1));
// Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +849,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +859,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Lsa(scratch2, bytecode_size_table, bytecode, 2);
__ lw(scratch2, MemOperand(scratch2));
__ Addu(bytecode_offset, bytecode_offset, scratch2);
@@ -886,7 +897,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -992,10 +1003,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ lbu(a0, MemOperand(a0));
- __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
- __ lw(at, MemOperand(at));
- __ Call(at);
+ __ lbu(t3, MemOperand(a0));
+ __ Lsa(at, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
+ __ lw(kJavaScriptCallCodeStartRegister, MemOperand(at));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1007,18 +1018,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lw(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
- __ Branch(&do_return, eq, a1,
- Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1219,10 +1226,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ lbu(a1, MemOperand(a1));
- __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
- __ lw(a1, MemOperand(a1));
- __ Jump(a1);
+ __ lbu(t3, MemOperand(a1));
+ __ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
+ __ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1241,14 +1248,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ lbu(a1, MemOperand(a1));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1266,7 +1279,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = a2;
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1280,7 +1293,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1309,7 +1326,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
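The kFeedbackVectorOffset -> kFeedbackCellOffset switch in these hunks adds one indirection: the closure now points at a FeedbackCell whose value slot holds the feedback vector, or undefined when none has been allocated yet. A simplified sketch; the struct layouts are assumptions:

    // Simplified stand-ins for the heap objects involved.
    struct Cell { void* value; };                // Cell::kValueOffset
    struct JSFunction { Cell* feedback_cell; };  // JSFunction::kFeedbackCellOffset

    void* LoadFeedbackVector(const JSFunction* closure) {
      return closure->feedback_cell->value;  // two dependent loads, as above
    }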
@@ -1484,8 +1501,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular JS by re-calling the function
// which has been reset to the compile lazy builtin.
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
}
namespace {
@@ -1984,7 +2003,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
@@ -2510,8 +2529,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0 : expected number of arguments
// a1 : function (passed through to callee)
// a3 : new target (passed through to callee)
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Call(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(a2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2524,8 +2545,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(a2);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/builtins/mips64/OWNERS
+++ b/deps/v8/src/builtins/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 266393070c..80ac1fadb1 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -154,13 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -181,8 +174,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
- __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
namespace {
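The static_assert repeated across these hunks pins the symbolic kJavaScriptCallCodeStartRegister to a concrete machine register, so the hand-written sequences and the calling convention cannot drift apart silently. A self-contained sketch of the pattern; the register encoding is invented:

    struct Register {
      int code;
      constexpr bool operator==(Register other) const { return code == other.code; }
    };

    constexpr Register a2 = {6};  // invented encoding for MIPS a2
    constexpr Register kJavaScriptCallCodeStartRegister = a2;

    // Compilation fails if someone reassigns the convention register.
    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");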
@@ -287,7 +281,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kDerivedConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
// If not derived class constructor: Allocate the new receiver object.
@@ -408,7 +402,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t2, t2, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -547,6 +541,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ Move(a3, a1);
__ Move(a1, a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a2);
@@ -806,9 +801,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ Daddu(optimized_code_entry, optimized_code_entry,
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Daddu(a2, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(a2);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -822,10 +819,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -834,10 +834,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
- __ Branch(&load_size, hi, bytecode, Operand(1));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(1));
__ Branch(&extra_wide, eq, bytecode, Operand(1));
// Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +846,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size);
+ __ jmp(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +856,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
__ Lw(scratch2, MemOperand(scratch2));
__ Daddu(bytecode_offset, bytecode_offset, scratch2);
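RETURN_BYTECODE_LIST is an X-macro: applying it to JUMP_IF_EQUAL emits one compare-and-branch per return-like bytecode. A reduced sketch of the expansion; the two-entry list is an assumption, the real list lives in src/interpreter/bytecodes.h:

    #define RETURN_BYTECODE_LIST(V) \
      V(Return)                     \
      V(SuspendGenerator)

    enum class Bytecode { kReturn, kSuspendGenerator };

    bool IsReturnBytecode(Bytecode bytecode) {
    #define JUMP_IF_EQUAL(NAME) \
      if (bytecode == Bytecode::k##NAME) return true;
      RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
    #undef JUMP_IF_EQUAL
      return false;  // fall through: advance the offset instead
    }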
@@ -886,7 +894,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -992,10 +1000,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
masm->isolate())));
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ Lbu(a0, MemOperand(a0));
- __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
- __ Ld(at, MemOperand(at));
- __ Call(at);
+ __ Lbu(a7, MemOperand(a0));
+ __ Dlsa(at, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(at));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1008,17 +1016,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a1, MemOperand(a1));
- __ Branch(&do_return, eq, a1,
- Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
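After the change, the trampoline's post-call path has a single decision point: reload the saved offset, and AdvanceBytecodeOffsetOrReturn either branches to do_return or falls through to re-dispatch. Restated as plain C++; the helper stubs and opcode value are invented:

    #include <cstdint>

    bool IsReturnBytecode(std::uint8_t op) { return op == 0xAB; }  // stub
    int BytecodeSize(std::uint8_t) { return 1; }                   // stub
    void RunHandler(std::uint8_t) {}                               // stub

    void TrampolineLoop(const std::uint8_t* bytecode_array, int offset) {
      for (;;) {
        RunHandler(bytecode_array[offset]);          // do_dispatch
        std::uint8_t next = bytecode_array[offset];  // reload after the call
        if (IsReturnBytecode(next)) return;          // branch to do_return
        offset += BytecodeSize(next);                // advance, then loop
      }
    }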
@@ -1219,10 +1224,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ Lbu(a1, MemOperand(a1));
- __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
- __ Ld(a1, MemOperand(a1));
- __ Jump(a1);
+ __ Lbu(a7, MemOperand(a1));
+ __ Dlsa(a1, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1241,14 +1246,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Lbu(a1, MemOperand(a1));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
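The new if_return arm here is purely defensive: BytecodeAdvance is only requested for bytecodes that fall through, so seeing a return bytecode means the caller is broken. The invariant, sketched with a placeholder opcode value:

    #include <cstdint>
    #include <cstdlib>

    constexpr std::uint8_t kReturnOpcode = 0xAB;  // placeholder value

    void AdvanceOrDie(std::uint8_t opcode) {
      if (opcode == kReturnOpcode) {
        std::abort();  // mirrors __ Abort(AbortReason::kInvalidBytecodeAdvance)
      }
      // otherwise: bump the saved offset and re-enter dispatch
    }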
@@ -1266,7 +1277,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = a2;
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1280,7 +1291,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1309,7 +1324,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -1486,9 +1501,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular JS by re-calling the function
// which has been reset to the compile lazy builtin.
- __ Ld(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t0);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
}
namespace {
@@ -1624,6 +1640,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load receiver into a1, argArray into a2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
@@ -1732,6 +1749,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
@@ -1786,6 +1804,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register undefined_value = a4;
Register scratch = a5;
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// new.target into a3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
@@ -2008,7 +2028,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorMask));
+ __ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
@@ -2531,9 +2551,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0 : expected number of arguments
// a1 : function (passed through to callee)
// a3 : new target (passed through to callee)
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(a2);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2546,9 +2567,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
- __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a2);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 34da70ff0f..7ae635b0c1 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -179,8 +172,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(r3, r4, r6);
__ SmiUntag(r3);
}
- __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
namespace {
@@ -293,7 +287,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7, SharedFunctionInfo::kDerivedConstructorMask, r0);
+ __ TestBitMask(r7, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver, cr0);
// If not derived class constructor: Allocate the new receiver object.
@@ -420,7 +414,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r7, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r7, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r7, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver, cr0);
} else {
@@ -563,9 +557,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ mr(r6, r4);
__ mr(r4, r7);
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
__ bind(&prepare_step_in_if_stepping);
@@ -827,10 +822,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ addi(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ addi(r5, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mr(ip, optimized_code_entry);
- __ Jump(optimized_code_entry);
+ __ mr(ip, r5);
+ __ Jump(r5);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -844,10 +840,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
@@ -857,11 +856,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpi(bytecode, Operand(0x1));
- __ bgt(&load_size);
+ __ bgt(&process_bytecode);
__ beq(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +868,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
+ __ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -879,7 +878,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpi(bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ beq(if_return);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftImm(scratch2, bytecode, Operand(2));
__ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
__ add(bytecode_offset, bytecode_offset, scratch2);
@@ -908,7 +917,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
@@ -1021,11 +1030,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Call(ip);
+ __ ShiftLeftImm(r6, r6, Operand(kPointerSizeLog2));
+ __ LoadPX(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r6));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1039,16 +1049,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ cmpi(r4, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ beq(&do_return);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r4, r5,
+ &do_return);
__ b(&do_dispatch);
__ bind(&do_return);
@@ -1251,11 +1258,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Jump(ip);
+ __ ShiftLeftImm(ip, ip, Operand(kPointerSizeLog2));
+ __ LoadPX(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1271,8 +1279,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r4, r5);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r4, r5,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
@@ -1280,6 +1290,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1297,7 +1311,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r5;
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
@@ -1311,7 +1325,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1340,7 +1358,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
@@ -1524,9 +1542,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular JS by re-calling the function
// which has been reset to the compile lazy builtin.
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
}
namespace {
@@ -2051,7 +2070,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor, cr0);
// Enter the context of the function; ToObject has to run in the function
@@ -2443,8 +2462,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r3, r5);
__ blt(&too_few);
__ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -2460,7 +2477,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
// adjust for return address and receiver
@@ -2474,7 +2490,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r5: expected number of arguments
// r6: new target (passed through to callee)
// r7: copy end address
- // ip: code entry to call
Label copy;
__ bind(&copy);
@@ -2498,7 +2513,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
@@ -2507,7 +2521,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
@@ -2521,7 +2534,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
- // ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
__ sub(r7, fp, r7);
@@ -2543,7 +2555,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3 : expected number of arguments
// r4 : function (passed through to callee)
// r6 : new target (passed through to callee)
- __ CallJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(r5);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2556,7 +2571,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r5);
__ bind(&stack_overflow);
{
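Note the shape of the ppc refactoring above: ip was previously loaded with the code entry at the top of the adaptor and had to survive every copy loop (hence the now-deleted "ip: code entry to call" register comments); each exit now reloads the entry into r5 right before the call or jump. A schematic sketch with invented types:

    #include <cstdint>

    struct JSFunction { std::uintptr_t code_entry; };  // stand-in for the Code load

    void CallAdapted(const JSFunction* fn, void (*call_js_entry)(std::uintptr_t)) {
      // ... copy and pad arguments, free to clobber scratch registers ...
      std::uintptr_t entry = fn->code_entry;  // reloaded here, not pinned up front
      call_js_entry(entry);                   // __ CallJSEntry(r5)
    }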
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 020b04b91d..9d7bc3fb80 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -179,8 +172,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(r2, r3, r5);
__ SmiUntag(r2);
}
- __ AddP(ip, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
namespace {
@@ -288,7 +282,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kDerivedConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
@@ -414,7 +408,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver);
} else {
__ b(&use_receiver);
@@ -558,9 +552,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// undefined because generator functions are non-constructable.
__ LoadRR(r5, r3);
__ LoadRR(r3, r6);
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
__ bind(&prepare_step_in_if_stepping);
@@ -830,9 +825,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ AddP(optimized_code_entry, optimized_code_entry,
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ AddP(r4, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(optimized_code_entry);
+ __ Jump(r4);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -846,10 +842,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
@@ -859,11 +858,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ CmpP(bytecode, Operand(0x1));
- __ bgt(&load_size);
+ __ bgt(&process_bytecode);
__ beq(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -871,7 +870,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddP(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ b(&load_size);
+ __ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -881,7 +880,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ CmpP(bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ beq(if_return);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftP(scratch2, bytecode, Operand(2));
__ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
__ AddP(bytecode_offset, bytecode_offset, scratch2);
@@ -911,7 +920,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
@@ -1020,11 +1029,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Call(ip);
+ __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
+ __ LoadP(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, r5));
+ __ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1038,16 +1048,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ CmpP(r3, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ beq(&do_return);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r3, r4,
+ &do_return);
__ b(&do_dispatch);
__ bind(&do_return);
@@ -1248,11 +1255,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
+ __ LoadlB(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ Jump(ip);
+ __ ShiftLeftP(ip, ip, Operand(kPointerSizeLog2));
+ __ LoadP(kJavaScriptCallCodeStartRegister,
+ MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ Jump(kJavaScriptCallCodeStartRegister);
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1268,8 +1276,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, r3, r4);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, r3, r4,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
@@ -1277,6 +1287,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1294,7 +1308,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = r4;
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
@@ -1308,7 +1322,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1337,7 +1355,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
@@ -1520,9 +1538,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular JS by re-calling the function
// which has been reset to the compile lazy builtin.
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
}
namespace {
@@ -2048,7 +2067,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBitMask(r5, SharedFunctionInfo::kClassConstructorMask, r0);
+ __ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
@@ -2442,8 +2461,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ CmpP(r2, r4);
__ blt(&too_few);
__ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -2459,7 +2476,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r2, r2);
__ AddP(r2, fp);
// adjust for return address and receiver
@@ -2473,7 +2489,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: expected number of arguments
// r5: new target (passed through to callee)
// r6: copy end address
- // ip: code entry to call
Label copy;
__ bind(&copy);
@@ -2497,7 +2512,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
__ SmiToPtrArrayOffset(r2, r2);
__ lay(r2, MemOperand(r2, fp));
@@ -2506,7 +2520,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
- // ip: code entry to call
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
@@ -2519,7 +2532,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r3: function
// r4: expected number of arguments
- // ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ SubP(r6, fp, r6);
@@ -2541,7 +2553,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r2 : expected number of arguments
// r3 : function (passed through to callee)
// r5 : new target (passed through to callee)
- __ CallJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallJSEntry(r4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2554,7 +2569,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ JumpToJSEntry(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(r4);
__ bind(&stack_overflow);
{
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 5a09658867..d30cd02ab5 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -186,7 +186,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
if (!target->is_builtin()) continue;
Code* new_target =
Code::cast(builtins->builtins_[target->builtin_index()]);
- rinfo->set_target_address(isolate, new_target->instruction_start(),
+ rinfo->set_target_address(new_target->instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rinfo->rmode()));
@@ -202,7 +202,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
flush_icache = true;
}
if (flush_icache) {
- Assembler::FlushICache(isolate, code->instruction_start(),
+ Assembler::FlushICache(code->instruction_start(),
code->instruction_size());
}
}
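These setup-builtins hunks drop the Isolate* parameter from the patching and cache-flush calls; the underlying pattern is unchanged: rewrite embedded targets, then flush the instruction cache over the patched range before the code runs. A sketch with an invented flush helper:

    #include <cstddef>
    #include <cstdint>

    void FlushICacheRange(std::uint8_t*, std::size_t) {
      // a real implementation would issue the platform's icache flush here
    }

    void PatchAndFlush(std::uint8_t* start, std::size_t size, bool patched) {
      // ... rewrite relocated builtin call targets within [start, start + size) ...
      if (patched) FlushICacheRange(start, size);  // only when something changed
    }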
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index cd35abb362..898fe9c14c 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -87,15 +87,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movp(kScratchRegister,
- FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(kScratchRegister,
- FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
- __ jmp(kScratchRegister);
-}
-
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
@@ -115,7 +106,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Push(rdi);
__ CallRuntime(function_id, 1);
- __ movp(rbx, rax);
+ __ movp(rcx, rax);
// Restore target function and new target.
__ Pop(rdx);
@@ -123,8 +114,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Pop(rax);
__ SmiToInteger32(rax, rax);
}
- __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
- __ jmp(rbx);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
}
namespace {
@@ -230,7 +222,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kDerivedConstructorMask));
+ Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
@@ -350,7 +342,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -660,6 +652,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
@@ -820,9 +813,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// register.
ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
scratch2, scratch3, feedback_vector);
- __ addp(optimized_code_entry,
- Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(optimized_code_entry);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ Move(rcx, optimized_code_entry);
+ __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(rcx);
// Optimized code slot contains deoptimized code, evict it and re-enter the
// closure's code.
@@ -836,10 +830,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
- Register bytecode_offset, Register bytecode,
- Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Label* if_return) {
Register bytecode_size_table = scratch1;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -848,11 +845,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
ExternalReference::bytecode_size_table_address(masm->isolate()));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
- Label load_size, extra_wide;
+ Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
- __ j(above, &load_size, Label::kNear);
+ __ j(above, &process_bytecode, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
@@ -860,7 +857,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
- __ jmp(&load_size, Label::kNear);
+ __ jmp(&process_bytecode, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -869,8 +866,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ addp(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
- // Load the size of the current bytecode.
- __ bind(&load_size);
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ cmpb(bytecode, \
+ Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+ __ j(equal, if_return, Label::kNear);
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // Otherwise, load the size of the current bytecode and advance the offset.
__ addl(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
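The prefix handling above depends on the bytecode size table's layout: one block of kBytecodeCount int32 entries per operand scale (single, wide, extra-wide), so the Wide and ExtraWide arms advance the table pointer by one or two blocks before the shared indexing at process_bytecode. A layout sketch with an illustrative count:

    #include <cstdint>

    constexpr int kBytecodeCount = 256;  // stand-in for Bytecodes::kBytecodeCount

    std::int32_t SizeOf(const std::int32_t* size_table, std::uint8_t opcode,
                        int operand_scale_log2 /* 0 plain, 1 wide, 2 extra-wide */) {
      const std::int32_t* table =
          size_table + operand_scale_log2 * kBytecodeCount;
      return table[opcode];  // __ addl(bytecode_offset, [table + opcode * 4])
    }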
@@ -896,7 +902,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the feedback vector from the closure.
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
@@ -1000,11 +1006,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
- __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
- times_pointer_size, 0));
- __ call(rbx);
+ __ movp(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
+ __ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
@@ -1018,16 +1025,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
- // Check if we should return.
+ // Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ cmpb(rbx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
- __ j(equal, &do_return, Label::kNear);
-
- // Advance to the next bytecode and dispatch.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, rbx, rcx);
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, rbx, rcx,
+ &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1200,6 +1204,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ // TODO(jgruber,v8:6666): Update logic once builtin is off-heap-safe.
+ DCHECK(!Builtins::IsOffHeapSafe(Builtins::kInterpreterEntryTrampoline));
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
@@ -1234,11 +1240,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzxbp(r11, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
- times_pointer_size, 0));
- __ jmp(rbx);
+ __ movp(
+ kJavaScriptCallCodeStartRegister,
+ Operand(kInterpreterDispatchTableRegister, r11, times_pointer_size, 0));
+ __ jmp(kJavaScriptCallCodeStartRegister);
}
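
The dispatch sequence above zero-extends the bytecode (movzxbp), scales it by the pointer size, and jumps through the table. The same table-driven dispatch in plain C++, with illustrative handlers:

#include <cstddef>
#include <cstdint>
#include <cstdio>

using Handler = void (*)();
void HandleAdd() { std::puts("add"); }
void HandleReturn() { std::puts("return"); }

// Analogue of the interpreter dispatch table: one handler per bytecode,
// indexed by the zero-extended bytecode value.
Handler dispatch_table[] = {HandleAdd, HandleReturn};

void Dispatch(const uint8_t* bytecode_array, size_t offset) {
  uint8_t bytecode = bytecode_array[offset];  // movzxbp
  dispatch_table[bytecode]();                 // movp (times_pointer_size) + jmp
}

int main() {
  uint8_t code[] = {0, 1};
  Dispatch(code, 0);  // add
  Dispatch(code, 1);  // return
}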
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
@@ -1255,14 +1262,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
- AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, rbx, rcx);
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, rbx, rcx,
+ &if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ Integer32ToSmi(rbx, kInterpreterBytecodeOffsetRegister);
__ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
Generate_InterpreterEnterBytecode(masm);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -1280,7 +1293,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
// Get the feedback vector.
Register feedback_vector = rbx;
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// The feedback vector must be defined.
@@ -1293,7 +1306,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Otherwise, tail call the SFI code.
- GenerateTailCallToSharedCode(masm);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
}
// TODO(jupvfranco): investigate whether there is any case where the CompileLazy
@@ -1325,7 +1342,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+ FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
@@ -2015,6 +2032,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// rax : expected number of arguments
// rdx : new target (passed through to callee)
// rdi : function (passed through to callee)
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
@@ -2030,6 +2048,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
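
Both tail calls above turn a tagged Code pointer into a raw entry address with a single add: the constant Code::kHeaderSize - kHeapObjectTag simultaneously strips the heap-object tag (bit 0) and skips past the Code header. A standalone sketch of that arithmetic, with an illustrative header size:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;    // tagged pointers set bit 0
constexpr uintptr_t kCodeHeaderSize = 64;  // illustrative, not V8's value

// Mirrors: addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
// one add both removes the tag and advances to the first instruction.
uintptr_t EntryFromTaggedCode(uintptr_t tagged_code) {
  return tagged_code + (kCodeHeaderSize - kHeapObjectTag);
}

int main() {
  uintptr_t object_start = 0x1000;  // untagged start of the Code object
  uintptr_t tagged = object_start + kHeapObjectTag;
  assert(EntryFromTaggedCode(tagged) == object_start + kCodeHeaderSize);
}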
@@ -2196,7 +2215,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
- Immediate(SharedFunctionInfo::kClassConstructorMask));
+ Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
// ----------- S t a t e -------------
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index 4199ec3bbe..125d15d61b 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -9,15 +9,22 @@
#include "src/base/platform/mutex.h"
#include "src/globals.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
class AbstractCode;
+class InstructionStream;
class Name;
class SharedFunctionInfo;
class String;
+namespace wasm {
+class WasmCode;
+using WasmName = Vector<const char>;
+} // namespace wasm
+
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation") \
V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
@@ -64,10 +71,15 @@ class CodeEventListener {
virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source,
int line, int column) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ wasm::WasmName name) = 0;
virtual void CallbackEvent(Name* name, Address entry_point) = 0;
virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
+ virtual void InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) = 0;
virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
@@ -114,6 +126,10 @@ class CodeEventDispatcher {
CODE_EVENT_DISPATCH(
CodeCreateEvent(tag, code, shared, source, line, column));
}
+ void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ wasm::WasmName name) {
+ CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, name));
+ }
void CallbackEvent(Name* name, Address entry_point) {
CODE_EVENT_DISPATCH(CallbackEvent(name, entry_point));
}
@@ -126,6 +142,11 @@ class CodeEventDispatcher {
void RegExpCodeCreateEvent(AbstractCode* code, String* source) {
CODE_EVENT_DISPATCH(RegExpCodeCreateEvent(code, source));
}
+ void InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) {
+ CODE_EVENT_DISPATCH(InstructionStreamCreateEvent(tag, stream, description));
+ }
void CodeMoveEvent(AbstractCode* from, Address to) {
CODE_EVENT_DISPATCH(CodeMoveEvent(from, to));
}
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 2027d208ab..d3c81d0e81 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -1,7 +1,9 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
#include "src/code-stub-assembler.h"
+
#include "src/code-factory.h"
#include "src/frames-inl.h"
#include "src/frames.h"
@@ -45,6 +47,23 @@ void CodeStubAssembler::HandleBreakOnNode() {
BreakOnNode(node_id);
}
+void CodeStubAssembler::Assert(const BranchGenerator& branch,
+ const char* message, const char* file, int line,
+ Node* extra_node1, const char* extra_node1_name,
+ Node* extra_node2, const char* extra_node2_name,
+ Node* extra_node3, const char* extra_node3_name,
+ Node* extra_node4, const char* extra_node4_name,
+ Node* extra_node5,
+ const char* extra_node5_name) {
+#if defined(DEBUG)
+ if (FLAG_debug_code) {
+ Check(branch, message, file, line, extra_node1, extra_node1_name,
+ extra_node2, extra_node2_name, extra_node3, extra_node3_name,
+ extra_node4, extra_node4_name, extra_node5, extra_node5_name);
+ }
+#endif
+}
+
void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
@@ -74,7 +93,7 @@ void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node,
} // namespace
#endif
-void CodeStubAssembler::Check(const NodeGenerator& condition_body,
+void CodeStubAssembler::Check(const BranchGenerator& branch,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
Node* extra_node2, const char* extra_node2_name,
@@ -88,9 +107,7 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
} else {
Comment("[ Assert");
}
- Node* condition = condition_body();
- DCHECK_NOT_NULL(condition);
- Branch(condition, &ok, &not_ok);
+ branch(&ok, &not_ok);
BIND(&not_ok);
DCHECK_NOT_NULL(message);
@@ -119,6 +136,24 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
Comment("] Assert");
}
+void CodeStubAssembler::Check(const NodeGenerator& condition_body,
+ const char* message, const char* file, int line,
+ Node* extra_node1, const char* extra_node1_name,
+ Node* extra_node2, const char* extra_node2_name,
+ Node* extra_node3, const char* extra_node3_name,
+ Node* extra_node4, const char* extra_node4_name,
+ Node* extra_node5, const char* extra_node5_name) {
+ BranchGenerator branch = [=](Label* ok, Label* not_ok) {
+ Node* condition = condition_body();
+ DCHECK_NOT_NULL(condition);
+ Branch(condition, ok, not_ok);
+ };
+
+ Check(branch, message, file, line, extra_node1, extra_node1_name, extra_node2,
+ extra_node2_name, extra_node3, extra_node3_name, extra_node4,
+ extra_node4_name, extra_node5, extra_node5_name);
+}
+
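The Check overload added above is a thin adapter: it wraps a condition-producing NodeGenerator into a BranchGenerator so both flavors funnel into one implementation. A hedged sketch of the same adapter shape outside CSA, using std::function in place of the generator types:

#include <cstdio>
#include <functional>

using BranchGenerator = std::function<void(bool* ok)>;
using NodeGenerator = std::function<bool()>;

// Primitive form: the caller supplies the branching logic directly.
void Check(const BranchGenerator& branch, const char* message) {
  bool ok = false;
  branch(&ok);
  if (!ok) std::printf("Check failed: %s\n", message);
}

// Convenience overload mirroring the CSA wrapper: adapt a condition
// generator into a BranchGenerator with a small lambda, then delegate.
void Check(const NodeGenerator& condition_body, const char* message) {
  BranchGenerator branch = [=](bool* ok) { *ok = condition_body(); };
  Check(branch, message);
}

int main() {
  Check([] { return 2 + 2 == 4; }, "arithmetic holds");  // passes silently
  Check([] { return false; }, "always fails");           // prints the message
}
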
Node* CodeStubAssembler::Select(SloppyTNode<BoolT> condition,
const NodeGenerator& true_body,
const NodeGenerator& false_body,
@@ -503,7 +538,7 @@ Node* CodeStubAssembler::SmiShiftBitsConstant() {
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
-TNode<Smi> CodeStubAssembler::SmiFromWord32(SloppyTNode<Int32T> value) {
+TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
return BitcastWordToTaggedSigned(
WordShl(value_intptr, SmiShiftBitsConstant()));
@@ -526,13 +561,13 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
}
-TNode<Int32T> CodeStubAssembler::SmiToWord32(SloppyTNode<Smi> value) {
+TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
TNode<IntPtrT> result = SmiUntag(value);
- return TruncateWordToWord32(result);
+ return TruncateIntPtrToInt32(result);
}
TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) {
- return ChangeInt32ToFloat64(SmiToWord32(value));
+ return ChangeInt32ToFloat64(SmiToInt32(value));
}
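
The SmiFromInt32/SmiToInt32 renames above make the operand widths explicit; the operations themselves are just shifts by kSmiShiftSize + kSmiTagSize. A minimal sketch of the tagging scheme, assuming the x64 configuration where the 32-bit payload occupies the upper half of the word:

#include <cassert>
#include <cstdint>

// x64 Smi layout: payload in the upper 32 bits, tag in the low bits.
constexpr int kSmiShift = 32;  // kSmiShiftSize + kSmiTagSize

intptr_t SmiFromInt32(int32_t value) {
  // Sign-extend, then shift into place (via uintptr_t to avoid UB on
  // left-shifting a negative value pre-C++20).
  return static_cast<intptr_t>(
      static_cast<uintptr_t>(static_cast<intptr_t>(value)) << kSmiShift);
}

int32_t SmiToInt32(intptr_t smi) {
  // Arithmetic shift back, then truncate (WordSar + TruncateIntPtrToInt32).
  return static_cast<int32_t>(smi >> kSmiShift);
}

int main() {
  assert(SmiToInt32(SmiFromInt32(-42)) == -42);
  assert((SmiFromInt32(7) & 1) == 0);  // the tag bit of a Smi is clear
}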
TNode<Smi> CodeStubAssembler::SmiMax(SloppyTNode<Smi> a, SloppyTNode<Smi> b) {
@@ -581,6 +616,45 @@ TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
return TNode<Object>::UncheckedCast(result.value());
}
+TNode<IntPtrT> CodeStubAssembler::ConvertToRelativeIndex(
+ TNode<Context> context, TNode<Object> index, TNode<IntPtrT> length) {
+ TVARIABLE(IntPtrT, result);
+
+ TNode<Number> const index_int =
+ ToInteger_Inline(context, index, CodeStubAssembler::kTruncateMinusZero);
+ TNode<IntPtrT> zero = IntPtrConstant(0);
+
+ Label done(this);
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+ Branch(TaggedIsSmi(index_int), &if_issmi, &if_isheapnumber);
+
+ BIND(&if_issmi);
+ {
+ TNode<Smi> const index_smi = CAST(index_int);
+ result = Select<IntPtrT>(
+ IntPtrLessThan(SmiUntag(index_smi), zero),
+ [&] { return IntPtrMax(IntPtrAdd(length, SmiUntag(index_smi)), zero); },
+ [&] { return IntPtrMin(SmiUntag(index_smi), length); },
+ MachineType::PointerRepresentation());
+ Goto(&done);
+ }
+
+ BIND(&if_isheapnumber);
+ {
+ // If {index} is a heap number, it is definitely out of bounds. If it is
+    // negative, {index} = max({length} + {index}, 0) = 0. If it is positive,
+ // set {index} to {length}.
+ TNode<HeapNumber> const index_hn = CAST(index_int);
+ TNode<Float64T> const float_zero = Float64Constant(0.);
+ TNode<Float64T> const index_float = LoadHeapNumberValue(index_hn);
+ result = SelectConstant(Float64LessThan(index_float, float_zero), zero,
+ length, MachineType::PointerRepresentation());
+ Goto(&done);
+ }
+ BIND(&done);
+ return result.value();
+}
+
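ConvertToRelativeIndex implements the usual relative-index clamp from the spec (negative indices count from the end; results stay within [0, length]). Its Smi fast path reduces to this scalar logic:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Smi fast path of ConvertToRelativeIndex: negative indices count from
// the end, and the result is always clamped into [0, length].
intptr_t RelativeIndex(intptr_t index, intptr_t length) {
  return index < 0 ? std::max(length + index, intptr_t{0})
                   : std::min(index, length);
}

int main() {
  assert(RelativeIndex(-2, 5) == 3);  // counts from the end
  assert(RelativeIndex(-9, 5) == 0);  // clamped below
  assert(RelativeIndex(7, 5) == 5);   // clamped above (heap-number case)
}
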
Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
VARIABLE(var_result, MachineRepresentation::kTagged);
Label return_result(this, &var_result),
@@ -588,8 +662,8 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
return_nan(this, Label::kDeferred);
// Untag {a} and {b}.
- a = SmiToWord32(a);
- b = SmiToWord32(b);
+ a = SmiToInt32(a);
+ b = SmiToInt32(b);
// Return NaN if {b} is zero.
GotoIf(Word32Equal(b, Int32Constant(0)), &return_nan);
@@ -603,7 +677,7 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
{
// Fast case, don't need to check any other edge cases.
Node* r = Int32Mod(a, b);
- var_result.Bind(SmiFromWord32(r));
+ var_result.Bind(SmiFromInt32(r));
Goto(&return_result);
}
@@ -627,7 +701,7 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero);
// The remainder {r} can be outside the valid Smi range on 32bit
- // architectures, so we cannot just say SmiFromWord32(r) here.
+ // architectures, so we cannot just say SmiFromInt32(r) here.
var_result.Bind(ChangeInt32ToTagged(r));
Goto(&return_result);
}
@@ -652,8 +726,8 @@ TNode<Number> CodeStubAssembler::SmiMul(SloppyTNode<Smi> a,
Label return_result(this, &var_result);
// Both {a} and {b} are Smis. Convert them to integers and multiply.
- Node* lhs32 = SmiToWord32(a);
- Node* rhs32 = SmiToWord32(b);
+ Node* lhs32 = SmiToInt32(a);
+ Node* rhs32 = SmiToInt32(b);
Node* pair = Int32MulWithOverflow(lhs32, rhs32);
Node* overflow = Projection(1, pair);
@@ -702,7 +776,7 @@ TNode<Number> CodeStubAssembler::SmiMul(SloppyTNode<Smi> a,
}
BIND(&return_result);
- return var_result;
+ return var_result.value();
}
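
SmiMul multiplies the untagged halves with Int32MulWithOverflow and only boxes a heap number when the product escapes the int32 range (or is -0). The same overflow-checked multiply, sketched with the GCC/Clang builtin of that name:

#include <cstdint>
#include <cstdio>

// Mirrors Int32MulWithOverflow + Projection(1, pair). The builtin is a
// GCC/Clang extension; the CSA node lowers to equivalent machine code.
bool MulWithOverflow(int32_t a, int32_t b, int32_t* out) {
  return __builtin_mul_overflow(a, b, out);  // true on overflow
}

int main() {
  int32_t r;
  std::printf("%d\n", MulWithOverflow(1 << 20, 1 << 20, &r));  // 1 (overflow)
  std::printf("%d %d\n", MulWithOverflow(3, 7, &r), r);        // 0 21
}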
Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
@@ -724,8 +798,8 @@ Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
}
BIND(&dividend_is_not_zero);
- Node* untagged_divisor = SmiToWord32(divisor);
- Node* untagged_dividend = SmiToWord32(dividend);
+ Node* untagged_divisor = SmiToInt32(divisor);
+ Node* untagged_dividend = SmiToInt32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
@@ -749,10 +823,10 @@ Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout);
- return SmiFromWord32(untagged_result);
+ return SmiFromInt32(untagged_result);
}
-TNode<Int32T> CodeStubAssembler::TruncateWordToWord32(
+TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
SloppyTNode<IntPtrT> value) {
if (Is64()) {
return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value));
@@ -860,16 +934,44 @@ TNode<BoolT> CodeStubAssembler::IsFastJSArray(SloppyTNode<Object> object,
TVARIABLE(BoolT, var_result);
BIND(&if_true);
{
- var_result = ReinterpretCast<BoolT>(Int32Constant(1));
+ var_result = Int32TrueConstant();
+ Goto(&exit);
+ }
+ BIND(&if_false);
+ {
+ var_result = Int32FalseConstant();
Goto(&exit);
}
+ BIND(&exit);
+ return var_result.value();
+}
+
+TNode<BoolT> CodeStubAssembler::IsFastJSArrayWithNoCustomIteration(
+ TNode<Object> object, TNode<Context> context,
+ TNode<Context> native_context) {
+ Label if_false(this, Label::kDeferred), if_fast(this), exit(this);
+ GotoIfForceSlowPath(&if_false);
+ TVARIABLE(BoolT, var_result, Int32TrueConstant());
+ BranchIfFastJSArray(object, context, &if_fast, &if_false);
+ BIND(&if_fast);
+ {
+ // Check if the Array.prototype[@@iterator] may have changed.
+ GotoIfNot(InitialArrayPrototypeHasInitialArrayPrototypeMap(native_context),
+ &if_false);
+ // Check if array[@@iterator] may have changed.
+ GotoIfNot(HasInitialFastElementsKindMap(native_context, CAST(object)),
+ &if_false);
+ // Check if the array iterator has changed.
+ Branch(HasInitialArrayIteratorPrototypeMap(native_context), &exit,
+ &if_false);
+ }
BIND(&if_false);
{
- var_result = ReinterpretCast<BoolT>(Int32Constant(0));
+ var_result = Int32FalseConstant();
Goto(&exit);
}
BIND(&exit);
- return var_result;
+ return var_result.value();
}
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
@@ -1180,7 +1282,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
return ChangeInt32ToIntPtr(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
- return SmiToWord(LoadObjectField(object, offset, MachineType::AnyTagged()));
+ return SmiToIntPtr(
+ LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
@@ -1193,7 +1296,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
return UncheckedCast<Int32T>(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
- return SmiToWord32(
+ return SmiToInt32(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
@@ -1206,7 +1309,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
return ChangeInt32ToIntPtr(
Load(MachineType::Int32(), base, IntPtrConstant(index)));
} else {
- return SmiToWord(
+ return SmiToIntPtr(
Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
}
}
@@ -1222,8 +1325,8 @@ Node* CodeStubAssembler::LoadAndUntagToWord32Root(
#endif
return Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index));
} else {
- return SmiToWord32(Load(MachineType::AnyTagged(), roots_array_start,
- IntPtrConstant(index)));
+ return SmiToInt32(Load(MachineType::AnyTagged(), roots_array_start,
+ IntPtrConstant(index)));
}
}
@@ -1265,6 +1368,37 @@ Node* CodeStubAssembler::HasInstanceType(Node* object,
return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
+TNode<BoolT> CodeStubAssembler::HasInitialArrayIteratorPrototypeMap(
+ TNode<Context> native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ TNode<Map> arr_it_proto_map = LoadMap(CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)));
+ TNode<Map> initial_map = CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ return WordEqual(arr_it_proto_map, initial_map);
+}
+
+TNode<BoolT>
+CodeStubAssembler::InitialArrayPrototypeHasInitialArrayPrototypeMap(
+ TNode<Context> native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ TNode<Map> proto_map = LoadMap(CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX)));
+ TNode<Map> initial_map = CAST(LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_MAP_INDEX));
+ return WordEqual(proto_map, initial_map);
+}
+
+TNode<BoolT> CodeStubAssembler::HasInitialFastElementsKindMap(
+ TNode<Context> native_context, TNode<JSArray> jsarray) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ TNode<Map> map = LoadMap(jsarray);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(map);
+ TNode<Map> initial_jsarray_element_map =
+ LoadJSArrayElementsMap(elements_kind, native_context);
+ return WordEqual(initial_jsarray_element_map, map);
+}
+
Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object,
InstanceType instance_type) {
return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
@@ -1302,7 +1436,7 @@ TNode<FixedArrayBase> CodeStubAssembler::LoadElements(
TNode<Object> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
CSA_ASSERT(this, IsJSArray(array));
- return CAST(LoadObjectField(array, JSArray::kLengthOffset));
+ return LoadObjectField(array, JSArray::kLengthOffset);
}
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
@@ -1361,7 +1495,7 @@ TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
TNode<Object> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- return CAST(LoadObjectField(map, Map::kPrototypeOffset));
+ return LoadObjectField(map, Map::kPrototypeOffset);
}
TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
@@ -1412,16 +1546,16 @@ TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
Goto(&loop);
BIND(&loop);
{
- GotoIf(TaggedIsSmi(result), &done);
+ GotoIf(TaggedIsSmi(result.value()), &done);
Node* is_map_type =
- InstanceTypeEqual(LoadInstanceType(CAST(result)), MAP_TYPE);
+ InstanceTypeEqual(LoadInstanceType(CAST(result.value())), MAP_TYPE);
GotoIfNot(is_map_type, &done);
- result =
- LoadObjectField(CAST(result), Map::kConstructorOrBackPointerOffset);
+ result = LoadObjectField(CAST(result.value()),
+ Map::kConstructorOrBackPointerOffset);
Goto(&loop);
}
BIND(&done);
- return result;
+ return result.value();
}
Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
@@ -1487,11 +1621,11 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
BIND(&done);
if (if_no_hash != nullptr) {
- GotoIf(
- IntPtrEqual(var_hash, IntPtrConstant(PropertyArray::kNoHashSentinel)),
- if_no_hash);
+ GotoIf(IntPtrEqual(var_hash.value(),
+ IntPtrConstant(PropertyArray::kNoHashSentinel)),
+ if_no_hash);
}
- return var_hash;
+ return var_hash.value();
}
TNode<Uint32T> CodeStubAssembler::LoadNameHashField(SloppyTNode<Name> name) {
@@ -1555,7 +1689,7 @@ Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IntPtrGreaterThanOrEqual(
- ParameterToWord(index_node, parameter_mode),
+ ParameterToIntPtr(index_node, parameter_mode),
IntPtrConstant(0)));
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
@@ -1564,64 +1698,194 @@ Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
return Load(MachineType::AnyTagged(), object, offset);
}
-Node* CodeStubAssembler::LoadFixedTypedArrayElement(
+TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
+ TNode<FixedTypedArrayBase> typed_array) {
+ // Backing store = external_pointer + base_pointer.
+ Node* external_pointer =
+ LoadObjectField(typed_array, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* base_pointer =
+ LoadObjectField(typed_array, FixedTypedArrayBase::kBasePointerOffset);
+ return UncheckedCast<RawPtrT>(
+ IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
+}
+
+Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
+ Node* data_pointer, Node* offset) {
+ TVARIABLE(BigInt, var_result);
+ Label done(this), if_zero(this);
+ if (Is64()) {
+ TNode<IntPtrT> value = UncheckedCast<IntPtrT>(
+ Load(MachineType::IntPtr(), data_pointer, offset));
+ Label if_positive(this), if_negative(this);
+ GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateRawBigInt(IntPtrConstant(1));
+ Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive,
+ &if_negative);
+
+ BIND(&if_positive);
+ {
+ StoreBigIntBitfield(var_result.value(),
+ IntPtrConstant(BigInt::SignBits::encode(false) |
+ BigInt::LengthBits::encode(1)));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(value));
+ Goto(&done);
+ }
+
+ BIND(&if_negative);
+ {
+ StoreBigIntBitfield(var_result.value(),
+ IntPtrConstant(BigInt::SignBits::encode(true) |
+ BigInt::LengthBits::encode(1)));
+ StoreBigIntDigit(var_result.value(), 0,
+ Unsigned(IntPtrSub(IntPtrConstant(0), value)));
+ Goto(&done);
+ }
+ } else {
+ DCHECK(!Is64());
+ TVARIABLE(WordT, var_sign, IntPtrConstant(BigInt::SignBits::encode(false)));
+ TVARIABLE(IntPtrT, var_low);
+ TVARIABLE(IntPtrT, var_high);
+ var_low = UncheckedCast<IntPtrT>(
+ Load(MachineType::UintPtr(), data_pointer, offset));
+ var_high = UncheckedCast<IntPtrT>(
+ Load(MachineType::UintPtr(), data_pointer,
+ Int32Add(offset, Int32Constant(kPointerSize))));
+
+ Label high_zero(this), negative(this), allocate_one_digit(this),
+ allocate_two_digits(this);
+
+ GotoIf(WordEqual(var_high.value(), IntPtrConstant(0)), &high_zero);
+ Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative,
+ &allocate_two_digits);
+
+ BIND(&high_zero);
+ Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &if_zero,
+ &allocate_one_digit);
+
+ BIND(&negative);
+ {
+ var_sign = IntPtrConstant(BigInt::SignBits::encode(true));
+ // We must negate the value by computing "0 - (high|low)", performing
+ // both parts of the subtraction separately and manually taking care
+ // of the carry bit (which is 1 iff low != 0).
+ var_high = IntPtrSub(IntPtrConstant(0), var_high.value());
+ Label carry(this), no_carry(this);
+ Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry);
+ BIND(&carry);
+ var_high = IntPtrSub(var_high.value(), IntPtrConstant(1));
+ Goto(&no_carry);
+ BIND(&no_carry);
+ var_low = IntPtrSub(IntPtrConstant(0), var_low.value());
+ // var_high was non-zero going into this block, but subtracting the
+ // carry bit from it could bring us back onto the "one digit" path.
+ Branch(WordEqual(var_high.value(), IntPtrConstant(0)),
+ &allocate_one_digit, &allocate_two_digits);
+ }
+
+ BIND(&allocate_one_digit);
+ {
+ var_result = AllocateRawBigInt(IntPtrConstant(1));
+ StoreBigIntBitfield(
+ var_result.value(),
+ WordOr(var_sign.value(),
+ IntPtrConstant(BigInt::LengthBits::encode(1))));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
+ Goto(&done);
+ }
+
+ BIND(&allocate_two_digits);
+ {
+ var_result = AllocateRawBigInt(IntPtrConstant(2));
+ StoreBigIntBitfield(
+ var_result.value(),
+ WordOr(var_sign.value(),
+ IntPtrConstant(BigInt::LengthBits::encode(2))));
+ StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
+ StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value()));
+ Goto(&done);
+ }
+ }
+ BIND(&if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(0));
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
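The 32-bit path above computes 0 - (high:low) one machine word at a time, subtracting the borrow from the high word by hand. The same two-word negation as standalone C++:

#include <cassert>
#include <cstdint>

// Negate a 64-bit value held as two 32-bit halves, as the 32-bit BigInt
// path does: negate both halves and subtract the borrow from the high
// word (the borrow is 1 iff the low word was non-zero).
void NegateWordPair(uint32_t* high, uint32_t* low) {
  *high = 0u - *high;
  if (*low != 0) *high -= 1;  // the manual carry from the CSA code
  *low = 0u - *low;
}

int main() {
  uint64_t value = 0x100000002ull;  // high = 1, low = 2
  uint32_t high = static_cast<uint32_t>(value >> 32);
  uint32_t low = static_cast<uint32_t>(value);
  NegateWordPair(&high, &low);
  uint64_t negated = (static_cast<uint64_t>(high) << 32) | low;
  assert(negated + value == 0);  // two's-complement negation of the pair
}
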
+Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
+ Node* data_pointer, Node* offset) {
+ TVARIABLE(BigInt, var_result);
+ Label if_zero(this), done(this);
+ if (Is64()) {
+ TNode<UintPtrT> value = UncheckedCast<UintPtrT>(
+ Load(MachineType::UintPtr(), data_pointer, offset));
+ GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(1));
+ StoreBigIntDigit(var_result.value(), 0, value);
+ Goto(&done);
+ } else {
+ DCHECK(!Is64());
+ Label high_zero(this);
+
+ TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
+ Load(MachineType::UintPtr(), data_pointer, offset));
+ TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
+ Load(MachineType::UintPtr(), data_pointer,
+ Int32Add(offset, Int32Constant(kPointerSize))));
+
+ GotoIf(WordEqual(high, IntPtrConstant(0)), &high_zero);
+ var_result = AllocateBigInt(IntPtrConstant(2));
+ StoreBigIntDigit(var_result.value(), 0, low);
+ StoreBigIntDigit(var_result.value(), 1, high);
+ Goto(&done);
+
+ BIND(&high_zero);
+ GotoIf(WordEqual(low, IntPtrConstant(0)), &if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(1));
+ StoreBigIntDigit(var_result.value(), 0, low);
+ Goto(&done);
+ }
+ BIND(&if_zero);
+ var_result = AllocateBigInt(IntPtrConstant(0));
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
Node* offset =
ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
- MachineType type;
switch (elements_kind) {
case UINT8_ELEMENTS: /* fall through */
case UINT8_CLAMPED_ELEMENTS:
- type = MachineType::Uint8();
- break;
+ return SmiFromInt32(Load(MachineType::Uint8(), data_pointer, offset));
case INT8_ELEMENTS:
- type = MachineType::Int8();
- break;
+ return SmiFromInt32(Load(MachineType::Int8(), data_pointer, offset));
case UINT16_ELEMENTS:
- type = MachineType::Uint16();
- break;
+ return SmiFromInt32(Load(MachineType::Uint16(), data_pointer, offset));
case INT16_ELEMENTS:
- type = MachineType::Int16();
- break;
+ return SmiFromInt32(Load(MachineType::Int16(), data_pointer, offset));
case UINT32_ELEMENTS:
- type = MachineType::Uint32();
- break;
+ return ChangeUint32ToTagged(
+ Load(MachineType::Uint32(), data_pointer, offset));
case INT32_ELEMENTS:
- type = MachineType::Int32();
- break;
+ return ChangeInt32ToTagged(
+ Load(MachineType::Int32(), data_pointer, offset));
case FLOAT32_ELEMENTS:
- type = MachineType::Float32();
- break;
+ return AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(
+ Load(MachineType::Float32(), data_pointer, offset)));
case FLOAT64_ELEMENTS:
- type = MachineType::Float64();
- break;
- default:
- UNREACHABLE();
- }
- return Load(type, data_pointer, offset);
-}
-
-Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
- Node* data_pointer, Node* index_node, ElementsKind elements_kind,
- ParameterMode parameter_mode) {
- Node* value = LoadFixedTypedArrayElement(data_pointer, index_node,
- elements_kind, parameter_mode);
- switch (elements_kind) {
- case ElementsKind::INT8_ELEMENTS:
- case ElementsKind::UINT8_CLAMPED_ELEMENTS:
- case ElementsKind::UINT8_ELEMENTS:
- case ElementsKind::INT16_ELEMENTS:
- case ElementsKind::UINT16_ELEMENTS:
- return SmiFromWord32(value);
- case ElementsKind::INT32_ELEMENTS:
- return ChangeInt32ToTagged(value);
- case ElementsKind::UINT32_ELEMENTS:
- return ChangeUint32ToTagged(value);
- case ElementsKind::FLOAT32_ELEMENTS:
- return AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value));
- case ElementsKind::FLOAT64_ELEMENTS:
- return AllocateHeapNumberWithValue(value);
+ return AllocateHeapNumberWithValue(
+ Load(MachineType::Float64(), data_pointer, offset));
+ case BIGINT64_ELEMENTS:
+ return LoadFixedBigInt64ArrayElementAsTagged(data_pointer, offset);
+ case BIGUINT64_ELEMENTS:
+ return LoadFixedBigUint64ArrayElementAsTagged(data_pointer, offset);
default:
UNREACHABLE();
}
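
The rewritten switch above picks a tagging strategy per element width: 8- and 16-bit loads always fit in a Smi, 32-bit loads may not (the Smi payload is only 31 bits on 32-bit targets), and floats always box. A scalar sketch of the range check behind ChangeInt32ToTagged, assuming a 31-bit payload:

#include <cstdint>
#include <cstdio>

// Smi range for a 31-bit payload (the 32-bit-target configuration that
// makes ChangeInt32ToTagged necessary for 32-bit element loads).
constexpr int32_t kSmiMaxValue = (1 << 30) - 1;
constexpr int32_t kSmiMinValue = -(1 << 30);

bool FitsInSmi(int32_t value) {
  return value >= kSmiMinValue && value <= kSmiMaxValue;
}

int main() {
  std::printf("%d %d\n", FitsInSmi(42), FitsInSmi(INT32_MAX));  // 1 0
  // 8- and 16-bit loads (the SmiFromInt32 cases above) always fit:
  std::printf("%d\n", FitsInSmi(UINT16_MAX));                   // 1
}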
@@ -1656,7 +1920,7 @@ Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
if (Is64()) {
return Load(MachineType::Int32(), object, offset);
} else {
- return SmiToWord32(Load(MachineType::AnyTagged(), object, offset));
+ return SmiToInt32(Load(MachineType::AnyTagged(), object, offset));
}
}
@@ -1995,8 +2259,8 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
- TNode<IntPtrT> first = *arg_index;
- Node* growth = WordToParameter(
+ TNode<IntPtrT> first = arg_index->value();
+ Node* growth = IntPtrToParameter(
IntPtrSub(UncheckedCast<IntPtrT>(args->GetLength(INTPTR_PARAMETERS)),
first),
mode);
@@ -2028,12 +2292,12 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
var_tagged_length = length;
Node* diff = SmiSub(length, LoadFastJSArrayLength(array));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
- *arg_index = IntPtrAdd(*arg_index, SmiUntag(diff));
+ *arg_index = IntPtrAdd(arg_index->value(), SmiUntag(diff));
Goto(bailout);
}
BIND(&success);
- return var_tagged_length;
+ return var_tagged_length.value();
}
void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
@@ -2121,11 +2385,55 @@ TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
return result;
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
- AllocationFlags flags) {
+TNode<BigInt> CodeStubAssembler::AllocateBigInt(TNode<IntPtrT> length) {
+ TNode<BigInt> result = AllocateRawBigInt(length);
+ STATIC_ASSERT(BigInt::LengthBits::kShift == 0);
+ StoreBigIntBitfield(result, length);
+ return result;
+}
+
+TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
+ // This is currently used only for 64-bit wide BigInts. If more general
+ // applicability is required, a large-object check must be added.
+ CSA_ASSERT(this, UintPtrLessThan(length, IntPtrConstant(3)));
+
+ TNode<IntPtrT> size = IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
+ Signed(WordShl(length, kPointerSizeLog2)));
+ Node* raw_result = Allocate(size, kNone);
+ StoreMapNoWriteBarrier(raw_result, Heap::kBigIntMapRootIndex);
+ return UncheckedCast<BigInt>(raw_result);
+}
+
+void CodeStubAssembler::StoreBigIntBitfield(TNode<BigInt> bigint,
+ TNode<WordT> bitfield) {
+ StoreObjectFieldNoWriteBarrier(bigint, BigInt::kBitfieldOffset, bitfield,
+ MachineType::PointerRepresentation());
+}
+
+void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
+ TNode<UintPtrT> digit) {
+ StoreObjectFieldNoWriteBarrier(
+ bigint, BigInt::kDigitsOffset + digit_index * kPointerSize, digit,
+ UintPtrT::kMachineRepresentation);
+}
+
+TNode<WordT> CodeStubAssembler::LoadBigIntBitfield(TNode<BigInt> bigint) {
+ return UncheckedCast<WordT>(
+ LoadObjectField(bigint, BigInt::kBitfieldOffset, MachineType::UintPtr()));
+}
+
+TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
+ int digit_index) {
+ return UncheckedCast<UintPtrT>(LoadObjectField(
+ bigint, BigInt::kDigitsOffset + digit_index * kPointerSize,
+ MachineType::UintPtr()));
+}
+
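StoreBigIntBitfield packs the sign and digit count into one word using V8's BitField encoders; the STATIC_ASSERT in AllocateBigInt above relies on LengthBits sitting at shift 0, so a bare length is already a valid bitfield. A minimal stand-in for the encode/decode machinery (field widths illustrative):

#include <cassert>
#include <cstdint>

// Minimal stand-in for V8's BitField encoders used by the stores above.
// Field positions are illustrative, except LengthBits at shift 0.
template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uintptr_t kMask = ((uintptr_t{1} << kBits) - 1) << kShift;
  static constexpr uintptr_t encode(T value) {
    return static_cast<uintptr_t>(value) << kShift;
  }
  static constexpr T decode(uintptr_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using LengthBits = BitField<int, 0, 30>;
using SignBits = BitField<bool, 30, 1>;

int main() {
  // As in LoadFixedBigInt64ArrayElementAsTagged: sign flag plus length 1.
  uintptr_t bitfield = SignBits::encode(true) | LengthBits::encode(1);
  assert(SignBits::decode(bitfield));
  assert(LengthBits::decode(bitfield) == 1);
}
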
+TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
+ int length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
if (length == 0) {
- return LoadRoot(Heap::kempty_stringRootIndex);
+ return CAST(LoadRoot(Heap::kempty_stringRootIndex));
}
Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
@@ -2136,7 +2444,7 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
- return result;
+ return CAST(result);
}
Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
@@ -2153,9 +2461,8 @@ Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
return var_result.value();
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context,
- TNode<Smi> length,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
+ Node* context, TNode<Smi> length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -2203,14 +2510,14 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context,
}
BIND(&if_join);
- return var_result.value();
+ return CAST(var_result.value());
}
-Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
+ int length, AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
if (length == 0) {
- return LoadRoot(Heap::kempty_stringRootIndex);
+ return CAST(LoadRoot(Heap::kempty_stringRootIndex));
}
Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
@@ -2221,13 +2528,12 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
- return result;
+ return CAST(result);
}
-Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context,
- TNode<Smi> length,
- AllocationFlags flags) {
- CSA_SLOW_ASSERT(this, IsFixedArray(context));
+TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
+ Node* context, TNode<Smi> length, AllocationFlags flags) {
+ CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
Comment("AllocateSeqTwoByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -2274,14 +2580,14 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context,
}
BIND(&if_join);
- return var_result.value();
+ return CAST(var_result.value());
}
-Node* CodeStubAssembler::AllocateSlicedString(
- Heap::RootListIndex map_root_index, TNode<Smi> length, Node* parent,
- Node* offset) {
- CSA_ASSERT(this, IsString(parent));
- CSA_ASSERT(this, TaggedIsSmi(offset));
+TNode<String> CodeStubAssembler::AllocateSlicedString(
+ Heap::RootListIndex map_root_index, TNode<Smi> length, TNode<String> parent,
+ TNode<Smi> offset) {
+ DCHECK(map_root_index == Heap::kSlicedOneByteStringMapRootIndex ||
+ map_root_index == Heap::kSlicedStringMapRootIndex);
Node* result = Allocate(SlicedString::kSize);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
@@ -2294,29 +2600,26 @@ Node* CodeStubAssembler::AllocateSlicedString(
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
MachineRepresentation::kTagged);
- return result;
+ return CAST(result);
}
-Node* CodeStubAssembler::AllocateSlicedOneByteString(TNode<Smi> length,
- Node* parent,
- Node* offset) {
+TNode<String> CodeStubAssembler::AllocateSlicedOneByteString(
+ TNode<Smi> length, TNode<String> parent, TNode<Smi> offset) {
return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
parent, offset);
}
-Node* CodeStubAssembler::AllocateSlicedTwoByteString(TNode<Smi> length,
- Node* parent,
- Node* offset) {
+TNode<String> CodeStubAssembler::AllocateSlicedTwoByteString(
+ TNode<Smi> length, TNode<String> parent, TNode<Smi> offset) {
return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
offset);
}
-Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
- TNode<Smi> length, Node* first,
- Node* second,
- AllocationFlags flags) {
- CSA_ASSERT(this, IsString(first));
- CSA_ASSERT(this, IsString(second));
+TNode<String> CodeStubAssembler::AllocateConsString(
+ Heap::RootListIndex map_root_index, TNode<Smi> length, TNode<String> first,
+ TNode<String> second, AllocationFlags flags) {
+ DCHECK(map_root_index == Heap::kConsOneByteStringMapRootIndex ||
+ map_root_index == Heap::kConsStringMapRootIndex);
Node* result = Allocate(ConsString::kSize, flags);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
@@ -2335,29 +2638,28 @@ Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
StoreObjectField(result, ConsString::kFirstOffset, first);
StoreObjectField(result, ConsString::kSecondOffset, second);
}
- return result;
+ return CAST(result);
}
-Node* CodeStubAssembler::AllocateOneByteConsString(TNode<Smi> length,
- Node* first, Node* second,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateOneByteConsString(
+ TNode<Smi> length, TNode<String> first, TNode<String> second,
+ AllocationFlags flags) {
return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
second, flags);
}
-Node* CodeStubAssembler::AllocateTwoByteConsString(TNode<Smi> length,
- Node* first, Node* second,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::AllocateTwoByteConsString(
+ TNode<Smi> length, TNode<String> first, TNode<String> second,
+ AllocationFlags flags) {
return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
second, flags);
}
-Node* CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
- Node* left, Node* right,
- AllocationFlags flags) {
+TNode<String> CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
+ TNode<String> left,
+ TNode<String> right,
+ AllocationFlags flags) {
CSA_ASSERT(this, IsFixedArray(context));
- CSA_ASSERT(this, IsString(left));
- CSA_ASSERT(this, IsString(right));
// The added string can be a cons string.
Comment("Allocating ConsString");
Node* left_instance_type = LoadInstanceType(left);
@@ -2382,7 +2684,7 @@ Node* CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
STATIC_ASSERT(kOneByteDataHintTag != 0);
Label one_byte_map(this);
Label two_byte_map(this);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(String, result);
Label done(this, &result);
GotoIf(IsSetWord32(anded_instance_types,
kStringEncodingMask | kOneByteDataHintTag),
@@ -2395,12 +2697,12 @@ Node* CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
BIND(&one_byte_map);
Comment("One-byte ConsString");
- result.Bind(AllocateOneByteConsString(length, left, right, flags));
+ result = AllocateOneByteConsString(length, left, right, flags);
Goto(&done);
BIND(&two_byte_map);
Comment("Two-byte ConsString");
- result.Bind(AllocateTwoByteConsString(length, left, right, flags));
+ result = AllocateTwoByteConsString(length, left, right, flags);
Goto(&done);
BIND(&done);
@@ -2433,7 +2735,7 @@ Node* CodeStubAssembler::AllocateNameDictionaryWithCapacity(Node* capacity) {
DCHECK(Heap::RootIsImmortalImmovable(Heap::kNameDictionaryMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kNameDictionaryMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
- SmiFromWord(length));
+ SmiFromIntPtr(length));
// Initialized HashTable fields.
Node* zero = SmiConstant(0);
StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
@@ -2825,6 +3127,7 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
ParameterMode mode,
AllocationFlags flags,
Node* fixed_array_map) {
+ Comment("AllocateFixedArray");
CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode));
CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
IntPtrOrSmiConstant(0, mode), mode));
@@ -2891,7 +3194,7 @@ Node* CodeStubAssembler::ExtractFixedArray(Node* fixed_array, Node* first,
Label if_fixed_double_array(this), empty(this), cow(this),
done(this, {&var_result, &var_fixed_array_map});
var_fixed_array_map.Bind(LoadMap(fixed_array));
- GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), count), &empty);
+ GotoIf(WordEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity), &empty);
if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) {
if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
@@ -3568,7 +3871,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
GotoIf(TaggedIsNotSmi(value), &not_smi);
// {value} is a Smi.
- var_word32->Bind(SmiToWord32(value));
+ var_word32->Bind(SmiToInt32(value));
CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
Goto(if_number);
@@ -3671,7 +3974,7 @@ TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
Goto(&if_join);
}
BIND(&if_join);
- return var_result;
+ return var_result.value();
}
TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
@@ -3700,7 +4003,7 @@ TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
}
Goto(&if_join);
BIND(&if_join);
- return var_result;
+ return var_result.value();
}
TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
@@ -3741,7 +4044,7 @@ TNode<Number> CodeStubAssembler::ChangeUint32ToTagged(
Goto(&if_join);
BIND(&if_join);
- return var_result;
+ return var_result.value();
}
TNode<String> CodeStubAssembler::ToThisString(Node* context, Node* value,
@@ -3773,9 +4076,8 @@ TNode<String> CodeStubAssembler::ToThisString(Node* context, Node* value,
BIND(&if_valueisnullorundefined);
{
// The {value} is either null or undefined.
- CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
- StringConstant(method_name));
- Unreachable();
+ ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
+ method_name);
}
}
}
@@ -3807,7 +4109,7 @@ TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
}
BIND(&done);
- return result;
+ return result.value();
}
TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
@@ -3823,11 +4125,11 @@ TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
BIND(&smi);
TNode<Smi> value_smi = CAST(value);
CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
- result = UncheckedCast<UintPtrT>(SmiToWord(value_smi));
+ result = UncheckedCast<UintPtrT>(SmiToIntPtr(value_smi));
Goto(&done);
BIND(&done);
- return result;
+ return result.value();
}
Node* CodeStubAssembler::TimesPointerSize(Node* value) {
@@ -3918,14 +4220,6 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
return var_value.value();
}
-void CodeStubAssembler::ThrowIncompatibleMethodReceiver(Node* context,
- const char* method_name,
- Node* receiver) {
- CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
- StringConstant(method_name), receiver);
- Unreachable();
-}
-
Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
InstanceType instance_type,
char const* method_name) {
@@ -3943,7 +4237,8 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
// The {value} is not a compatible receiver for this method.
BIND(&throw_exception);
- ThrowIncompatibleMethodReceiver(context, method_name, value);
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), value);
BIND(&out);
return var_value_map.value();
@@ -4019,19 +4314,6 @@ Node* CodeStubAssembler::InstanceTypeEqual(Node* instance_type, int type) {
return Word32Equal(instance_type, Int32Constant(type));
}
-Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
- Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
- uint32_t mask =
- Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
- USE(mask);
- // Interceptors or access checks imply special receiver.
- CSA_ASSERT(this,
- SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
- Int32Constant(1), MachineRepresentation::kWord32));
- return is_special;
-}
-
TNode<BoolT> CodeStubAssembler::IsDictionaryMap(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field3 = LoadMapBitField3(map);
@@ -4065,6 +4347,13 @@ Node* CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
+Node* CodeStubAssembler::IsPromiseThenProtectorCellInvalid() {
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
+ Node* cell = LoadRoot(Heap::kPromiseThenProtectorRootIndex);
+ Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return WordEqual(cell_value, invalid);
+}
+
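IsPromiseThenProtectorCellInvalid follows V8's standard protector pattern: a cell holds a valid sentinel until user code breaks the guarded invariant (here, e.g., by patching Promise.prototype.then), after which it is permanently invalidated. A minimal sketch of the pattern, with illustrative sentinel values:

#include <cassert>

// Sentinel values are illustrative; V8 uses Isolate::kProtectorInvalid.
constexpr int kProtectorValid = 1;
constexpr int kProtectorInvalid = 0;

// A protector cell holds the valid sentinel until the guarded invariant
// is broken, after which it is permanently flipped to invalid.
struct ProtectorCell { int value = kProtectorValid; };

bool IsProtectorCellInvalid(const ProtectorCell& cell) {
  return cell.value == kProtectorInvalid;  // WordEqual(cell_value, invalid)
}

int main() {
  ProtectorCell promise_then_protector;
  assert(!IsProtectorCellInvalid(promise_then_protector));
  // E.g. user code patches Promise.prototype.then:
  promise_then_protector.value = kProtectorInvalid;
  assert(IsProtectorCellInvalid(promise_then_protector));
}
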
Node* CodeStubAssembler::IsSpeciesProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kSpeciesProtectorRootIndex);
@@ -4081,6 +4370,18 @@ Node* CodeStubAssembler::IsPrototypeInitialArrayPrototype(Node* context,
return WordEqual(proto, initial_array_prototype);
}
+TNode<BoolT> CodeStubAssembler::IsPrototypeTypedArrayPrototype(
+ SloppyTNode<Context> context, SloppyTNode<Map> map) {
+ TNode<Context> const native_context = LoadNativeContext(context);
+ TNode<Object> const typed_array_prototype =
+ LoadContextElement(native_context, Context::TYPED_ARRAY_PROTOTYPE_INDEX);
+ TNode<Object> proto = LoadMapPrototype(map);
+ TNode<Object> proto_of_proto = Select<Object>(
+ IsJSObject(proto), [=] { return LoadMapPrototype(LoadMap(CAST(proto))); },
+ [=] { return NullConstant(); }, MachineRepresentation::kTagged);
+ return WordEqual(proto_of_proto, typed_array_prototype);
+}
+
Node* CodeStubAssembler::IsCallable(Node* object) {
return IsCallableMap(LoadMap(object));
}
@@ -4089,6 +4390,10 @@ Node* CodeStubAssembler::IsCell(Node* object) {
return WordEqual(LoadMap(object), LoadRoot(Heap::kCellMapRootIndex));
}
+Node* CodeStubAssembler::IsCode(Node* object) {
+ return HasInstanceType(object, CODE_TYPE);
+}
+
Node* CodeStubAssembler::IsConstructorMap(Node* map) {
CSA_ASSERT(this, IsMap(map));
return IsSetWord32<Map::IsConstructorBit>(LoadMapBitField(map));
@@ -4203,6 +4508,15 @@ Node* CodeStubAssembler::IsJSObject(Node* object) {
return IsJSObjectMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsJSPromiseMap(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
+ return InstanceTypeEqual(LoadMapInstanceType(map), JS_PROMISE_TYPE);
+}
+
+Node* CodeStubAssembler::IsJSPromise(Node* object) {
+ return IsJSPromiseMap(LoadMap(object));
+}
+
Node* CodeStubAssembler::IsJSProxy(Node* object) {
return HasInstanceType(object, JS_PROXY_TYPE);
}
@@ -4237,6 +4551,10 @@ Node* CodeStubAssembler::IsJSArrayMap(Node* map) {
return IsJSArrayInstanceType(LoadMapInstanceType(map));
}
+Node* CodeStubAssembler::IsJSAsyncGeneratorObject(Node* object) {
+ return HasInstanceType(object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+}
+
Node* CodeStubAssembler::IsFixedArray(Node* object) {
return HasInstanceType(object, FIXED_ARRAY_TYPE);
}
@@ -4249,6 +4567,10 @@ Node* CodeStubAssembler::IsFixedArraySubclass(Node* object) {
Int32Constant(LAST_FIXED_ARRAY_TYPE)));
}
+Node* CodeStubAssembler::IsPromiseCapability(Node* object) {
+ return HasInstanceType(object, PROMISE_CAPABILITY_TYPE);
+}
+
Node* CodeStubAssembler::IsPropertyArray(Node* object) {
return HasInstanceType(object, PROPERTY_ARRAY_TYPE);
}
@@ -4324,6 +4646,10 @@ Node* CodeStubAssembler::IsMutableHeapNumber(Node* object) {
return IsMutableHeapNumberMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsFeedbackCell(Node* object) {
+ return HasInstanceType(object, FEEDBACK_CELL_TYPE);
+}
+
Node* CodeStubAssembler::IsFeedbackVector(Node* object) {
return IsFeedbackVectorMap(LoadMap(object));
}
@@ -4364,7 +4690,7 @@ Node* CodeStubAssembler::IsPrivateSymbol(Node* object) {
[=] {
TNode<Symbol> symbol = CAST(object);
TNode<Int32T> flags =
- SmiToWord32(LoadObjectField<Smi>(symbol, Symbol::kFlagsOffset));
+ SmiToInt32(LoadObjectField<Smi>(symbol, Symbol::kFlagsOffset));
return IsSetWord32(flags, 1 << Symbol::kPrivateBit);
},
[=] { return Int32Constant(0); }, MachineRepresentation::kWord32);
@@ -4391,6 +4717,10 @@ Node* CodeStubAssembler::IsNumberDictionary(Node* object) {
LoadRoot(Heap::kNumberDictionaryMapRootIndex));
}
+Node* CodeStubAssembler::IsJSGeneratorObject(Node* object) {
+ return HasInstanceType(object, JS_GENERATOR_OBJECT_TYPE);
+}
+
Node* CodeStubAssembler::IsJSFunctionInstanceType(Node* instance_type) {
return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
@@ -4553,12 +4883,12 @@ TNode<Int32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
{
Node* result = CallRuntime(Runtime::kStringCharCodeAt, NoContextConstant(),
string, SmiTag(index));
- var_result = SmiToWord32(result);
+ var_result = SmiToInt32(result);
Goto(&return_result);
}
BIND(&return_result);
- return var_result;
+ return var_result.value();
}
TNode<String> CodeStubAssembler::StringFromCharCode(TNode<Int32T> code) {
@@ -4623,11 +4953,11 @@ TNode<String> CodeStubAssembler::StringFromCharCode(TNode<Int32T> code) {
// given character range using CopyStringCharacters.
// |from_string| must be a sequential string.
// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
-Node* CodeStubAssembler::AllocAndCopyStringCharacters(
- Node* context, Node* from, Node* from_instance_type,
- TNode<IntPtrT> from_index, TNode<Smi> character_count) {
+TNode<String> CodeStubAssembler::AllocAndCopyStringCharacters(
+ Node* from, Node* from_instance_type, TNode<IntPtrT> from_index,
+ TNode<Smi> character_count) {
Label end(this), one_byte_sequential(this), two_byte_sequential(this);
- Variable var_result(this, MachineRepresentation::kTagged);
+ TVARIABLE(String, var_result);
Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential,
&two_byte_sequential);
@@ -4635,24 +4965,24 @@ Node* CodeStubAssembler::AllocAndCopyStringCharacters(
// The subject string is a sequential one-byte string.
BIND(&one_byte_sequential);
{
- Node* result = AllocateSeqOneByteString(context, character_count);
+ TNode<String> result =
+ AllocateSeqOneByteString(NoContextConstant(), character_count);
CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
SmiUntag(character_count), String::ONE_BYTE_ENCODING,
String::ONE_BYTE_ENCODING);
- var_result.Bind(result);
-
+ var_result = result;
Goto(&end);
}
// The subject string is a sequential two-byte string.
BIND(&two_byte_sequential);
{
- Node* result = AllocateSeqTwoByteString(context, character_count);
+ TNode<String> result =
+ AllocateSeqTwoByteString(NoContextConstant(), character_count);
CopyStringCharacters(from, result, from_index, IntPtrConstant(0),
SmiUntag(character_count), String::TWO_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
- var_result.Bind(result);
-
+ var_result = result;
Goto(&end);
}
@@ -4660,50 +4990,34 @@ Node* CodeStubAssembler::AllocAndCopyStringCharacters(
return var_result.value();
}
-
-Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
- Node* to, SubStringFlags flags) {
- DCHECK(flags == SubStringFlags::NONE ||
- flags == SubStringFlags::FROM_TO_ARE_BOUNDED);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<String> CodeStubAssembler::SubString(TNode<String> string,
+ TNode<IntPtrT> from,
+ TNode<IntPtrT> to) {
+ TVARIABLE(String, var_result);
ToDirectStringAssembler to_direct(state(), string);
Label end(this), runtime(this);
- // Make sure first argument is a string.
- CSA_ASSERT(this, TaggedIsNotSmi(string));
- CSA_ASSERT(this, IsString(string));
-
- // Make sure that both from and to are non-negative smis.
-
- if (flags == SubStringFlags::NONE) {
- GotoIfNot(TaggedIsPositiveSmi(from), &runtime);
- GotoIfNot(TaggedIsPositiveSmi(to), &runtime);
- } else {
- CSA_ASSERT(this, TaggedIsPositiveSmi(from));
- CSA_ASSERT(this, TaggedIsPositiveSmi(to));
- }
-
- TNode<Smi> const substr_length = SmiSub(to, from);
- TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
+ TNode<IntPtrT> const substr_length = IntPtrSub(to, from);
+ TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
// Begin dispatching based on substring length.
Label original_string_or_invalid_length(this);
- GotoIf(SmiAboveOrEqual(substr_length, string_length),
+ GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length),
&original_string_or_invalid_length);
// A real substring (substr_length < string_length).
Label single_char(this);
- GotoIf(SmiEqual(substr_length, SmiConstant(1)), &single_char);
+ GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char);
// TODO(jgruber): Add an additional case for substring of length == 0?
// Deal with different string types: update the index if necessary
// and extract the underlying string.
- Node* const direct_string = to_direct.TryToDirect(&runtime);
- Node* const offset = SmiAdd(from, SmiTag(to_direct.offset()));
+ TNode<String> direct_string = to_direct.TryToDirect(&runtime);
+ TNode<IntPtrT> offset = IntPtrAdd(from, to_direct.offset());
Node* const instance_type = to_direct.instance_type();
// The subject string can only be external or sequential string of either
@@ -4714,7 +5028,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Label next(this);
// Short slice. Copy instead of slicing.
- GotoIf(SmiLessThan(substr_length, SmiConstant(SlicedString::kMinLength)),
+ GotoIf(IntPtrLessThan(substr_length,
+ IntPtrConstant(SlicedString::kMinLength)),
&next);
// Allocate new sliced string.
@@ -4728,15 +5043,15 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
BIND(&one_byte_slice);
{
- var_result.Bind(
- AllocateSlicedOneByteString(substr_length, direct_string, offset));
+ var_result = AllocateSlicedOneByteString(SmiTag(substr_length),
+ direct_string, SmiTag(offset));
Goto(&end);
}
BIND(&two_byte_slice);
{
- var_result.Bind(
- AllocateSlicedTwoByteString(substr_length, direct_string, offset));
+ var_result = AllocateSlicedTwoByteString(SmiTag(substr_length),
+ direct_string, SmiTag(offset));
Goto(&end);
}
@@ -4747,9 +5062,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// encoding at this point.
GotoIf(to_direct.is_external(), &external_string);
- var_result.Bind(
- AllocAndCopyStringCharacters(context, direct_string, instance_type,
- SmiUntag(offset), substr_length));
+ var_result = AllocAndCopyStringCharacters(direct_string, instance_type,
+ offset, SmiTag(substr_length));
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -4762,9 +5076,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
{
Node* const fake_sequential_string = to_direct.PointerToString(&runtime);
- var_result.Bind(AllocAndCopyStringCharacters(
- context, fake_sequential_string, instance_type, SmiUntag(offset),
- substr_length));
+ var_result = AllocAndCopyStringCharacters(
+ fake_sequential_string, instance_type, offset, SmiTag(substr_length));
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -4775,44 +5088,37 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Substrings of length 1 are generated through CharCodeAt and FromCharCode.
BIND(&single_char);
{
- TNode<Int32T> char_code = StringCharCodeAt(string, SmiUntag(from));
- var_result.Bind(StringFromCharCode(char_code));
+ TNode<Int32T> char_code = StringCharCodeAt(string, from);
+ var_result = StringFromCharCode(char_code);
Goto(&end);
}
BIND(&original_string_or_invalid_length);
{
- if (flags == SubStringFlags::NONE) {
- // Longer than original string's length or negative: unsafe arguments.
- GotoIf(SmiAbove(substr_length, string_length), &runtime);
- } else {
- // with flag SubStringFlags::FROM_TO_ARE_BOUNDED, the only way we can
- // get here is if substr_length is equal to string_length.
- CSA_ASSERT(this, SmiEqual(substr_length, string_length));
- }
+ CSA_ASSERT(this, IntPtrEqual(substr_length, string_length));
// Equal length - check if {from, to} == {0, str.length}.
- GotoIf(SmiAbove(from, SmiConstant(0)), &runtime);
+ GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime);
// Return the original string (substr_length == string_length).
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
- var_result.Bind(string);
+ var_result = string;
Goto(&end);
}
// Fall back to a runtime call.
BIND(&runtime);
{
- var_result.Bind(
- CallRuntime(Runtime::kSubString, context, string, from, to));
+ var_result =
+ CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string,
+ SmiTag(from), SmiTag(to)));
Goto(&end);
}
BIND(&end);
- CSA_ASSERT(this, IsString(var_result.value()));
return var_result.value();
}
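// A minimal standalone sketch of the dispatch above, ignoring encodings and
// external strings, and assuming SlicedString::kMinLength == 13 (its
// long-standing value; an assumption here, not shown in this patch):
#include <cstddef>

enum class SubStringStrategy { kReturnOriginal, kSingleChar, kSlice, kCopy, kRuntime };

SubStringStrategy ChooseSubStringStrategy(size_t from, size_t to, size_t length) {
  const size_t kMinSliceLength = 13;    // assumed SlicedString::kMinLength
  size_t substr_length = to - from;
  if (substr_length >= length)          // equal lengths only, per the CSA_ASSERT
    return from == 0 ? SubStringStrategy::kReturnOriginal
                     : SubStringStrategy::kRuntime;
  if (substr_length == 1) return SubStringStrategy::kSingleChar;
  if (substr_length >= kMinSliceLength) return SubStringStrategy::kSlice;
  return SubStringStrategy::kCopy;      // short slice: copy instead of slicing
}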
@@ -4833,7 +5139,7 @@ ToDirectStringAssembler::ToDirectStringAssembler(
var_is_external_.Bind(Int32Constant(0));
}
-Node* ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
+TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
VariableList vars({&var_string_, &var_offset_, &var_instance_type_}, zone());
Label dispatch(this, vars);
Label if_iscons(this);
@@ -4916,7 +5222,7 @@ Node* ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
Goto(&out);
BIND(&out);
- return var_string_.value();
+ return CAST(var_string_.value());
}
Node* ToDirectStringAssembler::TryToSequential(StringPointerKind ptr_kind,
@@ -5034,22 +5340,23 @@ void CodeStubAssembler::MaybeDerefIndirectStrings(Variable* var_left,
// Fall through if neither string was an indirect string.
}
-Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
- AllocationFlags flags) {
- VARIABLE(result, MachineRepresentation::kTagged);
+TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
+ TNode<String> right,
+ AllocationFlags flags) {
+ TVARIABLE(String, result);
Label check_right(this), runtime(this, Label::kDeferred), cons(this),
done(this, &result), done_native(this, &result);
Counters* counters = isolate()->counters();
TNode<Smi> left_length = LoadStringLengthAsSmi(left);
GotoIf(SmiNotEqual(SmiConstant(0), left_length), &check_right);
- result.Bind(right);
+ result = right;
Goto(&done_native);
BIND(&check_right);
TNode<Smi> right_length = LoadStringLengthAsSmi(right);
GotoIf(SmiNotEqual(SmiConstant(0), right_length), &cons);
- result.Bind(left);
+ result = left;
Goto(&done_native);
BIND(&cons);
@@ -5062,16 +5369,16 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
GotoIf(SmiGreaterThan(new_length, SmiConstant(String::kMaxLength)),
&runtime);
- VARIABLE(var_left, MachineRepresentation::kTagged, left);
- VARIABLE(var_right, MachineRepresentation::kTagged, right);
+ TVARIABLE(String, var_left, left);
+ TVARIABLE(String, var_right, right);
Variable* input_vars[2] = {&var_left, &var_right};
Label non_cons(this, 2, input_vars);
Label slow(this, Label::kDeferred);
GotoIf(SmiLessThan(new_length, SmiConstant(ConsString::kMinLength)),
&non_cons);
- result.Bind(NewConsString(context, new_length, var_left.value(),
- var_right.value(), flags));
+ result = NewConsString(context, new_length, var_left.value(),
+ var_right.value(), flags);
Goto(&done_native);
BIND(&non_cons);
@@ -5099,29 +5406,27 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
Int32Constant(kTwoByteStringTag)),
&two_byte);
// One-byte sequential string case
- Node* new_string = AllocateSeqOneByteString(context, new_length);
- CopyStringCharacters(var_left.value(), new_string, IntPtrConstant(0),
+ result = AllocateSeqOneByteString(context, new_length);
+ CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
IntPtrConstant(0), word_left_length,
String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
- CopyStringCharacters(var_right.value(), new_string, IntPtrConstant(0),
+ CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
word_left_length, word_right_length,
String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING);
- result.Bind(new_string);
Goto(&done_native);
BIND(&two_byte);
{
// Two-byte sequential string case
- new_string = AllocateSeqTwoByteString(context, new_length);
- CopyStringCharacters(var_left.value(), new_string, IntPtrConstant(0),
+ result = AllocateSeqTwoByteString(context, new_length);
+ CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0),
IntPtrConstant(0), word_left_length,
String::TWO_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
- CopyStringCharacters(var_right.value(), new_string, IntPtrConstant(0),
+ CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0),
word_left_length, word_right_length,
String::TWO_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
- result.Bind(new_string);
Goto(&done_native);
}
@@ -5135,7 +5440,7 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
}
BIND(&runtime);
{
- result.Bind(CallRuntime(Runtime::kStringAdd, context, left, right));
+ result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right));
Goto(&done);
}
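// A sketch of the representation choice StringAdd makes above, assuming
// ConsString::kMinLength == 13 (an assumption; the constant's value is not
// shown in this hunk). Lengths above String::kMaxLength go to the runtime.
#include <cstdint>

enum class AddStrategy { kReturnRight, kReturnLeft, kFlatCopy, kConsString, kRuntime };

AddStrategy ChooseStringAddStrategy(int64_t left_len, int64_t right_len,
                                    int64_t max_length) {
  const int64_t kMinConsLength = 13;  // assumed ConsString::kMinLength
  if (left_len == 0) return AddStrategy::kReturnRight;
  if (right_len == 0) return AddStrategy::kReturnLeft;
  int64_t new_length = left_len + right_len;
  if (new_length > max_length) return AddStrategy::kRuntime;  // throws
  return new_length < kMinConsLength ? AddStrategy::kFlatCopy
                                     : AddStrategy::kConsString;
}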
@@ -5200,8 +5505,7 @@ TNode<String> CodeStubAssembler::StringFromCodePoint(TNode<Int32T> codepoint,
return CAST(var_result.value());
}
-TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<String> input) {
- CSA_SLOW_ASSERT(this, IsString(input));
+TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) {
Label runtime(this, Label::kDeferred);
Label end(this);
@@ -5224,11 +5528,11 @@ TNode<Number> CodeStubAssembler::StringToNumber(SloppyTNode<String> input) {
}
BIND(&end);
- return var_result;
+ return var_result.value();
}
-Node* CodeStubAssembler::NumberToString(Node* argument) {
- VARIABLE(result, MachineRepresentation::kTagged);
+TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
+ TVARIABLE(String, result);
Label runtime(this, Label::kDeferred), smi(this), done(this, &result);
// Load the number string cache.
@@ -5239,23 +5543,22 @@ Node* CodeStubAssembler::NumberToString(Node* argument) {
// TODO(ishell): cleanup mask handling.
Node* mask =
BitcastTaggedToWord(LoadFixedArrayBaseLength(number_string_cache));
- Node* one = IntPtrConstant(1);
+ TNode<IntPtrT> one = IntPtrConstant(1);
mask = IntPtrSub(mask, one);
- GotoIf(TaggedIsSmi(argument), &smi);
+ GotoIf(TaggedIsSmi(input), &smi);
- // Argument isn't smi, check to see if it's a heap-number.
- GotoIfNot(IsHeapNumber(argument), &runtime);
+ TNode<HeapNumber> heap_number_input = CAST(input);
// Make a hash from the two 32-bit values of the double.
- Node* low =
- LoadObjectField(argument, HeapNumber::kValueOffset, MachineType::Int32());
- Node* high = LoadObjectField(argument, HeapNumber::kValueOffset + kIntSize,
- MachineType::Int32());
- Node* hash = Word32Xor(low, high);
- hash = ChangeInt32ToIntPtr(hash);
- hash = WordShl(hash, one);
- Node* index = WordAnd(hash, WordSar(mask, SmiShiftBitsConstant()));
+ TNode<Int32T> low =
+ LoadObjectField<Int32T>(heap_number_input, HeapNumber::kValueOffset);
+ TNode<Int32T> high = LoadObjectField<Int32T>(
+ heap_number_input, HeapNumber::kValueOffset + kIntSize);
+ TNode<Word32T> hash = Word32Xor(low, high);
+ TNode<WordT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one);
+ TNode<WordT> index =
+ WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant()));
// Cache entry's key must be a heap number
Node* number_key = LoadFixedArrayElement(number_string_cache, index);
@@ -5272,14 +5575,15 @@ Node* CodeStubAssembler::NumberToString(Node* argument) {
// Heap number match, return value from cache entry.
IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
- result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize));
+ result =
+ CAST(LoadFixedArrayElement(number_string_cache, index, kPointerSize));
Goto(&done);
BIND(&runtime);
{
// No cache entry, go to the runtime.
- result.Bind(CallRuntime(Runtime::kNumberToStringSkipCache,
- NoContextConstant(), argument));
+ result = CAST(CallRuntime(Runtime::kNumberToStringSkipCache,
+ NoContextConstant(), input));
}
Goto(&done);
@@ -5287,20 +5591,19 @@ Node* CodeStubAssembler::NumberToString(Node* argument) {
{
// Load the smi key, make sure it matches the smi we're looking for.
Node* smi_index = BitcastWordToTagged(
- WordAnd(WordShl(BitcastTaggedToWord(argument), one), mask));
+ WordAnd(WordShl(BitcastTaggedToWord(input), one), mask));
Node* smi_key = LoadFixedArrayElement(number_string_cache, smi_index, 0,
SMI_PARAMETERS);
- GotoIf(WordNotEqual(smi_key, argument), &runtime);
+ GotoIf(WordNotEqual(smi_key, input), &runtime);
// Smi match, return value from cache entry.
IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
- result.Bind(LoadFixedArrayElement(number_string_cache, smi_index,
- kPointerSize, SMI_PARAMETERS));
+ result = CAST(LoadFixedArrayElement(number_string_cache, smi_index,
+ kPointerSize, SMI_PARAMETERS));
Goto(&done);
}
BIND(&done);
- CSA_ASSERT(this, IsString(result.value()));
return result.value();
}
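// A simplified sketch of the cache indexing above (plain C++): the double's
// two 32-bit halves are XORed, the hash is doubled because cache entries are
// key/value pairs, and the result is masked to the cache size. The exact
// tagged-mask arithmetic of the CSA code is elided here.
#include <cstddef>
#include <cstdint>
#include <cstring>

size_t NumberStringCacheIndex(double value, size_t cache_length) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t low = static_cast<uint32_t>(bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  uint32_t hash = low ^ high;
  // cache_length is a power of two; << 1 keeps the index even (key slot).
  return (static_cast<size_t>(hash) << 1) & (cache_length - 1);
}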
@@ -5382,7 +5685,8 @@ Node* CodeStubAssembler::NonNumberToNumberOrNumeric(
BIND(&if_inputisstring);
{
    // The {input} is a String; use the fast stub to convert it to a Number.
- var_result.Bind(StringToNumber(input));
+ TNode<String> string_input = CAST(input);
+ var_result.Bind(StringToNumber(string_input));
Goto(&end);
}
@@ -5479,6 +5783,31 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumeric(
return UncheckedCast<Numeric>(result);
}
+TNode<Number> CodeStubAssembler::ToNumber_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input) {
+ TVARIABLE(Number, var_result);
+ Label end(this), not_smi(this, Label::kDeferred);
+
+ GotoIfNot(TaggedIsSmi(input), &not_smi);
+ var_result = CAST(input);
+ Goto(&end);
+
+ BIND(&not_smi);
+ {
+ var_result =
+ Select<Number>(IsHeapNumber(input), [=] { return CAST(input); },
+ [=] {
+ return CAST(CallBuiltin(Builtins::kNonNumberToNumber,
+ context, input));
+ },
+ MachineRepresentation::kTagged);
+ Goto(&end);
+ }
+
+ BIND(&end);
+ return var_result.value();
+}
+
TNode<Number> CodeStubAssembler::ToNumber(SloppyTNode<Context> context,
SloppyTNode<Object> input,
BigIntHandling bigint_handling) {
@@ -5509,7 +5838,28 @@ TNode<Number> CodeStubAssembler::ToNumber(SloppyTNode<Context> context,
}
BIND(&end);
- return var_result;
+ return var_result.value();
+}
+
+TNode<BigInt> CodeStubAssembler::ToBigInt(SloppyTNode<Context> context,
+ SloppyTNode<Object> input) {
+ TVARIABLE(BigInt, var_result);
+ Label if_bigint(this), done(this), if_throw(this);
+
+ GotoIf(TaggedIsSmi(input), &if_throw);
+ GotoIf(IsBigInt(input), &if_bigint);
+ var_result = CAST(CallRuntime(Runtime::kToBigInt, context, input));
+ Goto(&done);
+
+ BIND(&if_bigint);
+ var_result = CAST(input);
+ Goto(&done);
+
+ BIND(&if_throw);
+ ThrowTypeError(context, MessageTemplate::kBigIntFromObject, input);
+
+ BIND(&done);
+ return var_result.value();
}
void CodeStubAssembler::TaggedToNumeric(Node* context, Node* value, Label* done,
@@ -5594,7 +5944,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
BIND(&if_isnegativesmi);
{
- Node* const uint32_value = SmiToWord32(number);
+ Node* const uint32_value = SmiToInt32(number);
Node* float64_value = ChangeUint32ToFloat64(uint32_value);
var_result.Bind(AllocateHeapNumberWithValue(float64_value));
Goto(&out);
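// The negative-Smi path above in plain C++: ToUint32 maps a negative int32
// onto [0, 2^32) by reinterpretation, and since the result no longer fits a
// Smi it is boxed as a heap number (here: returned as a double).
#include <cstdint>

double ToUint32OfNegativeSmi(int32_t smi_value) {
  uint32_t uint32_value = static_cast<uint32_t>(smi_value);  // -1 -> 4294967295
  return static_cast<double>(uint32_value);
}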
@@ -5680,7 +6030,8 @@ TNode<String> CodeStubAssembler::ToString(SloppyTNode<Context> context,
Branch(IsHeapNumberMap(input_map), &is_number, &not_heap_number);
BIND(&is_number);
- result.Bind(NumberToString(input));
+ TNode<Number> number_input = CAST(input);
+ result.Bind(NumberToString(number_input));
Goto(&done);
BIND(&not_heap_number);
@@ -5741,59 +6092,78 @@ Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
return result.value();
}
-Node* CodeStubAssembler::ToSmiIndex(Node* const input, Node* const context,
- Label* range_error) {
- VARIABLE(result, MachineRepresentation::kTagged, input);
+TNode<JSReceiver> CodeStubAssembler::ToObject(SloppyTNode<Context> context,
+ SloppyTNode<Object> input) {
+ return CAST(CallBuiltin(Builtins::kToObject, context, input));
+}
+
+TNode<Smi> CodeStubAssembler::ToSmiIndex(TNode<Object> input,
+ TNode<Context> context,
+ Label* range_error) {
+ TVARIABLE(Smi, result);
Label check_undefined(this), return_zero(this), defined(this),
negative_check(this), done(this);
- Branch(TaggedIsSmi(result.value()), &negative_check, &check_undefined);
+
+ GotoIfNot(TaggedIsSmi(input), &check_undefined);
+ result = CAST(input);
+ Goto(&negative_check);
BIND(&check_undefined);
- Branch(IsUndefined(result.value()), &return_zero, &defined);
+ Branch(IsUndefined(input), &return_zero, &defined);
BIND(&defined);
- result.Bind(ToInteger_Inline(CAST(context), CAST(result.value()),
- CodeStubAssembler::kTruncateMinusZero));
- GotoIfNot(TaggedIsSmi(result.value()), range_error);
- CSA_ASSERT(this, TaggedIsSmi(result.value()));
+ TNode<Number> integer_input =
+ CAST(CallBuiltin(Builtins::kToInteger_TruncateMinusZero, context, input));
+ GotoIfNot(TaggedIsSmi(integer_input), range_error);
+ result = CAST(integer_input);
Goto(&negative_check);
BIND(&negative_check);
Branch(SmiLessThan(result.value(), SmiConstant(0)), range_error, &done);
BIND(&return_zero);
- result.Bind(SmiConstant(0));
+ result = SmiConstant(0);
Goto(&done);
BIND(&done);
- CSA_SLOW_ASSERT(this, TaggedIsSmi(result.value()));
return result.value();
}
-Node* CodeStubAssembler::ToSmiLength(Node* input, Node* const context,
- Label* range_error) {
- VARIABLE(result, MachineRepresentation::kTagged, input);
- Label to_integer(this), negative_check(this), return_zero(this), done(this);
- Branch(TaggedIsSmi(result.value()), &negative_check, &to_integer);
+TNode<Smi> CodeStubAssembler::ToSmiLength(TNode<Object> input,
+ TNode<Context> context,
+ Label* range_error) {
+ TVARIABLE(Smi, result);
+ Label to_integer(this), negative_check(this),
+ heap_number_negative_check(this), return_zero(this), done(this);
+
+ GotoIfNot(TaggedIsSmi(input), &to_integer);
+ result = CAST(input);
+ Goto(&negative_check);
BIND(&to_integer);
- result.Bind(ToInteger_Inline(CAST(context), CAST(result.value()),
- CodeStubAssembler::kTruncateMinusZero));
- GotoIf(TaggedIsSmi(result.value()), &negative_check);
- // result.value() can still be a negative HeapNumber here.
- Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context, result.value(),
- SmiConstant(0))),
- &return_zero, range_error);
+ {
+ TNode<Number> integer_input = CAST(
+ CallBuiltin(Builtins::kToInteger_TruncateMinusZero, context, input));
+ GotoIfNot(TaggedIsSmi(integer_input), &heap_number_negative_check);
+ result = CAST(integer_input);
+ Goto(&negative_check);
+
+ // integer_input can still be a negative HeapNumber here.
+ BIND(&heap_number_negative_check);
+ TNode<HeapNumber> heap_number_input = CAST(integer_input);
+ Branch(IsTrue(CallBuiltin(Builtins::kLessThan, context, heap_number_input,
+ SmiConstant(0))),
+ &return_zero, range_error);
+ }
BIND(&negative_check);
Branch(SmiLessThan(result.value(), SmiConstant(0)), &return_zero, &done);
BIND(&return_zero);
- result.Bind(SmiConstant(0));
+ result = SmiConstant(0);
Goto(&done);
BIND(&done);
- CSA_SLOW_ASSERT(this, TaggedIsSmi(result.value()));
return result.value();
}
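// A sketch of the clamping rules above, assuming a 32-bit platform where the
// Smi maximum is 2^30 - 1 (an assumption; it is 2^31 - 1 on 64-bit targets).
// The input is already an integer, as produced by ToInteger_TruncateMinusZero.
#include <cstdint>
#include <optional>

std::optional<int32_t> ToSmiLengthSketch(double integer_input) {
  const double kSmiMax = 1073741823.0;               // 2^30 - 1
  if (integer_input < 0) return 0;                   // negative lengths clamp to 0
  if (integer_input > kSmiMax) return std::nullopt;  // range error: caller bails
  return static_cast<int32_t>(integer_input);
}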
@@ -5829,7 +6199,7 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
Label return_zero(this, Label::kDeferred);
// Load the current {arg} value.
- TNode<Object> arg = var_arg;
+ TNode<Object> arg = var_arg.value();
// Check if {arg} is a Smi.
GotoIf(TaggedIsSmi(arg), &out);
@@ -5874,8 +6244,9 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
}
BIND(&out);
- if (mode == kTruncateMinusZero) CSA_ASSERT(this, IsNumberNormalized(var_arg));
- return CAST(var_arg);
+ if (mode == kTruncateMinusZero)
+ CSA_ASSERT(this, IsNumberNormalized(var_arg.value()));
+ return CAST(var_arg.value());
}
TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
@@ -6176,7 +6547,7 @@ Node* CodeStubAssembler::ComputeIntegerHash(Node* key) {
Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
// See v8::internal::ComputeIntegerHash()
- Node* hash = TruncateWordToWord32(key);
+ Node* hash = TruncateIntPtrToInt32(key);
hash = Word32Xor(hash, seed);
hash = Int32Add(Word32Xor(hash, Int32Constant(0xFFFFFFFF)),
Word32Shl(hash, Int32Constant(15)));
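// For reference, the helper the comment above points to. The first two mixing
// steps match this hunk; the remaining steps are recalled from
// v8::internal::ComputeIntegerHash and are an assumption here:
#include <cstdint>

uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // (hash ^ 0xFFFFFFFF) + (hash << 15)
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash & 0x3FFFFFFF;  // keep the result Smi-sized
}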
@@ -6382,36 +6753,38 @@ Node* CodeStubAssembler::DescriptorArrayNumberOfEntries(Node* descriptors) {
descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex));
}
-namespace {
-
-Node* DescriptorNumberToIndex(CodeStubAssembler* a, Node* descriptor_number) {
- Node* descriptor_size = a->Int32Constant(DescriptorArray::kEntrySize);
- Node* index = a->Int32Mul(descriptor_number, descriptor_size);
- return a->ChangeInt32ToIntPtr(index);
+Node* CodeStubAssembler::DescriptorNumberToIndex(
+ SloppyTNode<Uint32T> descriptor_number) {
+ Node* descriptor_size = Int32Constant(DescriptorArray::kEntrySize);
+ Node* index = Int32Mul(descriptor_number, descriptor_size);
+ return ChangeInt32ToIntPtr(index);
}
-} // namespace
-
Node* CodeStubAssembler::DescriptorArrayToKeyIndex(Node* descriptor_number) {
return IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
- DescriptorNumberToIndex(this, descriptor_number));
+ DescriptorNumberToIndex(descriptor_number));
}
Node* CodeStubAssembler::DescriptorArrayGetSortedKeyIndex(
Node* descriptors, Node* descriptor_number) {
- const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
- Node* details = LoadAndUntagToWord32FixedArrayElement(
- descriptors, DescriptorNumberToIndex(this, descriptor_number),
- details_offset);
+ Node* details = DescriptorArrayGetDetails(
+ TNode<DescriptorArray>::UncheckedCast(descriptors),
+ TNode<Uint32T>::UncheckedCast(descriptor_number));
return DecodeWord32<PropertyDetails::DescriptorPointer>(details);
}
Node* CodeStubAssembler::DescriptorArrayGetKey(Node* descriptors,
Node* descriptor_number) {
const int key_offset = DescriptorArray::ToKeyIndex(0) * kPointerSize;
- return LoadFixedArrayElement(descriptors,
- DescriptorNumberToIndex(this, descriptor_number),
- key_offset);
+ return LoadFixedArrayElement(
+ descriptors, DescriptorNumberToIndex(descriptor_number), key_offset);
+}
+
+TNode<Uint32T> CodeStubAssembler::DescriptorArrayGetDetails(
+ TNode<DescriptorArray> descriptors, TNode<Uint32T> descriptor_number) {
+ const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
+ return TNode<Uint32T>::UncheckedCast(LoadAndUntagToWord32FixedArrayElement(
+ descriptors, DescriptorNumberToIndex(descriptor_number), details_offset));
}
void CodeStubAssembler::DescriptorLookupBinary(Node* unique_name,
@@ -6610,12 +6983,22 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Variable* var_value) {
DCHECK_EQ(MachineRepresentation::kWord32, var_details->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
- Comment("[ LoadPropertyFromFastObject");
Node* details =
LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
var_details->Bind(details);
+ LoadPropertyFromFastObject(object, map, descriptors, name_index, details,
+ var_value);
+}
+
+void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
+ Node* descriptors,
+ Node* name_index,
+ Node* details,
+ Variable* var_value) {
+ Comment("[ LoadPropertyFromFastObject");
+
Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
Label if_in_field(this), if_in_descriptor(this), done(this);
@@ -6960,6 +7343,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
FLOAT32_ELEMENTS,
FLOAT64_ELEMENTS,
UINT8_CLAMPED_ELEMENTS,
+ BIGUINT64_ELEMENTS,
+ BIGINT64_ELEMENTS,
};
Label* labels[] = {
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
@@ -6978,6 +7363,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
&if_typedarray,
&if_typedarray,
&if_typedarray,
+ &if_typedarray,
+ &if_typedarray,
};
// clang-format on
STATIC_ASSERT(arraysize(values) == arraysize(labels));
@@ -7358,8 +7745,10 @@ Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
}
Node* CodeStubAssembler::LoadFeedbackVector(Node* closure) {
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- return LoadObjectField(cell, Cell::kValueOffset);
+ Node* feedback_cell =
+ LoadObjectField(closure, JSFunction::kFeedbackCellOffset);
+ CSA_ASSERT(this, IsFeedbackCell(feedback_cell));
+ return LoadObjectField(feedback_cell, FeedbackCell::kValueOffset);
}
Node* CodeStubAssembler::LoadFeedbackVectorForStub() {
@@ -7435,6 +7824,7 @@ void CodeStubAssembler::CheckForAssociatedProtector(Node* name,
if_protector);
GotoIf(WordEqual(name, LoadRoot(Heap::kis_concat_spreadable_symbolRootIndex)),
if_protector);
+ GotoIf(WordEqual(name, LoadRoot(Heap::kthen_stringRootIndex)), if_protector);
// Fall through if no case matched.
}
@@ -7445,23 +7835,23 @@ Node* CodeStubAssembler::LoadReceiverMap(Node* receiver) {
MachineRepresentation::kTagged);
}
-Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
- VARIABLE(var_intptr_key, MachineType::PointerRepresentation());
+TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
+ TVARIABLE(IntPtrT, var_intptr_key);
Label done(this, &var_intptr_key), key_is_smi(this);
GotoIf(TaggedIsSmi(key), &key_is_smi);
// Try to convert a heap number to a Smi.
GotoIfNot(IsHeapNumber(key), miss);
{
- Node* value = LoadHeapNumberValue(key);
- Node* int_value = RoundFloat64ToInt32(value);
+ TNode<Float64T> value = LoadHeapNumberValue(key);
+ TNode<Int32T> int_value = RoundFloat64ToInt32(value);
GotoIfNot(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
- var_intptr_key.Bind(ChangeInt32ToIntPtr(int_value));
+ var_intptr_key = ChangeInt32ToIntPtr(int_value);
Goto(&done);
}
BIND(&key_is_smi);
{
- var_intptr_key.Bind(SmiUntag(key));
+ var_intptr_key = SmiUntag(key);
Goto(&done);
}
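// The heap-number path above in plain C++: a double key is usable as an
// element index only if the int32 round-trip is exact; otherwise the IC
// misses. Note -0.0 compares equal to 0 and is therefore accepted as index 0.
#include <cstdint>
#include <optional>

std::optional<intptr_t> TryDoubleToIntptrKey(double value) {
  if (!(value >= INT32_MIN && value <= INT32_MAX)) return std::nullopt;
  int32_t int_value = static_cast<int32_t>(value);  // truncating conversion
  if (static_cast<double>(int_value) != value) return std::nullopt;  // miss
  return static_cast<intptr_t>(int_value);
}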
@@ -7658,7 +8048,7 @@ Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
}
Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
- Node* input, ElementsKind elements_kind, Label* bailout) {
+ TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
MachineRepresentation rep;
@@ -7683,17 +8073,24 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
}
VARIABLE(var_result, rep);
- Label done(this, &var_result), if_smi(this), if_heapnumber(this);
- GotoIf(TaggedIsSmi(input), &if_smi);
+ VARIABLE(var_input, MachineRepresentation::kTagged, input);
+ Label done(this, &var_result), if_smi(this), if_heapnumber_or_oddball(this),
+ convert(this), loop(this, &var_input);
+ Goto(&loop);
+ BIND(&loop);
+ GotoIf(TaggedIsSmi(var_input.value()), &if_smi);
// We can handle both HeapNumber and Oddball here, since Oddball has the
// same layout as the HeapNumber for the HeapNumber::value field. This
// way we can also properly optimize stores of oddballs to typed arrays.
- GotoIf(IsHeapNumber(input), &if_heapnumber);
- Branch(HasInstanceType(input, ODDBALL_TYPE), &if_heapnumber, bailout);
+ GotoIf(IsHeapNumber(var_input.value()), &if_heapnumber_or_oddball);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Branch(HasInstanceType(var_input.value(), ODDBALL_TYPE),
+ &if_heapnumber_or_oddball, &convert);
- BIND(&if_heapnumber);
+ BIND(&if_heapnumber_or_oddball);
{
- Node* value = LoadHeapNumberValue(input);
+ Node* value = UncheckedCast<Float64T>(LoadObjectField(
+ var_input.value(), HeapNumber::kValueOffset, MachineType::Float64()));
if (rep == MachineRepresentation::kWord32) {
if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
value = Float64ToUint8Clamped(value);
@@ -7711,7 +8108,7 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
BIND(&if_smi);
{
- Node* value = SmiToWord32(input);
+ Node* value = SmiToInt32(var_input.value());
if (rep == MachineRepresentation::kFloat32) {
value = RoundInt32ToFloat32(value);
} else if (rep == MachineRepresentation::kFloat64) {
@@ -7726,67 +8123,125 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
Goto(&done);
}
+ BIND(&convert);
+ {
+ var_input.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, input));
+ Goto(&loop);
+ }
+
BIND(&done);
return var_result.value();
}
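// The UINT8_CLAMPED case above uses the spec's ToUint8Clamp conversion; a
// plain C++ sketch (round half to even; NaN and negatives clamp to 0):
#include <cmath>
#include <cstdint>

uint8_t Float64ToUint8ClampedSketch(double value) {
  if (!(value > 0.0)) return 0;    // NaN, -0.0 and negatives
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::lrint(value));  // nearest, ties to even
}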
+void CodeStubAssembler::EmitBigTypedArrayElementStore(
+ TNode<JSTypedArray> object, TNode<FixedTypedArrayBase> elements,
+ TNode<IntPtrT> intptr_key, TNode<Object> value, TNode<Context> context,
+ Label* opt_if_neutered) {
+ TNode<BigInt> bigint_value = ToBigInt(context, value);
+ TNode<WordT> bitfield = LoadBigIntBitfield(bigint_value);
+ TNode<UintPtrT> length = DecodeWord<BigIntBase::LengthBits>(bitfield);
+ TNode<UintPtrT> sign = DecodeWord<BigIntBase::SignBits>(bitfield);
+ TVARIABLE(UintPtrT, var_low, Unsigned(IntPtrConstant(0)));
+ // Only used on 32-bit platforms.
+ TVARIABLE(UintPtrT, var_high, Unsigned(IntPtrConstant(0)));
+ Label do_store(this);
+ GotoIf(WordEqual(length, IntPtrConstant(0)), &do_store);
+ var_low = LoadBigIntDigit(bigint_value, 0);
+ if (!Is64()) {
+ Label load_done(this);
+ GotoIf(WordEqual(length, IntPtrConstant(1)), &load_done);
+ var_high = LoadBigIntDigit(bigint_value, 1);
+ Goto(&load_done);
+ BIND(&load_done);
+ }
+ GotoIf(WordEqual(sign, IntPtrConstant(0)), &do_store);
+ // Negative value. Simulate two's complement.
+ if (!Is64()) {
+ var_high = Unsigned(IntPtrSub(IntPtrConstant(0), var_high.value()));
+ Label no_carry(this);
+ GotoIf(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry);
+ var_high = Unsigned(IntPtrSub(var_high.value(), IntPtrConstant(1)));
+ Goto(&no_carry);
+ BIND(&no_carry);
+ }
+ var_low = Unsigned(IntPtrSub(IntPtrConstant(0), var_low.value()));
+ Goto(&do_store);
+
+ BIND(&do_store);
+ if (opt_if_neutered != nullptr) {
+ // Check if buffer has been neutered.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), opt_if_neutered);
+ }
+
+ Node* backing_store = LoadFixedTypedArrayBackingStore(elements);
+ Node* offset = ElementOffsetFromIndex(intptr_key, BIGINT64_ELEMENTS,
+ INTPTR_PARAMETERS, 0);
+ MachineRepresentation rep = WordT::kMachineRepresentation;
+ StoreNoWriteBarrier(rep, backing_store, offset, var_low.value());
+ if (!Is64()) {
+ StoreNoWriteBarrier(rep, backing_store,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ var_high.value());
+ }
+}
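// The 32-bit sign handling above, as plain C++: negating a 64-bit magnitude
// held as {low, high} digits simulates two's complement, borrowing from the
// high word unless the low word is zero.
#include <cstdint>

void NegateTwoDigits(uint32_t* low, uint32_t* high) {
  *high = 0u - *high;
  if (*low != 0) *high -= 1;  // borrow
  *low = 0u - *low;
}
// E.g. a BigInt with digits {low=1, high=0} (the value 1n) becomes
// {0xFFFFFFFF, 0xFFFFFFFF}: the 64-bit two's complement of -1n.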
+
void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
bool is_jsarray,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode,
- Label* bailout) {
+ Label* bailout, Node* context) {
CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
+
Node* elements = LoadElements(object);
- if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
- // Bailout in case of COW elements.
- GotoIf(WordNotEqual(LoadMap(elements),
- LoadRoot(Heap::kFixedArrayMapRootIndex)),
- bailout);
+ if (!IsSmiOrObjectElementsKind(elements_kind)) {
+ CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ } else if (!IsCOWHandlingStoreMode(store_mode)) {
+ GotoIf(IsFixedCOWArrayMap(LoadMap(elements)), bailout);
}
+
// TODO(ishell): introduce TryToIntPtrOrSmi() and use OptimalParameterMode().
ParameterMode parameter_mode = INTPTR_PARAMETERS;
- key = TryToIntptr(key, bailout);
+ TNode<IntPtrT> intptr_key = TryToIntptr(key, bailout);
if (IsFixedTypedArrayElementsKind(elements_kind)) {
Label done(this);
- // TODO(ishell): call ToNumber() on value and don't bailout but be careful
- // to call it only once if we decide to bailout because of bounds checks.
-
- value = PrepareValueForWriteToTypedArray(value, elements_kind, bailout);
-
- // There must be no allocations between the buffer load and
- // and the actual store to backing store, because GC may decide that
- // the buffer is not alive or move the elements.
- // TODO(ishell): introduce DisallowHeapAllocationCode scope here.
-
- // Check if buffer has been neutered.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), bailout);
-
// Bounds check.
Node* length = TaggedToParameter(
CAST(LoadObjectField(object, JSTypedArray::kLengthOffset)),
parameter_mode);
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- // Skip the store if we write beyond the length.
- GotoIfNot(IntPtrLessThan(key, length), &done);
- // ... but bailout if the key is negative.
+ // Skip the store if we write beyond the length or
+ // to a property with a negative integer index.
+ GotoIfNot(UintPtrLessThan(intptr_key, length), &done);
} else {
DCHECK_EQ(STANDARD_STORE, store_mode);
+ GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
}
- GotoIfNot(UintPtrLessThan(key, length), bailout);
- // Backing store = external_pointer + base_pointer.
- Node* external_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- Node* base_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* backing_store =
- IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
- StoreElement(backing_store, elements_kind, key, value, parameter_mode);
+ TNode<Object> value_obj = UncheckedCast<Object>(value);
+ if (elements_kind == BIGINT64_ELEMENTS ||
+ elements_kind == BIGUINT64_ELEMENTS) {
+ EmitBigTypedArrayElementStore(CAST(object), CAST(elements), intptr_key,
+ value_obj, CAST(context), bailout);
+ } else {
+ value = PrepareValueForWriteToTypedArray(value_obj, elements_kind,
+ CAST(context));
+
+      // There must be no allocations between the buffer load and the actual
+      // store to the backing store, because GC may decide that
+ // the buffer is not alive or move the elements.
+ // TODO(ishell): introduce DisallowHeapAllocationCode scope here.
+
+ // Check if buffer has been neutered.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), bailout);
+
+ Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
+ StoreElement(backing_store, elements_kind, intptr_key, value,
+ parameter_mode);
+ }
Goto(&done);
BIND(&done);
@@ -7809,32 +8264,39 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
}
if (IsGrowStoreMode(store_mode)) {
- elements = CheckForCapacityGrow(object, elements, elements_kind, length,
- key, parameter_mode, is_jsarray, bailout);
+ elements = CheckForCapacityGrow(object, elements, elements_kind, store_mode,
+ length, intptr_key, parameter_mode,
+ is_jsarray, bailout);
} else {
- GotoIfNot(UintPtrLessThan(key, length), bailout);
+ GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
+ }
- if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW) &&
- IsSmiOrObjectElementsKind(elements_kind)) {
- elements = CopyElementsOnWrite(object, elements, elements_kind, length,
- parameter_mode, bailout);
- }
+ // If we didn't grow {elements}, it might still be COW, in which case we
+ // copy it now.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) {
+ CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ } else if (IsCOWHandlingStoreMode(store_mode)) {
+ elements = CopyElementsOnWrite(object, elements, elements_kind, length,
+ parameter_mode, bailout);
}
- StoreElement(elements, elements_kind, key, value, parameter_mode);
+
+ CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ StoreElement(elements, elements_kind, intptr_key, value, parameter_mode);
}
-Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
- ElementsKind kind, Node* length,
- Node* key, ParameterMode mode,
- bool is_js_array,
- Label* bailout) {
+Node* CodeStubAssembler::CheckForCapacityGrow(
+ Node* object, Node* elements, ElementsKind kind,
+ KeyedAccessStoreMode store_mode, Node* length, Node* key,
+ ParameterMode mode, bool is_js_array, Label* bailout) {
VARIABLE(checked_elements, MachineRepresentation::kTagged);
- Label grow_case(this), no_grow_case(this), done(this);
+ Label grow_case(this), no_grow_case(this), done(this),
+ grow_bailout(this, Label::kDeferred);
Node* condition;
if (IsHoleyOrDictionaryElementsKind(kind)) {
condition = UintPtrGreaterThanOrEqual(key, length);
} else {
+ // We don't support growing here unless the value is being appended.
condition = WordEqual(key, length);
}
Branch(condition, &grow_case, &no_grow_case);
@@ -7843,20 +8305,32 @@ Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
{
Node* current_capacity =
TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
-
checked_elements.Bind(elements);
-
Label fits_capacity(this);
+ // If key is negative, we will notice in Runtime::kGrowArrayElements.
GotoIf(UintPtrLessThan(key, current_capacity), &fits_capacity);
+
{
Node* new_elements = TryGrowElementsCapacity(
- object, elements, kind, key, current_capacity, mode, bailout);
-
+ object, elements, kind, key, current_capacity, mode, &grow_bailout);
checked_elements.Bind(new_elements);
Goto(&fits_capacity);
}
- BIND(&fits_capacity);
+ BIND(&grow_bailout);
+ {
+ Node* tagged_key = mode == SMI_PARAMETERS
+ ? key
+ : ChangeInt32ToTagged(TruncateIntPtrToInt32(key));
+ Node* maybe_elements = CallRuntime(
+ Runtime::kGrowArrayElements, NoContextConstant(), object, tagged_key);
+ GotoIf(TaggedIsSmi(maybe_elements), bailout);
+ CSA_ASSERT(this, IsFixedArrayWithKind(maybe_elements, kind));
+ checked_elements.Bind(maybe_elements);
+ Goto(&fits_capacity);
+ }
+
+ BIND(&fits_capacity);
if (is_js_array) {
Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
@@ -7883,15 +8357,12 @@ Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
VARIABLE(new_elements_var, MachineRepresentation::kTagged, elements);
Label done(this);
- GotoIfNot(
- WordEqual(LoadMap(elements), LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
- &done);
+ GotoIfNot(IsFixedCOWArrayMap(LoadMap(elements)), &done);
{
Node* capacity =
TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
Node* new_elements = GrowElementsCapacity(object, elements, kind, kind,
length, capacity, mode, bailout);
-
new_elements_var.Bind(new_elements);
Goto(&done);
}
@@ -8243,19 +8714,22 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
{
switch (op) {
case Operation::kLessThan:
- Branch(Float64LessThan(var_left_float, var_right_float), if_true,
- if_false);
+ Branch(Float64LessThan(var_left_float.value(), var_right_float.value()),
+ if_true, if_false);
break;
case Operation::kLessThanOrEqual:
- Branch(Float64LessThanOrEqual(var_left_float, var_right_float), if_true,
- if_false);
+ Branch(Float64LessThanOrEqual(var_left_float.value(),
+ var_right_float.value()),
+ if_true, if_false);
break;
case Operation::kGreaterThan:
- Branch(Float64GreaterThan(var_left_float, var_right_float), if_true,
- if_false);
+ Branch(
+ Float64GreaterThan(var_left_float.value(), var_right_float.value()),
+ if_true, if_false);
break;
case Operation::kGreaterThanOrEqual:
- Branch(Float64GreaterThanOrEqual(var_left_float, var_right_float),
+ Branch(Float64GreaterThanOrEqual(var_left_float.value(),
+ var_right_float.value()),
if_true, if_false);
break;
default:
@@ -8649,19 +9123,22 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
{
switch (op) {
case Operation::kLessThan:
- Branch(Float64LessThan(var_left_float, var_right_float), &return_true,
- &return_false);
+ Branch(Float64LessThan(var_left_float.value(), var_right_float.value()),
+ &return_true, &return_false);
break;
case Operation::kLessThanOrEqual:
- Branch(Float64LessThanOrEqual(var_left_float, var_right_float),
+ Branch(Float64LessThanOrEqual(var_left_float.value(),
+ var_right_float.value()),
&return_true, &return_false);
break;
case Operation::kGreaterThan:
- Branch(Float64GreaterThan(var_left_float, var_right_float),
- &return_true, &return_false);
+ Branch(
+ Float64GreaterThan(var_left_float.value(), var_right_float.value()),
+ &return_true, &return_false);
break;
case Operation::kGreaterThanOrEqual:
- Branch(Float64GreaterThanOrEqual(var_left_float, var_right_float),
+ Branch(Float64GreaterThanOrEqual(var_left_float.value(),
+ var_right_float.value()),
&return_true, &return_false);
break;
default:
@@ -8682,7 +9159,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
}
BIND(&end);
- return var_result;
+ return var_result.value();
}
Node* CodeStubAssembler::CollectFeedbackForString(Node* instance_type) {
@@ -9090,8 +9567,8 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
BIND(&do_float_comparison);
{
- Branch(Float64Equal(var_left_float, var_right_float), &if_equal,
- &if_notequal);
+ Branch(Float64Equal(var_left_float.value(), var_right_float.value()),
+ &if_equal, &if_notequal);
}
BIND(&if_equal);
@@ -9621,73 +10098,8 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<HeapObject> object,
}
BIND(&end);
- CSA_ASSERT(this, IsBoolean(result));
- return result;
-}
-
-Node* CodeStubAssembler::ClassOf(Node* value) {
- VARIABLE(var_result, MachineRepresentation::kTaggedPointer);
- Label if_function_template_info(this, Label::kDeferred),
- if_no_class_name(this, Label::kDeferred),
- if_function(this, Label::kDeferred), if_object(this, Label::kDeferred),
- if_primitive(this, Label::kDeferred), return_result(this);
-
- // Check if {value} is a Smi.
- GotoIf(TaggedIsSmi(value), &if_primitive);
-
- Node* value_map = LoadMap(value);
- Node* value_instance_type = LoadMapInstanceType(value_map);
-
- // Check if {value} is a JSFunction or JSBoundFunction.
- STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
- GotoIf(Uint32LessThanOrEqual(Int32Constant(FIRST_FUNCTION_TYPE),
- value_instance_type),
- &if_function);
-
- // Check if {value} is a primitive HeapObject.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- GotoIfNot(IsJSReceiverInstanceType(value_instance_type), &if_primitive);
-
- // Load the {value}s constructor, and check that it's a JSFunction.
- Node* constructor = LoadMapConstructor(value_map);
- GotoIf(HasInstanceType(constructor, FUNCTION_TEMPLATE_INFO_TYPE),
- &if_function_template_info);
- GotoIfNot(IsJSFunction(constructor), &if_object);
-
- // Return the instance class name for the {constructor}.
- Node* shared_info =
- LoadObjectField(constructor, JSFunction::kSharedFunctionInfoOffset);
- Node* instance_class_name = LoadObjectField(
- shared_info, SharedFunctionInfo::kInstanceClassNameOffset);
- var_result.Bind(instance_class_name);
- Goto(&return_result);
-
- // For remote objects the constructor might be given as FTI.
- BIND(&if_function_template_info);
- Node* class_name =
- LoadObjectField(constructor, FunctionTemplateInfo::kClassNameOffset);
- GotoIf(IsUndefined(class_name), &if_no_class_name);
- var_result.Bind(class_name);
- Goto(&return_result);
-
- BIND(&if_no_class_name);
- var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
- Goto(&return_result);
-
- BIND(&if_function);
- var_result.Bind(LoadRoot(Heap::kFunction_stringRootIndex));
- Goto(&return_result);
-
- BIND(&if_object);
- var_result.Bind(LoadRoot(Heap::kObject_stringRootIndex));
- Goto(&return_result);
-
- BIND(&if_primitive);
- var_result.Bind(NullConstant());
- Goto(&return_result);
-
- BIND(&return_result);
- return var_result.value();
+ CSA_ASSERT(this, IsBoolean(result.value()));
+ return result.value();
}
Node* CodeStubAssembler::Typeof(Node* value) {
@@ -9801,6 +10213,46 @@ Node* CodeStubAssembler::GetSuperConstructor(Node* active_function,
return result.value();
}
+Node* CodeStubAssembler::SpeciesConstructor(Node* context, Node* object,
+ Node* default_constructor) {
+ Isolate* isolate = this->isolate();
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ var_result.Bind(default_constructor);
+
+ // 2. Let C be ? Get(O, "constructor").
+ Node* const constructor =
+ GetProperty(context, object, isolate->factory()->constructor_string());
+
+ // 3. If C is undefined, return defaultConstructor.
+ Label out(this);
+ GotoIf(IsUndefined(constructor), &out);
+
+ // 4. If Type(C) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, constructor,
+ MessageTemplate::kConstructorNotReceiver);
+
+ // 5. Let S be ? Get(C, @@species).
+ Node* const species =
+ GetProperty(context, constructor, isolate->factory()->species_symbol());
+
+ // 6. If S is either undefined or null, return defaultConstructor.
+ GotoIf(IsNullOrUndefined(species), &out);
+
+ // 7. If IsConstructor(S) is true, return S.
+ Label throw_error(this);
+ GotoIf(TaggedIsSmi(species), &throw_error);
+ GotoIfNot(IsConstructorMap(LoadMap(species)), &throw_error);
+ var_result.Bind(species);
+ Goto(&out);
+
+ // 8. Throw a TypeError exception.
+ BIND(&throw_error);
+ ThrowTypeError(context, MessageTemplate::kSpeciesNotConstructor);
+
+ BIND(&out);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
Node* context) {
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -9871,16 +10323,10 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
}
BIND(&if_notcallable);
- {
- CallRuntime(Runtime::kThrowNonCallableInInstanceOfCheck, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNonCallableInInstanceOfCheck); }
BIND(&if_notreceiver);
- {
- CallRuntime(Runtime::kThrowNonObjectInInstanceOfCheck, context);
- Unreachable();
- }
+ { ThrowTypeError(context, MessageTemplate::kNonObjectInInstanceOfCheck); }
BIND(&return_true);
var_result.Bind(TrueConstant());
@@ -9894,50 +10340,51 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
return var_result.value();
}
-Node* CodeStubAssembler::NumberInc(Node* value) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_finc_value, MachineRepresentation::kFloat64);
+TNode<Number> CodeStubAssembler::NumberInc(SloppyTNode<Number> value) {
+ TVARIABLE(Number, var_result);
+ TVARIABLE(Float64T, var_finc_value);
Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
BIND(&if_issmi);
{
// Try fast Smi addition first.
- Node* one = SmiConstant(1);
- Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value),
- BitcastTaggedToWord(one));
- Node* overflow = Projection(1, pair);
+ TNode<Smi> one = SmiConstant(1);
+ TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(
+ BitcastTaggedToWord(value), BitcastTaggedToWord(one));
+ TNode<BoolT> overflow = Projection<1>(pair);
// Check if the Smi addition overflowed.
Label if_overflow(this), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_notoverflow);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ var_result = BitcastWordToTaggedSigned(Projection<0>(pair));
Goto(&end);
BIND(&if_overflow);
{
- var_finc_value.Bind(SmiToFloat64(value));
+ TNode<Smi> smi_value = CAST(value);
+ var_finc_value = SmiToFloat64(smi_value);
Goto(&do_finc);
}
}
BIND(&if_isnotsmi);
{
- CSA_ASSERT(this, IsHeapNumber(value));
+ TNode<HeapNumber> heap_number_value = CAST(value);
// Load the HeapNumber value.
- var_finc_value.Bind(LoadHeapNumberValue(value));
+ var_finc_value = LoadHeapNumberValue(heap_number_value);
Goto(&do_finc);
}
BIND(&do_finc);
{
- Node* finc_value = var_finc_value.value();
- Node* one = Float64Constant(1.0);
- Node* finc_result = Float64Add(finc_value, one);
- var_result.Bind(AllocateHeapNumberWithValue(finc_result));
+ TNode<Float64T> finc_value = var_finc_value.value();
+ TNode<Float64T> one = Float64Constant(1.0);
+ TNode<Float64T> finc_result = Float64Add(finc_value, one);
+ var_result = AllocateHeapNumberWithValue(finc_result);
Goto(&end);
}
@@ -9945,50 +10392,51 @@ Node* CodeStubAssembler::NumberInc(Node* value) {
return var_result.value();
}
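// The pattern above in plain C++: try overflow-checked integer arithmetic
// first and fall back to a double on overflow (modelling the Smi as int32;
// the real Smi range is platform-dependent). Uses a GCC/Clang builtin.
#include <cstdint>

double NumberIncSketch(int32_t smi) {
  int32_t result;
  if (!__builtin_add_overflow(smi, 1, &result)) return result;  // fast path
  return static_cast<double>(smi) + 1.0;  // boxed as a HeapNumber in V8
}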
-Node* CodeStubAssembler::NumberDec(Node* value) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_fdec_value, MachineRepresentation::kFloat64);
+TNode<Number> CodeStubAssembler::NumberDec(SloppyTNode<Number> value) {
+ TVARIABLE(Number, var_result);
+ TVARIABLE(Float64T, var_fdec_value);
Label if_issmi(this), if_isnotsmi(this), do_fdec(this), end(this);
Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
BIND(&if_issmi);
{
- // Try fast Smi addition first.
- Node* one = SmiConstant(1);
- Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(value),
- BitcastTaggedToWord(one));
- Node* overflow = Projection(1, pair);
+ // Try fast Smi subtraction first.
+ TNode<Smi> one = SmiConstant(1);
+ TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(
+ BitcastTaggedToWord(value), BitcastTaggedToWord(one));
+ TNode<BoolT> overflow = Projection<1>(pair);
- // Check if the Smi addition overflowed.
+ // Check if the Smi subtraction overflowed.
Label if_overflow(this), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_notoverflow);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ var_result = BitcastWordToTaggedSigned(Projection<0>(pair));
Goto(&end);
BIND(&if_overflow);
{
- var_fdec_value.Bind(SmiToFloat64(value));
+ TNode<Smi> smi_value = CAST(value);
+ var_fdec_value = SmiToFloat64(smi_value);
Goto(&do_fdec);
}
}
BIND(&if_isnotsmi);
{
- CSA_ASSERT(this, IsHeapNumber(value));
+ TNode<HeapNumber> heap_number_value = CAST(value);
// Load the HeapNumber value.
- var_fdec_value.Bind(LoadHeapNumberValue(value));
+ var_fdec_value = LoadHeapNumberValue(heap_number_value);
Goto(&do_fdec);
}
BIND(&do_fdec);
{
- Node* fdec_value = var_fdec_value.value();
- Node* minus_one = Float64Constant(-1.0);
- Node* fdec_result = Float64Add(fdec_value, minus_one);
- var_result.Bind(AllocateHeapNumberWithValue(fdec_result));
+ TNode<Float64T> fdec_value = var_fdec_value.value();
+ TNode<Float64T> minus_one = Float64Constant(-1.0);
+ TNode<Float64T> fdec_result = Float64Add(fdec_value, minus_one);
+ var_result = AllocateHeapNumberWithValue(fdec_result);
Goto(&end);
}
@@ -9996,29 +10444,29 @@ Node* CodeStubAssembler::NumberDec(Node* value) {
return var_result.value();
}
-Node* CodeStubAssembler::NumberAdd(Node* a, Node* b) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_fadd_value, MachineRepresentation::kFloat64);
+TNode<Number> CodeStubAssembler::NumberAdd(SloppyTNode<Number> a,
+ SloppyTNode<Number> b) {
+ TVARIABLE(Number, var_result);
Label float_add(this, Label::kDeferred), end(this);
GotoIf(TaggedIsNotSmi(a), &float_add);
GotoIf(TaggedIsNotSmi(b), &float_add);
// Try fast Smi addition first.
- Node* pair =
+ TNode<PairT<IntPtrT, BoolT>> pair =
IntPtrAddWithOverflow(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
- Node* overflow = Projection(1, pair);
+ TNode<BoolT> overflow = Projection<1>(pair);
// Check if the Smi addition overflowed.
Label if_overflow(this), if_notoverflow(this);
GotoIf(overflow, &float_add);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ var_result = BitcastWordToTaggedSigned(Projection<0>(pair));
Goto(&end);
BIND(&float_add);
{
- var_result.Bind(ChangeFloat64ToTagged(
- Float64Add(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b))));
+ var_result = ChangeFloat64ToTagged(
+ Float64Add(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b)));
Goto(&end);
}
@@ -10026,29 +10474,29 @@ Node* CodeStubAssembler::NumberAdd(Node* a, Node* b) {
return var_result.value();
}
-Node* CodeStubAssembler::NumberSub(Node* a, Node* b) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_fsub_value, MachineRepresentation::kFloat64);
+TNode<Number> CodeStubAssembler::NumberSub(SloppyTNode<Number> a,
+ SloppyTNode<Number> b) {
+ TVARIABLE(Number, var_result);
Label float_sub(this, Label::kDeferred), end(this);
GotoIf(TaggedIsNotSmi(a), &float_sub);
GotoIf(TaggedIsNotSmi(b), &float_sub);
// Try fast Smi subtraction first.
- Node* pair =
+ TNode<PairT<IntPtrT, BoolT>> pair =
IntPtrSubWithOverflow(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
- Node* overflow = Projection(1, pair);
+ TNode<BoolT> overflow = Projection<1>(pair);
// Check if the Smi subtraction overflowed.
Label if_overflow(this), if_notoverflow(this);
GotoIf(overflow, &float_sub);
- var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ var_result = BitcastWordToTaggedSigned(Projection<0>(pair));
Goto(&end);
BIND(&float_sub);
{
- var_result.Bind(ChangeFloat64ToTagged(
- Float64Sub(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b))));
+ var_result = ChangeFloat64ToTagged(
+ Float64Sub(ChangeNumberToFloat64(a), ChangeNumberToFloat64(b)));
Goto(&end);
}
@@ -10348,15 +10796,6 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
return result;
}
-Node* CodeStubAssembler::TypedArraySpeciesCreateByLength(Node* context,
- Node* originalArray,
- Node* len) {
- // TODO(tebbi): Install a fast path as well, which avoids the runtime
- // call.
- return CallRuntime(Runtime::kTypedArraySpeciesCreateByLength, context,
- originalArray, len);
-}
-
Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
@@ -10430,7 +10869,7 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
assembler_->Goto(&argument_done);
assembler_->BIND(&argument_done);
- return result;
+ return result.value();
}
void CodeStubArguments::ForEach(
@@ -10470,7 +10909,7 @@ void CodeStubArguments::PopAndReturn(Node* value) {
pop_count = argc_;
}
- assembler_->PopAndReturn(assembler_->ParameterToWord(pop_count, argc_mode_),
+ assembler_->PopAndReturn(assembler_->ParameterToIntPtr(pop_count, argc_mode_),
value);
}
@@ -10534,8 +10973,8 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(fun, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(fun, JSFunction::kFeedbackVectorOffset,
- Heap::kUndefinedCellRootIndex);
+ StoreObjectFieldRoot(fun, JSFunction::kFeedbackCellOffset,
+ Heap::kManyClosuresCellRootIndex);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
@@ -10543,28 +10982,6 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
return fun;
}
-Node* CodeStubAssembler::AllocatePromiseReactionJobInfo(
- Node* value, Node* tasks, Node* deferred_promise, Node* deferred_on_resolve,
- Node* deferred_on_reject, Node* context) {
- Node* const result = Allocate(PromiseReactionJobInfo::kSize);
- StoreMapNoWriteBarrier(result, Heap::kPromiseReactionJobInfoMapRootIndex);
- StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kValueOffset,
- value);
- StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kTasksOffset,
- tasks);
- StoreObjectFieldNoWriteBarrier(
- result, PromiseReactionJobInfo::kDeferredPromiseOffset, deferred_promise);
- StoreObjectFieldNoWriteBarrier(
- result, PromiseReactionJobInfo::kDeferredOnResolveOffset,
- deferred_on_resolve);
- StoreObjectFieldNoWriteBarrier(
- result, PromiseReactionJobInfo::kDeferredOnRejectOffset,
- deferred_on_reject);
- StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kContextOffset,
- context);
- return result;
-}
-
Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function,
StackFrame::Type frame_type) {
return WordEqual(marker_or_function,
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 0dd7a35c4a..8fca0b667f 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -64,12 +64,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(Tuple3Map, tuple3_map, Tuple3Map) \
V(UndefinedValue, undefined_value, Undefined) \
V(WeakCellMap, weak_cell_map, WeakCellMap) \
- V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
- V(promise_default_reject_handler_symbol, \
- promise_default_reject_handler_symbol, PromiseDefaultRejectHandlerSymbol) \
- V(promise_default_resolve_handler_symbol, \
- promise_default_resolve_handler_symbol, \
- PromiseDefaultResolveHandlerSymbol)
+ V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap)
// Returned from IteratorBuiltinsAssembler::GetIterator(). Struct is declared
// here to simplify use in other generated builtins.
@@ -131,18 +126,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return ParameterRepresentation(OptimalParameterMode());
}
- Node* ParameterToWord(Node* value, ParameterMode mode) {
+ Node* ParameterToIntPtr(Node* value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) value = SmiUntag(value);
return value;
}
- Node* WordToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
+ Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) return SmiTag(value);
return value;
}
- Node* Word32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
- return WordToParameter(ChangeInt32ToIntPtr(value), mode);
+ Node* Int32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
+ return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode);
}
TNode<Smi> ParameterToTagged(Node* value, ParameterMode mode) {
@@ -223,17 +218,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Select the minimum of the two provided Number values.
TNode<Object> NumberMin(SloppyTNode<Object> left, SloppyTNode<Object> right);
- // Tag a Word as a Smi value.
+ // After converting an index to an integer, calculate a relative index: if
+ // index < 0, max(length + index, 0); else min(index, length)
+ TNode<IntPtrT> ConvertToRelativeIndex(TNode<Context> context,
+ TNode<Object> index,
+ TNode<IntPtrT> length);
+
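// The rule in the comment above, as a standalone sketch:
#include <algorithm>
#include <cstdint>

intptr_t ConvertToRelativeIndexSketch(intptr_t index, intptr_t length) {
  return index < 0 ? std::max<intptr_t>(length + index, 0)
                   : std::min(index, length);
}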
+ // Tag an IntPtr as a Smi value.
TNode<Smi> SmiTag(SloppyTNode<IntPtrT> value);
- // Untag a Smi value as a Word.
+ // Untag a Smi value as an IntPtr.
TNode<IntPtrT> SmiUntag(SloppyTNode<Smi> value);
// Smi conversions.
TNode<Float64T> SmiToFloat64(SloppyTNode<Smi> value);
- TNode<Smi> SmiFromWord(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
- TNode<Smi> SmiFromWord32(SloppyTNode<Int32T> value);
- TNode<IntPtrT> SmiToWord(SloppyTNode<Smi> value) { return SmiUntag(value); }
- TNode<Int32T> SmiToWord32(SloppyTNode<Smi> value);
+ TNode<Smi> SmiFromIntPtr(SloppyTNode<IntPtrT> value) { return SmiTag(value); }
+ TNode<Smi> SmiFromInt32(SloppyTNode<Int32T> value);
+ TNode<IntPtrT> SmiToIntPtr(SloppyTNode<Smi> value) { return SmiUntag(value); }
+ TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
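// On a 64-bit target the conversions above reduce to shifts, with the 32-bit
// payload stored in the upper half of the word (this layout is an assumption
// matching V8's 64-bit Smi representation):
#include <cstdint>

int64_t SmiTagValue(int64_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}
int64_t SmiUntagValue(int64_t smi) { return smi >> 32; }  // arithmetic shift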
// Smi operations.
#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName) \
@@ -300,10 +301,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* TrySmiDiv(Node* dividend, Node* divisor, Label* bailout);
// Smi | HeapNumber operations.
- Node* NumberInc(Node* value);
- Node* NumberDec(Node* value);
- Node* NumberAdd(Node* a, Node* b);
- Node* NumberSub(Node* a, Node* b);
+ TNode<Number> NumberInc(SloppyTNode<Number> value);
+ TNode<Number> NumberDec(SloppyTNode<Number> value);
+ TNode<Number> NumberAdd(SloppyTNode<Number> a, SloppyTNode<Number> b);
+ TNode<Number> NumberSub(SloppyTNode<Number> a, SloppyTNode<Number> b);
void GotoIfNotNumber(Node* value, Label* is_not_number);
void GotoIfNumber(Node* value, Label* is_number);
@@ -318,8 +319,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* InnerAllocate(Node* previous, Node* offset);
Node* IsRegularHeapObjectSize(Node* size);
+ typedef std::function<void(Label*, Label*)> BranchGenerator;
typedef std::function<Node*()> NodeGenerator;
+ void Assert(const BranchGenerator& branch, const char* message = nullptr,
+ const char* file = nullptr, int line = 0,
+ Node* extra_node1 = nullptr, const char* extra_node1_name = "",
+ Node* extra_node2 = nullptr, const char* extra_node2_name = "",
+ Node* extra_node3 = nullptr, const char* extra_node3_name = "",
+ Node* extra_node4 = nullptr, const char* extra_node4_name = "",
+ Node* extra_node5 = nullptr, const char* extra_node5_name = "");
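A sketch of calling the new BranchGenerator overload directly (maybe_smi is an invented operand; the lambda branches instead of materializing a condition node):

    Assert([=](Label* ok, Label* not_ok) {
             Branch(TaggedIsSmi(maybe_smi), ok, not_ok);
           },
           "maybe_smi is a Smi", __FILE__, __LINE__);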
void Assert(const NodeGenerator& condition_body,
const char* message = nullptr, const char* file = nullptr,
int line = 0, Node* extra_node1 = nullptr,
@@ -328,6 +337,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
const char* extra_node3_name = "", Node* extra_node4 = nullptr,
const char* extra_node4_name = "", Node* extra_node5 = nullptr,
const char* extra_node5_name = "");
+ void Check(const BranchGenerator& branch, const char* message = nullptr,
+ const char* file = nullptr, int line = 0,
+ Node* extra_node1 = nullptr, const char* extra_node1_name = "",
+ Node* extra_node2 = nullptr, const char* extra_node2_name = "",
+ Node* extra_node3 = nullptr, const char* extra_node3_name = "",
+ Node* extra_node4 = nullptr, const char* extra_node4_name = "",
+ Node* extra_node5 = nullptr, const char* extra_node5_name = "");
void Check(const NodeGenerator& condition_body, const char* message = nullptr,
const char* file = nullptr, int line = 0,
Node* extra_node1 = nullptr, const char* extra_node1_name = "",
@@ -341,15 +357,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <class A, class F, class G>
TNode<A> Select(SloppyTNode<BoolT> condition, const F& true_body,
const G& false_body, MachineRepresentation rep) {
- return UncheckedCast<A>(
- Select(condition,
- [&]() -> Node* {
- return base::implicit_cast<SloppyTNode<A>>(true_body());
- },
- [&]() -> Node* {
- return base::implicit_cast<SloppyTNode<A>>(false_body());
- },
- rep));
+ return UncheckedCast<A>(Select(
+ condition,
+ [&]() -> Node* { return base::implicit_cast<TNode<A>>(true_body()); },
+ [&]() -> Node* { return base::implicit_cast<TNode<A>>(false_body()); },
+ rep));
}
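With the tightened casts, a typed select reads e.g. as follows (a sketch; condition is assumed to be a TNode<BoolT> already in scope):

    TNode<Smi> picked =
        Select<Smi>(condition, [=] { return SmiConstant(1); },
                    [=] { return SmiConstant(2); },
                    MachineRepresentation::kTaggedSigned);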
Node* SelectConstant(Node* condition, Node* true_value, Node* false_value,
@@ -385,7 +397,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Smi::FromInt(false_value));
}
- TNode<Int32T> TruncateWordToWord32(SloppyTNode<IntPtrT> value);
+ TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
// Check a value for smi-ness
TNode<BoolT> TaggedIsSmi(SloppyTNode<Object> a);
@@ -497,6 +509,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Int32T> LoadInstanceType(SloppyTNode<HeapObject> object);
// Compare the instance type of the object against the provided one.
Node* HasInstanceType(Node* object, InstanceType type);
+ // Determines whether Array Iterator's prototype has changed.
+ TNode<BoolT> HasInitialArrayIteratorPrototypeMap(
+ TNode<Context> native_context);
+ // Determines whether Array's prototype has changed.
+ TNode<BoolT> InitialArrayPrototypeHasInitialArrayPrototypeMap(
+ TNode<Context> native_context);
+ // Determines whether an array's elements map has changed.
+ TNode<BoolT> HasInitialFastElementsKindMap(TNode<Context> native_context,
+ TNode<JSArray> jsarray);
Node* DoesntHaveInstanceType(Node* object, InstanceType type);
Node* TaggedDoesntHaveInstanceType(Node* any_tagged, InstanceType type);
// Load the properties backing store of a JSObject.
@@ -607,12 +628,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadDoubleWithHoleCheck(
Node* base, Node* offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
- Node* LoadFixedTypedArrayElement(
- Node* data_pointer, Node* index_node, ElementsKind elements_kind,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<RawPtrT> LoadFixedTypedArrayBackingStore(
+ TNode<FixedTypedArrayBase> typed_array);
Node* LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ // Parts of the above, factored out for readability:
+ Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset);
+ Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer,
+ Node* offset);
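Usage sketch for the split-out loads (typed_array and index are invented names; BIGINT64_ELEMENTS is one of the BigInt typed-array kinds this patch caters to):

    TNode<RawPtrT> data = LoadFixedTypedArrayBackingStore(typed_array);
    Node* element =
        LoadFixedTypedArrayElementAsTagged(data, index, BIGINT64_ELEMENTS);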
// Context manipulation
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
@@ -727,39 +751,59 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Allocate a HeapNumber with a specific value.
TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value,
MutableMode mode = IMMUTABLE);
+ // Allocate a BigInt with {length} digits. Sets the sign bit to {false}.
+ // Does not initialize the digits.
+ TNode<BigInt> AllocateBigInt(TNode<IntPtrT> length);
+ // Like above, but allowing custom bitfield initialization.
+ TNode<BigInt> AllocateRawBigInt(TNode<IntPtrT> length);
+ void StoreBigIntBitfield(TNode<BigInt> bigint, TNode<WordT> bitfield);
+ void StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
+ TNode<UintPtrT> digit);
+ TNode<WordT> LoadBigIntBitfield(TNode<BigInt> bigint);
+ TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
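For instance, a one-digit BigInt might be built along these lines (a sketch; the digit value is arbitrary and Unsigned() is assumed to be the usual CSA cast helper):

    TNode<BigInt> result = AllocateBigInt(IntPtrConstant(1));  // sign bit = false
    StoreBigIntDigit(result, 0, Unsigned(IntPtrConstant(42)));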
// Allocate a SeqOneByteString with the given length.
- Node* AllocateSeqOneByteString(int length, AllocationFlags flags = kNone);
- Node* AllocateSeqOneByteString(Node* context, TNode<Smi> length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqOneByteString(int length,
+ AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqOneByteString(Node* context, TNode<Smi> length,
+ AllocationFlags flags = kNone);
// Allocate a SeqTwoByteString with the given length.
- Node* AllocateSeqTwoByteString(int length, AllocationFlags flags = kNone);
- Node* AllocateSeqTwoByteString(Node* context, TNode<Smi> length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqTwoByteString(int length,
+ AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqTwoByteString(Node* context, TNode<Smi> length,
+ AllocationFlags flags = kNone);
// Allocate a SlicedOneByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- Node* AllocateSlicedOneByteString(TNode<Smi> length, Node* parent,
- Node* offset);
+
+ TNode<String> AllocateSlicedOneByteString(TNode<Smi> length,
+ TNode<String> parent,
+ TNode<Smi> offset);
// Allocate a SlicedTwoByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- Node* AllocateSlicedTwoByteString(TNode<Smi> length, Node* parent,
- Node* offset);
+ TNode<String> AllocateSlicedTwoByteString(TNode<Smi> length,
+ TNode<String> parent,
+ TNode<Smi> offset);
// Allocate a one-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be one-byte strings.
- Node* AllocateOneByteConsString(TNode<Smi> length, Node* first, Node* second,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateOneByteConsString(TNode<Smi> length,
+ TNode<String> first,
+ TNode<String> second,
+ AllocationFlags flags = kNone);
// Allocate a two-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be two-byte strings.
- Node* AllocateTwoByteConsString(TNode<Smi> length, Node* first, Node* second,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateTwoByteConsString(TNode<Smi> length,
+ TNode<String> first,
+ TNode<String> second,
+ AllocationFlags flags = kNone);
// Allocate an appropriate one- or two-byte ConsString with the first and
// second parts specified by |left| and |right|.
- Node* NewConsString(Node* context, TNode<Smi> length, Node* left, Node* right,
- AllocationFlags flags = kNone);
+ TNode<String> NewConsString(Node* context, TNode<Smi> length,
+ TNode<String> left, TNode<String> right,
+ AllocationFlags flags = kNone);
Node* AllocateNameDictionary(int at_least_space_for);
Node* AllocateNameDictionary(Node* at_least_space_for);
@@ -787,9 +831,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
int start_offset = JSObject::kHeaderSize);
// Allocate a JSArray without elements and initialize the header fields.
- Node* AllocateUninitializedJSArrayWithoutElements(Node* array_map,
- Node* length,
- Node* allocation_site);
+ Node* AllocateUninitializedJSArrayWithoutElements(
+ Node* array_map, Node* length, Node* allocation_site = nullptr);
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
// The ParameterMode argument is only used for the capacity parameter.
@@ -827,9 +870,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
- Node* TypedArraySpeciesCreateByLength(Node* context, Node* originalArray,
- Node* len);
-
void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
Node* to_index,
Heap::RootListIndex value_root_index,
@@ -1029,10 +1069,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type,
char const* method_name);
- // Throws a TypeError for {method_name}. Terminates the current block.
- void ThrowIncompatibleMethodReceiver(Node* context, char const* method_name,
- Node* receiver);
-
// Throws a TypeError for {method_name} if {value} is not of the given
// instance type. Returns {value}'s map.
Node* ThrowIfNotInstanceType(Node* context, Node* value,
@@ -1068,6 +1104,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsCallableMap(Node* map);
Node* IsCallable(Node* object);
Node* IsCell(Node* object);
+ Node* IsCode(Node* object);
Node* IsConsStringInstanceType(Node* instance_type);
Node* IsConstructorMap(Node* map);
Node* IsConstructor(Node* object);
@@ -1077,6 +1114,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsExternalStringInstanceType(Node* instance_type);
TNode<BoolT> IsFastJSArray(SloppyTNode<Object> object,
SloppyTNode<Context> context);
+ TNode<BoolT> IsFastJSArrayWithNoCustomIteration(
+ TNode<Object> object, TNode<Context> context,
+ TNode<Context> native_context);
+ Node* IsFeedbackCell(Node* object);
Node* IsFeedbackVector(Node* object);
Node* IsFixedArray(Node* object);
Node* IsFixedArraySubclass(Node* object);
@@ -1092,14 +1133,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsJSArrayInstanceType(Node* instance_type);
Node* IsJSArrayMap(Node* object);
Node* IsJSArray(Node* object);
+ Node* IsJSAsyncGeneratorObject(Node* object);
Node* IsJSFunctionInstanceType(Node* instance_type);
Node* IsJSFunctionMap(Node* object);
Node* IsJSFunction(Node* object);
+ Node* IsJSGeneratorObject(Node* object);
Node* IsJSGlobalProxyInstanceType(Node* instance_type);
Node* IsJSGlobalProxy(Node* object);
Node* IsJSObjectInstanceType(Node* instance_type);
Node* IsJSObjectMap(Node* map);
Node* IsJSObject(Node* object);
+ Node* IsJSPromiseMap(Node* map);
+ Node* IsJSPromise(Node* object);
Node* IsJSProxy(Node* object);
Node* IsJSReceiverInstanceType(Node* instance_type);
Node* IsJSReceiverMap(Node* map);
@@ -1119,14 +1164,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsOneByteStringInstanceType(Node* instance_type);
Node* IsPrimitiveInstanceType(Node* instance_type);
Node* IsPrivateSymbol(Node* object);
+ Node* IsPromiseCapability(Node* object);
Node* IsPropertyArray(Node* object);
Node* IsPropertyCell(Node* object);
Node* IsPrototypeInitialArrayPrototype(Node* context, Node* map);
+ TNode<BoolT> IsPrototypeTypedArrayPrototype(SloppyTNode<Context> context,
+ SloppyTNode<Map> map);
Node* IsSequentialStringInstanceType(Node* instance_type);
Node* IsShortExternalStringInstanceType(Node* instance_type);
Node* IsSpecialReceiverInstanceType(Node* instance_type);
Node* IsSpecialReceiverMap(Node* map);
- Node* IsSpeciesProtectorCellInvalid();
Node* IsStringInstanceType(Node* instance_type);
Node* IsString(Node* object);
Node* IsSymbolInstanceType(Node* instance_type);
@@ -1139,6 +1186,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return IsSharedFunctionInfoMap(LoadMap(object));
}
+ Node* IsPromiseThenProtectorCellInvalid();
+ Node* IsSpeciesProtectorCellInvalid();
+
// True iff |object| is a Smi or a HeapNumber.
Node* IsNumber(Node* object);
// True iff |object| is a Smi or a HeapNumber or a BigInt.
@@ -1170,18 +1220,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Return the single character string with only {code}.
TNode<String> StringFromCharCode(TNode<Int32T> code);
- enum class SubStringFlags { NONE, FROM_TO_ARE_BOUNDED };
-
// Return a new string object which holds a substring containing the range
// [from, to) of string. |from| and |to| are expected to be untagged.
- // If flags has the value FROM_TO_ARE_BOUNDED then from and to are in
- // the range [0, string-length)
- Node* SubString(Node* context, Node* string, Node* from, Node* to,
- SubStringFlags flags = SubStringFlags::NONE);
+ TNode<String> SubString(TNode<String> string, TNode<IntPtrT> from,
+ TNode<IntPtrT> to);
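Callers now pass untagged indices directly, e.g. (illustrative; string assumed to be a TNode<String> in scope):

    TNode<String> prefix =
        SubString(string, IntPtrConstant(0), IntPtrConstant(2));  // chars [0, 2)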
// Return a new string object produced by concatenating |first| with |second|.
- Node* StringAdd(Node* context, Node* first, Node* second,
- AllocationFlags flags = kNone);
+ TNode<String> StringAdd(Node* context, TNode<String> first,
+ TNode<String> second, AllocationFlags flags = kNone);
// Check if |string| is an indirect (thin or flat cons) string type that can
// be dereferenced by DerefIndirectString.
@@ -1205,9 +1251,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
// Convert a String to a Number.
- TNode<Number> StringToNumber(SloppyTNode<String> input);
+ TNode<Number> StringToNumber(TNode<String> input);
// Convert a Number to a String.
- Node* NumberToString(Node* input);
+ TNode<String> NumberToString(TNode<Number> input);
// Convert an object to a name.
Node* ToName(Node* context, Node* input);
// Convert a Non-Number object to a Number.
@@ -1224,6 +1270,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> ToNumber(
SloppyTNode<Context> context, SloppyTNode<Object> input,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
+ TNode<Number> ToNumber_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input);
+
+ // Try to convert an object to a BigInt. Throws on failure (e.g. for Numbers).
+ // https://tc39.github.io/proposal-bigint/#sec-to-bigint
+ TNode<BigInt> ToBigInt(SloppyTNode<Context> context,
+ SloppyTNode<Object> input);
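A builtin that needs a BigInt operand can then write, for example (input is an invented operand):

    TNode<BigInt> big = ToBigInt(context, input);  // throws TypeError for Numbers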
// Converts |input| to one of 2^32 integer values in the range 0 through
// 2^32-1, inclusive.
@@ -1240,16 +1293,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Convert any object to a Primitive.
Node* JSReceiverToPrimitive(Node* context, Node* input);
+ TNode<JSReceiver> ToObject(SloppyTNode<Context> context,
+ SloppyTNode<Object> input);
+
enum ToIntegerTruncationMode {
kNoTruncation,
kTruncateMinusZero,
};
// ES6 7.1.17 ToIndex, but jumps to range_error if the result is not a Smi.
- Node* ToSmiIndex(Node* const input, Node* const context, Label* range_error);
+ TNode<Smi> ToSmiIndex(TNode<Object> input, TNode<Context> context,
+ Label* range_error);
// ES6 7.1.15 ToLength, but jumps to range_error if the result is not a Smi.
- Node* ToSmiLength(Node* input, Node* const context, Label* range_error);
+ TNode<Smi> ToSmiLength(TNode<Object> input, TNode<Context> context,
+ Label* range_error);
// ES6 7.1.15 ToLength, but with inlined fast path.
Node* ToLength_Inline(Node* const context, Node* const input);
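Typical use of the Smi-bounded conversions (a sketch; the label name and index operand are invented):

    Label range_error(this, Label::kDeferred);
    TNode<Smi> smi_index = ToSmiIndex(index, context, &range_error);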
@@ -1287,7 +1345,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
template <typename BitField>
TNode<Uint32T> DecodeWord32FromWord(SloppyTNode<WordT> word) {
return UncheckedCast<Uint32T>(
- TruncateWordToWord32(Signed(DecodeWord<BitField>(word))));
+ TruncateIntPtrToInt32(Signed(DecodeWord<BitField>(word))));
}
// Decodes an unsigned (!) value from |word32| to an uint32 node.
@@ -1554,36 +1612,47 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_not_found, Label* if_bailout,
GetOwnPropertyMode mode);
- Node* GetProperty(Node* context, Node* receiver, Handle<Name> name) {
+ TNode<Object> GetProperty(SloppyTNode<Context> context,
+ SloppyTNode<Object> receiver, Handle<Name> name) {
return GetProperty(context, receiver, HeapConstant(name));
}
- Node* GetProperty(Node* context, Node* receiver, Node* const name) {
- return CallStub(CodeFactory::GetProperty(isolate()), context, receiver,
- name);
+ TNode<Object> GetProperty(SloppyTNode<Context> context,
+ SloppyTNode<Object> receiver,
+ SloppyTNode<Object> name) {
+ return UncheckedCast<Object>(
+ CallStub(CodeFactory::GetProperty(isolate()), context, receiver, name));
}
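For example, loading a named property now yields a typed result (sketch; receiver assumed in scope):

    TNode<Object> len = GetProperty(context, receiver, factory()->length_string());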
Node* GetMethod(Node* context, Node* object, Handle<Name> name,
Label* if_null_or_undefined);
template <class... TArgs>
- Node* CallBuiltin(Builtins::Name id, Node* context, TArgs... args) {
+ TNode<Object> CallBuiltin(Builtins::Name id, SloppyTNode<Context> context,
+ TArgs... args) {
DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
!Builtins::IsLazy(id));
- return CallStub(Builtins::CallableFor(isolate(), id), context, args...);
+ return UncheckedCast<Object>(
+ CallStub(Builtins::CallableFor(isolate(), id), context, args...));
}
template <class... TArgs>
- Node* TailCallBuiltin(Builtins::Name id, Node* context, TArgs... args) {
+ TNode<Object> TailCallBuiltin(Builtins::Name id, SloppyTNode<Context> context,
+ TArgs... args) {
DCHECK_IMPLIES(Builtins::KindOf(id) == Builtins::TFJ,
!Builtins::IsLazy(id));
- return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
+ return UncheckedCast<Object>(
+ TailCallStub(Builtins::CallableFor(isolate(), id), context, args...));
}
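For example (a sketch; Builtins::kToString is the usual string-conversion builtin and input is invented):

    TNode<Object> str = CallBuiltin(Builtins::kToString, context, input);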
void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
Node* name_index, Variable* var_details,
Variable* var_value);
+ void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
+ Node* name_index, Node* details,
+ Variable* var_value);
+
void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry,
Variable* var_details,
Variable* var_value);
@@ -1703,20 +1772,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* Int32ToUint8Clamped(Node* int32_value);
Node* Float64ToUint8Clamped(Node* float64_value);
- Node* PrepareValueForWriteToTypedArray(Node* key, ElementsKind elements_kind,
- Label* bailout);
+ Node* PrepareValueForWriteToTypedArray(TNode<Object> input,
+ ElementsKind elements_kind,
+ TNode<Context> context);
// Store value to an elements array with given elements kind.
void StoreElement(Node* elements, ElementsKind kind, Node* index, Node* value,
ParameterMode mode);
+ void EmitBigTypedArrayElementStore(TNode<JSTypedArray> object,
+ TNode<FixedTypedArrayBase> elements,
+ TNode<IntPtrT> intptr_key,
+ TNode<Object> value,
+ TNode<Context> context,
+ Label* opt_if_neutered);
+
void EmitElementStore(Node* object, Node* key, Node* value, bool is_jsarray,
ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode, Label* bailout);
+ KeyedAccessStoreMode store_mode, Label* bailout,
+ Node* context);
Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
- Node* length, Node* key, ParameterMode mode,
- bool is_js_array, Label* bailout);
+ KeyedAccessStoreMode store_mode, Node* length,
+ Node* key, ParameterMode mode, bool is_js_array,
+ Label* bailout);
Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind,
Node* length, ParameterMode mode, Label* bailout);
@@ -1833,12 +1912,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SloppyTNode<Context> context,
HasPropertyLookupMode mode);
- Node* ClassOf(Node* object);
-
Node* Typeof(Node* value);
Node* GetSuperConstructor(Node* value, Node* context);
+ Node* SpeciesConstructor(Node* context, Node* object,
+ Node* default_constructor);
+
Node* InstanceOf(Node* object, Node* callable, Node* context);
// Debug helpers
@@ -1856,11 +1936,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Promise helpers
Node* IsPromiseHookEnabledOrDebugIsActive();
- Node* AllocatePromiseReactionJobInfo(Node* value, Node* tasks,
- Node* deferred_promise,
- Node* deferred_on_resolve,
- Node* deferred_on_reject, Node* context);
-
// Helpers for StackFrame markers.
Node* MarkerIsFrameType(Node* marker_or_function,
StackFrame::Type frame_type);
@@ -1906,17 +1981,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void DescriptorLookupBinary(Node* unique_name, Node* descriptors, Node* nof,
Label* if_found, Variable* var_name_index,
Label* if_not_found);
+ Node* DescriptorNumberToIndex(SloppyTNode<Uint32T> descriptor_number);
// Implements DescriptorArray::ToKeyIndex.
// Returns an untagged IntPtr.
Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
// Implements DescriptorArray::GetKey.
Node* DescriptorArrayGetKey(Node* descriptors, Node* descriptor_number);
+  // Implements DescriptorArray::GetDetails.
+ TNode<Uint32T> DescriptorArrayGetDetails(TNode<DescriptorArray> descriptors,
+ TNode<Uint32T> descriptor_number);
Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
Node* receiver, Label* if_bailout,
GetOwnPropertyMode mode = kCallJSGetter);
- Node* TryToIntptr(Node* key, Label* miss);
+ TNode<IntPtrT> TryToIntptr(Node* key, Label* miss);
void BranchIfPrototypesHaveNoElements(Node* receiver_map,
Label* definitely_no_elements,
@@ -1949,12 +2028,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
Label* bailout);
- Node* AllocateSlicedString(Heap::RootListIndex map_root_index,
- TNode<Smi> length, Node* parent, Node* offset);
+ TNode<String> AllocateSlicedString(Heap::RootListIndex map_root_index,
+ TNode<Smi> length, TNode<String> parent,
+ TNode<Smi> offset);
- Node* AllocateConsString(Heap::RootListIndex map_root_index,
- TNode<Smi> length, Node* first, Node* second,
- AllocationFlags flags);
+ TNode<String> AllocateConsString(Heap::RootListIndex map_root_index,
+ TNode<Smi> length, TNode<String> first,
+ TNode<String> second, AllocationFlags flags);
// Implements DescriptorArray::number_of_entries.
// Returns an untagged int32.
@@ -1967,10 +2047,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* CollectFeedbackForString(Node* instance_type);
void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal,
Variable* var_type_feedback = nullptr);
- Node* AllocAndCopyStringCharacters(Node* context, Node* from,
- Node* from_instance_type,
- TNode<IntPtrT> from_index,
- TNode<Smi> character_count);
+ TNode<String> AllocAndCopyStringCharacters(Node* from,
+ Node* from_instance_type,
+ TNode<IntPtrT> from_index,
+ TNode<Smi> character_count);
static const int kElementLoopUnrollThreshold = 8;
@@ -2084,7 +2164,7 @@ class ToDirectStringAssembler : public CodeStubAssembler {
// string. The result can be either a sequential or external string.
// Jumps to if_bailout if the string is indirect and cannot
// be unpacked.
- Node* TryToDirect(Label* if_bailout);
+ TNode<String> TryToDirect(Label* if_bailout);
// Returns a pointer to the beginning of the string data.
// Jumps to if_bailout if the external string cannot be unpacked.
@@ -2100,7 +2180,9 @@ class ToDirectStringAssembler : public CodeStubAssembler {
Node* string() { return var_string_.value(); }
Node* instance_type() { return var_instance_type_.value(); }
- Node* offset() { return var_offset_.value(); }
+ TNode<IntPtrT> offset() {
+ return UncheckedCast<IntPtrT>(var_offset_.value());
+ }
Node* is_external() { return var_is_external_.value(); }
private:
@@ -2141,31 +2223,39 @@ class ToDirectStringAssembler : public CodeStubAssembler {
CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
nullptr, nullptr)
-#define CSA_ASSERT_GET_CONDITION(x, ...) (x)
-#define CSA_ASSERT_GET_CONDITION_STR(x, ...) #x
+#define CSA_ASSERT_GET_FIRST(x, ...) (x)
+#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x
// CSA_ASSERT(csa, <condition>, <extra values to print...>)
// We have to jump through some hoops to allow <extra values to print...> to be
// empty.
-#define CSA_ASSERT(csa, ...) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- return base::implicit_cast<compiler::SloppyTNode<Word32T>>( \
- EXPAND(CSA_ASSERT_GET_CONDITION(__VA_ARGS__))); \
- }, \
- EXPAND(CSA_ASSERT_GET_CONDITION_STR(__VA_ARGS__)), __FILE__, __LINE__, \
+#define CSA_ASSERT(csa, ...) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ return base::implicit_cast<compiler::SloppyTNode<Word32T>>( \
+ EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \
+ }, \
+ EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- compiler::Node* const argc = \
- (csa)->Parameter(Descriptor::kActualArgumentsCount); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- SmiFromWord32((csa)->Parameter(Descriptor::kActualArgumentsCount)), \
+// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
+// <extra values to print...>)
+
+#define CSA_ASSERT_BRANCH(csa, ...) \
+ (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \
+ EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
+ __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
+
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ compiler::Node* const argc = \
+ (csa)->Parameter(Descriptor::kActualArgumentsCount); \
+ return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ SmiFromInt32((csa)->Parameter(Descriptor::kActualArgumentsCount)), \
"argc")
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
@@ -2182,6 +2272,7 @@ class ToDirectStringAssembler : public CodeStubAssembler {
TVariable<type> name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#else // DEBUG
#define CSA_ASSERT(csa, ...) ((void)0)
+#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label)
#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__)
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 2b98a5bfc7..cfe16d268c 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -71,9 +71,9 @@ void CodeStubDescriptor::Initialize(Register stack_parameter_count,
bool CodeStub::FindCodeInCache(Code** code_out) {
- NumberDictionary* stubs = isolate()->heap()->code_stubs();
+ SimpleNumberDictionary* stubs = isolate()->heap()->code_stubs();
int index = stubs->FindEntry(isolate(), GetKey());
- if (index != NumberDictionary::kNotFound) {
+ if (index != SimpleNumberDictionary::kNotFound) {
*code_out = Code::cast(stubs->ValueAt(index));
return true;
}
@@ -97,10 +97,10 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
void CodeStub::DeleteStubFromCacheForTesting() {
Heap* heap = isolate_->heap();
- Handle<NumberDictionary> dict(heap->code_stubs());
+ Handle<SimpleNumberDictionary> dict(heap->code_stubs());
int entry = dict->FindEntry(GetKey());
- DCHECK_NE(NumberDictionary::kNotFound, entry);
- dict = NumberDictionary::DeleteEntry(dict, entry);
+ DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
+ dict = SimpleNumberDictionary::DeleteEntry(dict, entry);
heap->SetRootCodeStubs(*dict);
}
@@ -121,17 +121,17 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Generate(&masm);
}
- // Allocate the handler table.
- Handle<HandlerTable> table = GenerateHandlerTable();
+ // Generate the handler table.
+ int handler_table_offset = GenerateHandlerTable(&masm);
// Create the code object.
CodeDesc desc;
masm.GetCode(isolate(), &desc);
// Copy the generated code into a heap object.
Handle<Code> new_object = factory->NewCode(
- desc, Code::STUB, masm.CodeObject(), Builtins::kNoBuiltinId, table,
+ desc, Code::STUB, masm.CodeObject(), Builtins::kNoBuiltinId,
MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate()),
- NeedsImmovableCode(), GetKey());
+ NeedsImmovableCode(), GetKey(), false, 0, 0, handler_table_offset);
return new_object;
}
@@ -166,8 +166,8 @@ Handle<Code> CodeStub::GetCode() {
#endif
// Update the dictionary and the root in Heap.
- Handle<NumberDictionary> dict =
- NumberDictionary::Set(handle(heap->code_stubs()), GetKey(), new_object);
+ Handle<SimpleNumberDictionary> dict = SimpleNumberDictionary::Set(
+ handle(heap->code_stubs()), GetKey(), new_object);
heap->SetRootCodeStubs(*dict);
code = *new_object;
}
@@ -225,9 +225,7 @@ void CodeStub::Dispatch(Isolate* isolate, uint32_t key, void** value_out,
}
}
-Handle<HandlerTable> PlatformCodeStub::GenerateHandlerTable() {
- return HandlerTable::Empty(isolate());
-}
+int PlatformCodeStub::GenerateHandlerTable(MacroAssembler* masm) { return 0; }
static void InitializeDescriptorDispatchedCall(CodeStub* stub,
void** value_out) {
@@ -289,7 +287,7 @@ TF_STUB(StringAddStub, CodeStubAssembler) {
CodeStubAssembler::AllocationFlag allocation_flags =
(pretenure_flag == TENURED) ? CodeStubAssembler::kPretenured
: CodeStubAssembler::kNone;
- Return(StringAdd(context, left, right, allocation_flags));
+ Return(StringAdd(context, CAST(left), CAST(right), allocation_flags));
} else {
Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
pretenure_flag);
@@ -332,7 +330,7 @@ TF_STUB(ElementsTransitionAndStoreStub, CodeStubAssembler) {
TransitionElementsKind(receiver, map, stub->from_kind(), stub->to_kind(),
stub->is_jsarray(), &miss);
EmitElementStore(receiver, key, value, stub->is_jsarray(), stub->to_kind(),
- stub->store_mode(), &miss);
+ stub->store_mode(), &miss, context);
Return(value);
}
@@ -434,11 +432,10 @@ TF_STUB(LoadIndexedInterceptorStub, CodeStubAssembler) {
vector);
}
-Handle<HandlerTable> JSEntryStub::GenerateHandlerTable() {
- Handle<FixedArray> handler_table =
- isolate()->factory()->NewFixedArray(1, TENURED);
- handler_table->set(0, Smi::FromInt(handler_offset_));
- return Handle<HandlerTable>::cast(handler_table);
+int JSEntryStub::GenerateHandlerTable(MacroAssembler* masm) {
+ int handler_table_offset = HandlerTable::EmitReturnTableStart(masm, 1);
+ HandlerTable::EmitReturnEntry(masm, 0, handler_offset_);
+ return handler_table_offset;
}
@@ -524,7 +521,7 @@ TF_STUB(StoreFastElementStub, CodeStubAssembler) {
Label miss(this);
EmitElementStore(receiver, key, value, stub->is_js_array(),
- stub->elements_kind(), stub->store_mode(), &miss);
+ stub->elements_kind(), stub->store_mode(), &miss, context);
Return(value);
BIND(&miss);
@@ -541,12 +538,13 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
StoreFastElementStub(isolate, false, HOLEY_ELEMENTS, STANDARD_STORE)
.GetCode();
StoreFastElementStub(isolate, false, HOLEY_ELEMENTS,
- STORE_AND_GROW_NO_TRANSITION)
+ STORE_AND_GROW_NO_TRANSITION_HANDLE_COW)
.GetCode();
for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
ElementsKind kind = static_cast<ElementsKind>(i);
StoreFastElementStub(isolate, true, kind, STANDARD_STORE).GetCode();
- StoreFastElementStub(isolate, true, kind, STORE_AND_GROW_NO_TRANSITION)
+ StoreFastElementStub(isolate, true, kind,
+ STORE_AND_GROW_NO_TRANSITION_HANDLE_COW)
.GetCode();
}
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 751a89fdbd..6d35af1100 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -288,7 +288,7 @@ class PlatformCodeStub : public CodeStub {
virtual void Generate(MacroAssembler* masm) = 0;
// Generates the exception handler table for the stub.
- virtual Handle<HandlerTable> GenerateHandlerTable();
+ virtual int GenerateHandlerTable(MacroAssembler* masm);
DEFINE_CODE_STUB_BASE(PlatformCodeStub, CodeStub);
};
@@ -692,7 +692,7 @@ class JSEntryStub : public PlatformCodeStub {
}
private:
- Handle<HandlerTable> GenerateHandlerTable() override;
+ int GenerateHandlerTable(MacroAssembler* masm) override;
void PrintName(std::ostream& os) const override { // NOLINT
os << (type() == StackFrame::ENTRY ? "JSEntryStub"
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 927e09a940..046f692c07 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -69,7 +69,7 @@ void CompilationSubCache::Age() {
}
void CompilationSubCache::Iterate(RootVisitor* v) {
- v->VisitRootPointers(Root::kCompilationCache, &tables_[0],
+ v->VisitRootPointers(Root::kCompilationCache, nullptr, &tables_[0],
&tables_[generations_]);
}
@@ -123,11 +123,11 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
-InfoVectorPair CompilationCacheScript::Lookup(
+MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode) {
- InfoVectorPair result;
+ MaybeHandle<SharedFunctionInfo> result;
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
@@ -135,19 +135,15 @@ InfoVectorPair CompilationCacheScript::Lookup(
const int generation = 0;
DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
- InfoVectorPair probe = table->LookupScript(source, context, language_mode);
- if (probe.has_shared()) {
- Handle<SharedFunctionInfo> function_info(probe.shared(), isolate());
- Handle<Cell> vector_handle;
- if (probe.has_vector()) {
- vector_handle = Handle<Cell>(probe.vector(), isolate());
- }
+ MaybeHandle<SharedFunctionInfo> probe =
+ table->LookupScript(source, context, language_mode);
+ Handle<SharedFunctionInfo> function_info;
+ if (probe.ToHandle(&function_info)) {
// Break when we've found a suitable shared function info that
// matches the origin.
if (HasOrigin(function_info, name, line_offset, column_offset,
resource_options)) {
- result = InfoVectorPair(*function_info,
- probe.has_vector() ? *vector_handle : nullptr);
+ result = scope.CloseAndEscape(function_info);
}
}
}
@@ -155,19 +151,13 @@ InfoVectorPair CompilationCacheScript::Lookup(
// Once outside the manacles of the handle scope, we need to recheck
// to see if we actually found a cached script. If so, we return a
// handle created in the caller's handle scope.
- if (result.has_shared()) {
+ Handle<SharedFunctionInfo> function_info;
+ if (result.ToHandle(&function_info)) {
#ifdef DEBUG
// Since HasOrigin can allocate, we need to protect the SharedFunctionInfo
- // and the FeedbackVector with handles during the call.
- Handle<SharedFunctionInfo> shared(result.shared(), isolate());
- Handle<Cell> vector_handle;
- if (result.has_vector()) {
- vector_handle = Handle<Cell>(result.vector(), isolate());
- }
- DCHECK(
- HasOrigin(shared, name, line_offset, column_offset, resource_options));
- result =
- InfoVectorPair(*shared, result.has_vector() ? *vector_handle : nullptr);
+ // with handles during the call.
+ DCHECK(HasOrigin(function_info, name, line_offset, column_offset,
+ resource_options));
#endif
isolate()->counters()->compilation_cache_hits()->Increment();
} else {
@@ -178,22 +168,23 @@ InfoVectorPair CompilationCacheScript::Lookup(
void CompilationCacheScript::Put(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals) {
+ Handle<SharedFunctionInfo> function_info) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
- SetFirstTable(CompilationCacheTable::PutScript(
- table, source, context, language_mode, function_info, literals));
+ SetFirstTable(CompilationCacheTable::PutScript(table, source, context,
+ language_mode, function_info));
}
-InfoVectorPair CompilationCacheEval::Lookup(
- Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> native_context, LanguageMode language_mode, int position) {
+InfoCellPair CompilationCacheEval::Lookup(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> native_context,
+ LanguageMode language_mode,
+ int position) {
HandleScope scope(isolate());
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
- InfoVectorPair result;
+ InfoCellPair result;
const int generation = 0;
DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
@@ -211,12 +202,13 @@ void CompilationCacheEval::Put(Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
Handle<SharedFunctionInfo> function_info,
Handle<Context> native_context,
- Handle<Cell> literals, int position) {
+ Handle<FeedbackCell> feedback_cell,
+ int position) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
table =
CompilationCacheTable::PutEval(table, source, outer_info, function_info,
- native_context, literals, position);
+ native_context, feedback_cell, position);
SetFirstTable(table);
}
@@ -263,21 +255,22 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
script_.Remove(function_info);
}
-InfoVectorPair CompilationCache::LookupScript(
+MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<String> source, MaybeHandle<Object> name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Context> context, LanguageMode language_mode) {
- InfoVectorPair empty_result;
- if (!IsEnabled()) return empty_result;
+ if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
return script_.Lookup(source, name, line_offset, column_offset,
resource_options, context, language_mode);
}
-InfoVectorPair CompilationCache::LookupEval(
- Handle<String> source, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, LanguageMode language_mode, int position) {
- InfoVectorPair result;
+InfoCellPair CompilationCache::LookupEval(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context,
+ LanguageMode language_mode,
+ int position) {
+ InfoCellPair result;
if (!IsEnabled()) return result;
if (context->IsNativeContext()) {
@@ -302,29 +295,29 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals) {
+ Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
- script_.Put(source, context, language_mode, function_info, literals);
+ script_.Put(source, context, language_mode, function_info);
}
void CompilationCache::PutEval(Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals, int position) {
+ Handle<FeedbackCell> feedback_cell,
+ int position) {
if (!IsEnabled()) return;
HandleScope scope(isolate());
if (context->IsNativeContext()) {
- eval_global_.Put(source, outer_info, function_info, context, literals,
+ eval_global_.Put(source, outer_info, function_info, context, feedback_cell,
position);
} else {
DCHECK_NE(position, kNoSourcePosition);
Handle<Context> native_context(context->native_context(), isolate());
eval_contextual_.Put(source, outer_info, function_info, native_context,
- literals, position);
+ feedback_cell, position);
}
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 3c9751ac2f..0072d3b487 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -79,14 +79,16 @@ class CompilationCacheScript : public CompilationSubCache {
public:
explicit CompilationCacheScript(Isolate* isolate);
- InfoVectorPair Lookup(Handle<String> source, MaybeHandle<Object> name,
- int line_offset, int column_offset,
- ScriptOriginOptions resource_options,
- Handle<Context> context, LanguageMode language_mode);
+ MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
+ MaybeHandle<Object> name,
+ int line_offset, int column_offset,
+ ScriptOriginOptions resource_options,
+ Handle<Context> context,
+ LanguageMode language_mode);
void Put(Handle<String> source, Handle<Context> context,
- LanguageMode language_mode, Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals);
+ LanguageMode language_mode,
+ Handle<SharedFunctionInfo> function_info);
private:
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
@@ -114,14 +116,15 @@ class CompilationCacheEval: public CompilationSubCache {
explicit CompilationCacheEval(Isolate* isolate)
: CompilationSubCache(isolate, 1) {}
- InfoVectorPair Lookup(Handle<String> source,
- Handle<SharedFunctionInfo> outer_info,
- Handle<Context> native_context,
- LanguageMode language_mode, int position);
+ InfoCellPair Lookup(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> native_context,
+ LanguageMode language_mode, int position);
void Put(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<SharedFunctionInfo> function_info,
- Handle<Context> native_context, Handle<Cell> literals, int position);
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
@@ -152,19 +155,18 @@ class CompilationCache {
// Finds the script shared function info for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
- InfoVectorPair LookupScript(Handle<String> source, MaybeHandle<Object> name,
- int line_offset, int column_offset,
- ScriptOriginOptions resource_options,
- Handle<Context> context,
- LanguageMode language_mode);
+ MaybeHandle<SharedFunctionInfo> LookupScript(
+ Handle<String> source, MaybeHandle<Object> name, int line_offset,
+ int column_offset, ScriptOriginOptions resource_options,
+ Handle<Context> context, LanguageMode language_mode);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
- InfoVectorPair LookupEval(Handle<String> source,
- Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, LanguageMode language_mode,
- int position);
+ InfoCellPair LookupEval(Handle<String> source,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, LanguageMode language_mode,
+ int position);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
@@ -175,15 +177,14 @@ class CompilationCache {
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source, Handle<Context> context,
LanguageMode language_mode,
- Handle<SharedFunctionInfo> function_info,
- Handle<Cell> literals);
+ Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
// with the shared function info. This may overwrite an existing mapping.
void PutEval(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
- Handle<SharedFunctionInfo> function_info, Handle<Cell> literals,
- int position);
+ Handle<SharedFunctionInfo> function_info,
+ Handle<FeedbackCell> feedback_cell, int position);
// Associate the (source, flags) pair to the given regexp data.
// This may overwrite an existing mapping.
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
index 990d536e38..fa26e67b1a 100644
--- a/deps/v8/src/compilation-dependencies.h
+++ b/deps/v8/src/compilation-dependencies.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DEPENDENCIES_H_
-#define V8_DEPENDENCIES_H_
+#ifndef V8_COMPILATION_DEPENDENCIES_H_
+#define V8_COMPILATION_DEPENDENCIES_H_
#include "src/handles.h"
#include "src/objects.h"
@@ -71,4 +71,4 @@ class CompilationDependencies {
} // namespace internal
} // namespace v8
-#endif // V8_DEPENDENCIES_H_
+#endif // V8_COMPILATION_DEPENDENCIES_H_
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index 27e6dbb9da..85d887ceb7 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -16,12 +16,9 @@
namespace v8 {
namespace internal {
-// TODO(mvstanton): the Code::OPTIMIZED_FUNCTION constant below is
-// bogus, it's just that I've eliminated Code::FUNCTION and there isn't
-// a "better" value to put in this place.
CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
FunctionLiteral* literal)
- : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, BASE, zone) {
+ : CompilationInfo({}, AbstractCode::INTERPRETED_FUNCTION, zone) {
// NOTE: The parse_info passed here represents the global information gathered
// during parsing, but does not represent specific details of the actual
// function literal being compiled for this CompilationInfo. As such,
@@ -39,7 +36,7 @@ CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure)
- : CompilationInfo({}, Code::OPTIMIZED_FUNCTION, OPTIMIZE, zone) {
+ : CompilationInfo({}, AbstractCode::OPTIMIZED_FUNCTION, zone) {
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
@@ -47,6 +44,8 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
+ if (!FLAG_turbo_disable_switch_jump_table) SetFlag(kSwitchJumpTableEnabled);
+ if (FLAG_untrusted_code_mitigations) MarkAsPoisoningRegisterArguments();
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
@@ -58,24 +57,27 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
CompilationInfo::CompilationInfo(Vector<const char> debug_name, Zone* zone,
Code::Kind code_kind)
- : CompilationInfo(debug_name, code_kind, STUB, zone) {}
+ : CompilationInfo(debug_name, static_cast<AbstractCode::Kind>(code_kind),
+ zone) {
+ if (code_kind == Code::BYTECODE_HANDLER && has_untrusted_code_mitigations()) {
+ SetFlag(CompilationInfo::kGenerateSpeculationPoisonOnEntry);
+ }
+}
CompilationInfo::CompilationInfo(Vector<const char> debug_name,
- Code::Kind code_kind, Mode mode, Zone* zone)
+ AbstractCode::Kind code_kind, Zone* zone)
: literal_(nullptr),
source_range_map_(nullptr),
flags_(FLAG_untrusted_code_mitigations ? kUntrustedCodeMitigations : 0),
code_kind_(code_kind),
stub_key_(0),
builtin_index_(Builtins::kNoBuiltinId),
- mode_(mode),
osr_offset_(BailoutId::None()),
feedback_vector_spec_(zone),
zone_(zone),
deferred_handles_(nullptr),
dependencies_(nullptr),
bailout_reason_(BailoutReason::kNoReason),
- parameter_count_(0),
optimization_id_(-1),
debug_name_(debug_name) {}
@@ -94,15 +96,15 @@ DeclarationScope* CompilationInfo::scope() const {
}
int CompilationInfo::num_parameters() const {
- return !IsStub() ? scope()->num_parameters() : parameter_count_;
+ DCHECK(!IsStub());
+ return scope()->num_parameters();
}
int CompilationInfo::num_parameters_including_this() const {
- return num_parameters() + (is_this_defined() ? 1 : 0);
+ DCHECK(!IsStub());
+ return scope()->num_parameters() + 1;
}
-bool CompilationInfo::is_this_defined() const { return !IsStub(); }
-
void CompilationInfo::set_deferred_handles(
std::shared_ptr<DeferredHandles> deferred_handles) {
DCHECK_NULL(deferred_handles_);
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index bb5812002e..e68b6d88b4 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -45,12 +45,16 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
kAccessorInliningEnabled = 1 << 3,
kFunctionContextSpecializing = 1 << 4,
kInliningEnabled = 1 << 5,
- kDisableFutureOptimization = 1 << 6,
- kSplittingEnabled = 1 << 7,
- kSourcePositionsEnabled = 1 << 8,
- kBailoutOnUninitialized = 1 << 9,
- kLoopPeelingEnabled = 1 << 10,
- kUntrustedCodeMitigations = 1 << 11,
+ kPoisonLoads = 1 << 6,
+ kDisableFutureOptimization = 1 << 7,
+ kSplittingEnabled = 1 << 8,
+ kSourcePositionsEnabled = 1 << 9,
+ kBailoutOnUninitialized = 1 << 10,
+ kLoopPeelingEnabled = 1 << 11,
+ kUntrustedCodeMitigations = 1 << 12,
+ kSwitchJumpTableEnabled = 1 << 13,
+ kGenerateSpeculationPoisonOnEntry = 1 << 14,
+ kPoisonRegisterArguments = 1 << 15,
};
// TODO(mtrofin): investigate if this might be generalized outside wasm, with
@@ -60,9 +64,9 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
struct WasmCodeDesc {
CodeDesc code_desc;
size_t safepoint_table_offset = 0;
+ size_t handler_table_offset = 0;
uint32_t frame_slot_count = 0;
Handle<ByteArray> source_positions_table;
- MaybeHandle<HandlerTable> handler_table;
};
// Construct a compilation info for unoptimized compilation.
@@ -99,7 +103,11 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
bool has_shared_info() const { return !shared_info().is_null(); }
Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
- Code::Kind code_kind() const { return code_kind_; }
+ AbstractCode::Kind abstract_code_kind() const { return code_kind_; }
+ Code::Kind code_kind() const {
+ DCHECK(code_kind_ < static_cast<AbstractCode::Kind>(Code::NUMBER_OF_KINDS));
+ return static_cast<Code::Kind>(code_kind_);
+ }
uint32_t stub_key() const { return stub_key_; }
void set_stub_key(uint32_t stub_key) { stub_key_ = stub_key; }
int32_t builtin_index() const { return builtin_index_; }
@@ -108,12 +116,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
JavaScriptFrame* osr_frame() const { return osr_frame_; }
int num_parameters() const;
int num_parameters_including_this() const;
- bool is_this_defined() const;
-
- void set_parameter_count(int parameter_count) {
- DCHECK(IsStub());
- parameter_count_ = parameter_count;
- }
bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
@@ -154,6 +156,9 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
+ void MarkAsPoisonLoads() { SetFlag(kPoisonLoads); }
+ bool is_poison_loads() const { return GetFlag(kPoisonLoads); }
+
void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
@@ -169,6 +174,27 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
return GetFlag(kUntrustedCodeMitigations);
}
+ bool switch_jump_table_enabled() const {
+ return GetFlag(kSwitchJumpTableEnabled);
+ }
+
+ bool is_generating_speculation_poison_on_entry() const {
+ bool enabled = GetFlag(kGenerateSpeculationPoisonOnEntry);
+ DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
+ return enabled;
+ }
+
+ void MarkAsPoisoningRegisterArguments() {
+ DCHECK(has_untrusted_code_mitigations());
+ SetFlag(kGenerateSpeculationPoisonOnEntry);
+ SetFlag(kPoisonRegisterArguments);
+ }
+ bool is_poisoning_register_arguments() const {
+ bool enabled = GetFlag(kPoisonRegisterArguments);
+ DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
+ return enabled;
+ }
+
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }
@@ -193,9 +219,17 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
JSGlobalObject* global_object() const;
// Accessors for the different compilation modes.
- bool IsOptimizing() const { return mode_ == OPTIMIZE; }
- bool IsStub() const { return mode_ == STUB; }
- bool IsWasm() const { return code_kind() == Code::WASM_FUNCTION; }
+ bool IsOptimizing() const {
+ return abstract_code_kind() == AbstractCode::OPTIMIZED_FUNCTION;
+ }
+ bool IsWasm() const {
+ return abstract_code_kind() == AbstractCode::WASM_FUNCTION;
+ }
+ bool IsStub() const {
+ return abstract_code_kind() != AbstractCode::OPTIMIZED_FUNCTION &&
+ abstract_code_kind() != AbstractCode::WASM_FUNCTION &&
+ abstract_code_kind() != AbstractCode::INTERPRETED_FUNCTION;
+ }
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
osr_offset_ = osr_offset;
@@ -275,15 +309,8 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
WasmCodeDesc* wasm_code_desc() { return &wasm_code_desc_; }
private:
- // Compilation mode.
- // BASE is generated by the full codegen, optionally prepared for bailouts.
- // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
- enum Mode { BASE, OPTIMIZE, STUB };
-
- CompilationInfo(Vector<const char> debug_name, Code::Kind code_kind,
- Mode mode, Zone* zone);
-
- void SetMode(Mode mode) { mode_ = mode; }
+ CompilationInfo(Vector<const char> debug_name, AbstractCode::Kind code_kind,
+ Zone* zone);
void SetFlag(Flag flag) { flags_ |= flag; }
@@ -298,7 +325,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
unsigned flags_;
- Code::Kind code_kind_;
+ AbstractCode::Kind code_kind_;
uint32_t stub_key_;
int32_t builtin_index_;
@@ -310,8 +337,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
Handle<Code> code_;
WasmCodeDesc wasm_code_desc_;
- // Compilation mode flag and whether deoptimization is allowed.
- Mode mode_;
+ // Entry point when compiling for OSR, {BailoutId::None} otherwise.
BailoutId osr_offset_;
// Holds the bytecode array generated by the interpreter.
@@ -338,9 +364,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
InlinedFunctionList inlined_functions_;
- // Number of parameters used for compilation of stubs that require arguments.
- int parameter_count_;
-
int optimization_id_;
// The current OSR frame for specialization or {nullptr}.
diff --git a/deps/v8/src/compilation-statistics.h b/deps/v8/src/compilation-statistics.h
index 388117b10e..1f70336fcc 100644
--- a/deps/v8/src/compilation-statistics.h
+++ b/deps/v8/src/compilation-statistics.h
@@ -90,4 +90,4 @@ std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& s);
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILATION_STATISTICS_H_
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 1adfd090cd..27af96c85e 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -92,7 +92,7 @@ MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
MemoryPressureTask::~MemoryPressureTask() {}
void MemoryPressureTask::RunInternal() {
- dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
+ dispatcher_->AbortAll(BlockingBehavior::kDontBlock);
}
} // namespace
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index ee20e8d02e..240f025c1e 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -72,8 +72,6 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
public:
typedef uintptr_t JobId;
- enum class BlockingBehavior { kBlock, kDontBlock };
-
CompilerDispatcher(Isolate* isolate, Platform* platform,
size_t max_stack_size);
~CompilerDispatcher();
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 59872b2535..7dc73b146c 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -25,10 +25,10 @@ void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
- // TODO(mvstanton): We can't call EnsureLiterals here due to allocation,
- // but we probably shouldn't call set_code either, as this
+ // TODO(mvstanton): We can't call EnsureFeedbackVector here due to
+ // allocation, but we probably shouldn't call set_code either, as this
// sometimes runs on the worker thread!
- // JSFunction::EnsureLiterals(function);
+ // JSFunction::EnsureFeedbackVector(function);
}
delete job;
}
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index d1d295f063..551b7c3563 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -23,8 +23,6 @@ class SharedFunctionInfo;
class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
public:
- enum class BlockingBehavior { kBlock, kDontBlock };
-
explicit OptimizingCompileDispatcher(Isolate* isolate)
: isolate_(isolate),
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
index 74b2352bd8..23a607a093 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
@@ -251,7 +251,7 @@ void UnoptimizedCompileJob::Compile(bool on_background_thread) {
}
compilation_job_.reset(interpreter::Interpreter::NewCompilationJob(
- parse_info_.get(), parse_info_->literal(), allocator_));
+ parse_info_.get(), parse_info_->literal(), allocator_, nullptr));
if (!compilation_job_.get()) {
parse_info_->pending_error_handler()->set_stack_overflow();
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index e2f8ee0f39..9b25832668 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -10,7 +10,6 @@
#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
-#include "src/ast/ast-numbering.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/optional.h"
@@ -31,11 +30,13 @@
#include "src/messages.h"
#include "src/objects/map.h"
#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/code-serializer.h"
+#include "src/unicode-cache.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -370,20 +371,9 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job,
return status;
}
-bool Renumber(ParseInfo* parse_info,
- Compiler::EagerInnerFunctionLiterals* eager_literals) {
- RuntimeCallTimerScope runtimeTimer(
- parse_info->runtime_call_stats(),
- parse_info->on_background_thread()
- ? RuntimeCallCounterId::kCompileBackgroundRenumber
- : RuntimeCallCounterId::kCompileRenumber);
- return AstNumbering::Renumber(parse_info->stack_limit(), parse_info->zone(),
- parse_info->literal(), eager_literals);
-}
-
-std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJob(
+std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJobs(
ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator) {
+ AccountingAllocator* allocator, CompilationJobList* inner_function_jobs) {
if (UseAsmWasm(literal, parse_info->is_asm_wasm_broken())) {
std::unique_ptr<CompilationJob> asm_job(
AsmJs::NewCompilationJob(parse_info, literal, allocator));
@@ -396,14 +386,27 @@ std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJob(
// with a validation error or another error that could be solved by falling
// through to standard unoptimized compile.
}
+ ZoneVector<FunctionLiteral*> eager_inner_literals(0, parse_info->zone());
std::unique_ptr<CompilationJob> job(
- interpreter::Interpreter::NewCompilationJob(parse_info, literal,
- allocator));
+ interpreter::Interpreter::NewCompilationJob(
+ parse_info, literal, allocator, &eager_inner_literals));
- if (job->ExecuteJob() == CompilationJob::SUCCEEDED) {
- return job;
+ if (job->ExecuteJob() != CompilationJob::SUCCEEDED) {
+ // Compilation failed, return null.
+ return std::unique_ptr<CompilationJob>();
}
- return std::unique_ptr<CompilationJob>(); // Compilation failed, return null.
+
+ // Recursively compile eager inner literals.
+ for (FunctionLiteral* inner_literal : eager_inner_literals) {
+ std::unique_ptr<CompilationJob> inner_job(
+ PrepareAndExecuteUnoptimizedCompileJobs(
+ parse_info, inner_literal, allocator, inner_function_jobs));
+ // Compilation failed, return null.
+ if (!inner_job) return std::unique_ptr<CompilationJob>();
+ inner_function_jobs->emplace_front(std::move(inner_job));
+ }
+
+ return job;
}
std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
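With AST renumbering gone, eager inner functions are no longer collected up front; PrepareAndExecuteUnoptimizedCompileJobs now receives them from the interpreter job via |eager_inner_literals| and recurses, threading each finished inner job onto |inner_function_jobs|. A self-contained sketch of that recursion pattern, using simplified stand-in types rather than the V8 classes:

#include <forward_list>
#include <memory>
#include <vector>

struct Literal {
  std::vector<Literal*> eager_inner;  // inner functions to compile eagerly
};

struct Job {};  // stand-in for a finished unoptimized CompilationJob
using JobList = std::forward_list<std::unique_ptr<Job>>;

// Compiles |literal|; jobs for all transitively eager inner literals are
// pushed onto |inner_jobs|. Returns null on failure, as the real code does.
std::unique_ptr<Job> CompileRecursive(Literal* literal, JobList* inner_jobs) {
  auto job = std::make_unique<Job>();  // stands in for ExecuteJob() succeeding
  for (Literal* inner : literal->eager_inner) {
    std::unique_ptr<Job> inner_job = CompileRecursive(inner, inner_jobs);
    if (!inner_job) return nullptr;  // propagate failure to the caller
    inner_jobs->emplace_front(std::move(inner_job));
  }
  return job;
}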
@@ -414,27 +417,16 @@ std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
DisallowHandleDereference no_deref;
DCHECK(inner_function_jobs->empty());
- Compiler::EagerInnerFunctionLiterals inner_literals;
- if (!Compiler::Analyze(parse_info, &inner_literals)) {
+ if (!Compiler::Analyze(parse_info)) {
return std::unique_ptr<CompilationJob>();
}
// Prepare and execute compilation of the outer-most function.
std::unique_ptr<CompilationJob> outer_function_job(
- PrepareAndExecuteUnoptimizedCompileJob(parse_info, parse_info->literal(),
- allocator));
+ PrepareAndExecuteUnoptimizedCompileJobs(parse_info, parse_info->literal(),
+ allocator, inner_function_jobs));
if (!outer_function_job) return std::unique_ptr<CompilationJob>();
- // Prepare and execute compilation jobs for eager inner functions.
- for (auto it : inner_literals) {
- FunctionLiteral* inner_literal = it->value();
- std::unique_ptr<CompilationJob> inner_job(
- PrepareAndExecuteUnoptimizedCompileJob(parse_info, inner_literal,
- allocator));
- if (!inner_job) return std::unique_ptr<CompilationJob>();
- inner_function_jobs->emplace_front(std::move(inner_job));
- }
-
// Character stream shouldn't be used again.
parse_info->ResetCharacterStream();
@@ -491,7 +483,7 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
if (osr_offset.IsNone()) {
- if (function->feedback_vector_cell()->value()->IsFeedbackVector()) {
+ if (function->feedback_cell()->value()->IsFeedbackVector()) {
FeedbackVector* feedback_vector = function->feedback_vector();
feedback_vector->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "GetCodeFromOptimizedCodeCache");
@@ -619,6 +611,11 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
function->ClearOptimizationMarker();
}
+ if (isolate->debug()->needs_check_on_function_call()) {
+ // Do not optimize when debugger needs to hook into every call.
+ return MaybeHandle<Code>();
+ }
+
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeCache(function, osr_offset)
.ToHandle(&cached_code)) {
@@ -771,6 +768,21 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
return CompilationJob::FAILED;
}
+bool FailWithPendingException(Isolate* isolate, ParseInfo* parse_info,
+ Compiler::ClearExceptionFlag flag) {
+ if (flag == Compiler::CLEAR_EXCEPTION) {
+ isolate->clear_pending_exception();
+ } else if (!isolate->has_pending_exception()) {
+ if (parse_info->pending_error_handler()->has_pending_error()) {
+ parse_info->pending_error_handler()->ReportErrors(
+ isolate, parse_info->script(), parse_info->ast_value_factory());
+ } else {
+ isolate->StackOverflow();
+ }
+ }
+ return false;
+}
+
MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
ParseInfo* parse_info, Isolate* isolate, CompilationJob* outer_function_job,
CompilationJobList* inner_function_jobs) {
@@ -792,7 +804,8 @@ MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
// Finalize compilation of the unoptimized bytecode or asm-js data.
if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
outer_function_job, inner_function_jobs)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ FailWithPendingException(isolate, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
@@ -834,7 +847,8 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
std::unique_ptr<CompilationJob> outer_function_job(GenerateUnoptimizedCode(
parse_info, isolate->allocator(), &inner_function_jobs));
if (!outer_function_job) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ FailWithPendingException(isolate, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
@@ -842,14 +856,124 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
&inner_function_jobs);
}
-bool FailWithPendingException(Isolate* isolate,
- Compiler::ClearExceptionFlag flag) {
- if (flag == Compiler::CLEAR_EXCEPTION) {
- isolate->clear_pending_exception();
- } else if (!isolate->has_pending_exception()) {
- isolate->StackOverflow();
+std::unique_ptr<CompilationJob> CompileTopLevelOnBackgroundThread(
+ ParseInfo* parse_info, AccountingAllocator* allocator,
+ CompilationJobList* inner_function_jobs) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileCodeBackground");
+ RuntimeCallTimerScope runtimeTimer(
+ parse_info->runtime_call_stats(),
+ parse_info->is_eval() ? RuntimeCallCounterId::kCompileBackgroundEval
+ : RuntimeCallCounterId::kCompileBackgroundScript);
+
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
+ parse_info->set_language_mode(
+ stricter_language_mode(parse_info->language_mode(), language_mode));
+
+ // Can't access scope info data off-main-thread.
+ DCHECK(!parse_info->consumed_preparsed_scope_data()->HasData());
+
+ // Generate the unoptimized bytecode or asm-js data.
+ std::unique_ptr<CompilationJob> outer_function_job(
+ GenerateUnoptimizedCode(parse_info, allocator, inner_function_jobs));
+ return outer_function_job;
+}
+
+class BackgroundCompileTask : public ScriptCompiler::ScriptStreamingTask {
+ public:
+ BackgroundCompileTask(ScriptStreamingData* source, Isolate* isolate);
+
+ virtual void Run();
+
+ private:
+ ScriptStreamingData* source_; // Not owned.
+ int stack_size_;
+ ScriptData* script_data_;
+ AccountingAllocator* allocator_;
+ TimedHistogram* timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTask);
+};
+
+BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* source,
+ Isolate* isolate)
+ : source_(source),
+ stack_size_(i::FLAG_stack_size),
+ script_data_(nullptr),
+ timer_(isolate->counters()->compile_script_on_background()) {
+ VMState<PARSER> state(isolate);
+
+ // Prepare the data for the internalization phase and the compilation phase,
+ // which will happen on the main thread after parsing.
+ ParseInfo* info = new ParseInfo(isolate->allocator());
+ info->InitFromIsolate(isolate);
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
+ } else {
+ info->set_runtime_call_stats(nullptr);
+ }
+ info->set_toplevel();
+ std::unique_ptr<Utf16CharacterStream> stream(
+ ScannerStream::For(source->source_stream.get(), source->encoding,
+ info->runtime_call_stats()));
+ info->set_character_stream(std::move(stream));
+ info->set_unicode_cache(&source_->unicode_cache);
+ info->set_allow_lazy_parsing();
+ if (V8_UNLIKELY(info->block_coverage_enabled())) {
+ info->AllocateSourceRangeMap();
}
- return false;
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
+ info->set_language_mode(
+ stricter_language_mode(info->language_mode(), language_mode));
+
+ source->info.reset(info);
+ allocator_ = isolate->allocator();
+
+ // Parser needs to stay alive for finalizing the parsing on the main
+ // thread.
+ source_->parser.reset(new Parser(source_->info.get()));
+ source_->parser->DeserializeScopeChain(source_->info.get(),
+ MaybeHandle<ScopeInfo>());
+}
+
+void BackgroundCompileTask::Run() {
+ TimedHistogramScope timer(timer_);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
+ source_->info->set_on_background_thread(true);
+
+ // Reset the stack limit of the parser to reflect correctly that we're on a
+ // background thread.
+ uintptr_t old_stack_limit = source_->info->stack_limit();
+ uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
+ source_->info->set_stack_limit(stack_limit);
+ source_->parser->set_stack_limit(stack_limit);
+
+ source_->parser->ParseOnBackground(source_->info.get());
+ if (FLAG_background_compile && source_->info->literal() != nullptr) {
+ // Parsing has succeeded, compile.
+ source_->outer_function_job = CompileTopLevelOnBackgroundThread(
+ source_->info.get(), allocator_, &source_->inner_function_jobs);
+ }
+
+ if (script_data_ != nullptr) {
+ source_->cached_data.reset(new ScriptCompiler::CachedData(
+ script_data_->data(), script_data_->length(),
+ ScriptCompiler::CachedData::BufferOwned));
+ script_data_->ReleaseDataOwnership();
+ delete script_data_;
+ script_data_ = nullptr;
+ }
+
+ source_->info->EmitBackgroundParseStatisticsOnBackgroundThread();
+
+ source_->info->set_on_background_thread(false);
+ source_->info->set_stack_limit(old_stack_limit);
}
} // namespace
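BackgroundCompileTask::Run has to repair the stack limit because the ParseInfo was initialized on the main thread, whose stack bounds mean nothing on a worker: the new limit is the current stack position minus the configured stack size. A stand-alone sketch of the idea; GetCurrentStackPosition is a V8 internal, and the address-of-a-local trick below is only an approximation for illustration:

#include <cstdint>

constexpr uintptr_t KB = 1024;

// Approximates the current stack pointer via the address of a local variable.
uintptr_t ApproximateStackPosition() {
  char marker = 0;
  return reinterpret_cast<uintptr_t>(&marker);
}

// Mirrors Run(): derive this thread's limit from its own stack, use it for
// the duration of the work, and restore the saved main-thread value after.
uintptr_t BackgroundStackLimit(uintptr_t stack_size_in_kb) {
  return ApproximateStackPosition() - stack_size_in_kb * KB;
}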
@@ -857,8 +981,7 @@ bool FailWithPendingException(Isolate* isolate,
// ----------------------------------------------------------------------------
// Implementation of Compiler
-bool Compiler::Analyze(ParseInfo* parse_info,
- EagerInnerFunctionLiterals* eager_literals) {
+bool Compiler::Analyze(ParseInfo* parse_info) {
DCHECK_NOT_NULL(parse_info->literal());
RuntimeCallTimerScope runtimeTimer(
parse_info->runtime_call_stats(),
@@ -866,8 +989,7 @@ bool Compiler::Analyze(ParseInfo* parse_info,
? RuntimeCallCounterId::kCompileBackgroundAnalyse
: RuntimeCallCounterId::kCompileAnalyse);
if (!Rewriter::Rewrite(parse_info)) return false;
- DeclarationScope::Analyze(parse_info);
- if (!Renumber(parse_info, eager_literals)) return false;
+ if (!DeclarationScope::Analyze(parse_info)) return false;
return true;
}
@@ -897,18 +1019,19 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
+ // Set up parse info.
+ ParseInfo parse_info(shared_info);
+ parse_info.set_lazy_compile();
+
// Check if the compiler dispatcher has shared_info enqueued for compile.
CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
if (dispatcher->IsEnqueued(shared_info)) {
if (!dispatcher->FinishNow(shared_info)) {
- return FailWithPendingException(isolate, flag);
+ return FailWithPendingException(isolate, &parse_info, flag);
}
return true;
}
- // Set up parse info.
- ParseInfo parse_info(shared_info);
- parse_info.set_lazy_compile();
if (FLAG_preparser_scope_analysis) {
if (shared_info->HasPreParsedScopeData()) {
Handle<PreParsedScopeData> data(
@@ -922,7 +1045,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
// Parse and update ParseInfo with the results.
if (!parsing::ParseFunction(&parse_info, shared_info, isolate)) {
- return FailWithPendingException(isolate, flag);
+ return FailWithPendingException(isolate, &parse_info, flag);
}
// Generate the unoptimized bytecode or asm-js data.
@@ -930,7 +1053,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
std::unique_ptr<CompilationJob> outer_function_job(GenerateUnoptimizedCode(
&parse_info, isolate->allocator(), &inner_function_jobs));
if (!outer_function_job) {
- return FailWithPendingException(isolate, flag);
+ return FailWithPendingException(isolate, &parse_info, flag);
}
// Internalize ast values onto the heap.
@@ -940,7 +1063,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
if (!FinalizeUnoptimizedCode(&parse_info, isolate, shared_info,
outer_function_job.get(),
&inner_function_jobs)) {
- return FailWithPendingException(isolate, flag);
+ return FailWithPendingException(isolate, &parse_info, flag);
}
DCHECK(!isolate->has_pending_exception());
@@ -962,8 +1085,8 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
if (!shared_info->is_compiled() && !Compile(shared_info, flag)) return false;
Handle<Code> code = handle(shared_info->code(), isolate);
- // Allocate literals for the JSFunction.
- JSFunction::EnsureLiterals(function);
+ // Allocate FeedbackVector for the JSFunction.
+ JSFunction::EnsureFeedbackVector(function);
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared()->HasAsmWasmData()) {
@@ -1075,27 +1198,28 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// is unused (just 0), which means it's an available field to use to indicate
// this separation. But to make sure we're not causing other false hits, we
// negate the scope position.
- int position = eval_scope_position;
if (FLAG_harmony_function_tostring &&
restriction == ONLY_SINGLE_FUNCTION_LITERAL &&
parameters_end_pos != kNoSourcePosition) {
// Use the parameters_end_pos as the eval_scope_position in the eval cache.
DCHECK_EQ(eval_scope_position, 0);
- position = -parameters_end_pos;
+ eval_scope_position = -parameters_end_pos;
}
CompilationCache* compilation_cache = isolate->compilation_cache();
- InfoVectorPair eval_result = compilation_cache->LookupEval(
- source, outer_info, context, language_mode, position);
- Handle<Cell> vector;
- if (eval_result.has_vector()) {
- vector = Handle<Cell>(eval_result.vector(), isolate);
+ InfoCellPair eval_result = compilation_cache->LookupEval(
+ source, outer_info, context, language_mode, eval_scope_position);
+ Handle<FeedbackCell> feedback_cell;
+ if (eval_result.has_feedback_cell()) {
+ feedback_cell = handle(eval_result.feedback_cell(), isolate);
}
Handle<SharedFunctionInfo> shared_info;
Handle<Script> script;
+ bool allow_eval_cache;
if (eval_result.has_shared()) {
shared_info = Handle<SharedFunctionInfo>(eval_result.shared(), isolate);
script = Handle<Script>(Script::cast(shared_info->script()), isolate);
+ allow_eval_cache = true;
} else {
script = isolate->factory()->NewScript(source);
if (isolate->NeedsSourcePositionsForProfiling()) {
@@ -1139,6 +1263,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (!CompileToplevel(&parse_info, isolate).ToHandle(&shared_info)) {
return MaybeHandle<JSFunction>();
}
+ allow_eval_cache = parse_info.allow_eval_cache();
}
// If caller is strict mode, the result must be in strict mode as well.
@@ -1146,27 +1271,32 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<JSFunction> result;
if (eval_result.has_shared()) {
- if (eval_result.has_vector()) {
+ if (eval_result.has_feedback_cell()) {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared_info, context, vector, NOT_TENURED);
+ shared_info, context, feedback_cell, NOT_TENURED);
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, NOT_TENURED);
- JSFunction::EnsureLiterals(result);
- // Make sure to cache this result.
- Handle<Cell> new_vector(result->feedback_vector_cell(), isolate);
- compilation_cache->PutEval(source, outer_info, context, shared_info,
- new_vector, eval_scope_position);
+ JSFunction::EnsureFeedbackVector(result);
+ if (allow_eval_cache) {
+ // Make sure to cache this result.
+ Handle<FeedbackCell> new_feedback_cell(result->feedback_cell(),
+ isolate);
+ compilation_cache->PutEval(source, outer_info, context, shared_info,
+ new_feedback_cell, eval_scope_position);
+ }
}
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, NOT_TENURED);
- JSFunction::EnsureLiterals(result);
- // Add the SharedFunctionInfo and the LiteralsArray to the eval cache if
- // we didn't retrieve from there.
- Handle<Cell> vector(result->feedback_vector_cell(), isolate);
- compilation_cache->PutEval(source, outer_info, context, shared_info, vector,
- eval_scope_position);
+ JSFunction::EnsureFeedbackVector(result);
+ if (allow_eval_cache) {
+ // Add the SharedFunctionInfo and the FeedbackCell to the eval cache if
+ // we didn't retrieve from there.
+ Handle<FeedbackCell> new_feedback_cell(result->feedback_cell(), isolate);
+ compilation_cache->PutEval(source, outer_info, context, shared_info,
+ new_feedback_cell, eval_scope_position);
+ }
}
// OnAfterCompile has to be called after we create the JSFunction, which we
@@ -1228,15 +1358,6 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
return Handle<JSFunction>::cast(result);
}
-namespace {
-
-bool ShouldProduceCodeCache(ScriptCompiler::CompileOptions options) {
- return options == ScriptCompiler::kProduceCodeCache ||
- options == ScriptCompiler::kProduceFullCodeCache;
-}
-
-} // namespace
-
bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
Handle<Context> context,
Handle<String> source) {
@@ -1477,30 +1598,55 @@ struct ScriptCompileTimerScope {
}
};
+Handle<Script> NewScript(Isolate* isolate, Handle<String> source,
+ Compiler::ScriptDetails script_details,
+ ScriptOriginOptions origin_options,
+ NativesFlag natives) {
+ // Create a script object describing the script to be compiled.
+ Handle<Script> script = isolate->factory()->NewScript(source);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
+ if (natives == NATIVES_CODE) {
+ script->set_type(Script::TYPE_NATIVE);
+ } else if (natives == EXTENSION_CODE) {
+ script->set_type(Script::TYPE_EXTENSION);
+ } else if (natives == INSPECTOR_CODE) {
+ script->set_type(Script::TYPE_INSPECTOR);
+ }
+ Handle<Object> script_name;
+ if (script_details.name_obj.ToHandle(&script_name)) {
+ script->set_name(*script_name);
+ script->set_line_offset(script_details.line_offset);
+ script->set_column_offset(script_details.column_offset);
+ }
+ script->set_origin_options(origin_options);
+ Handle<Object> source_map_url;
+ if (script_details.source_map_url.ToHandle(&source_map_url)) {
+ script->set_source_mapping_url(*source_map_url);
+ }
+ Handle<FixedArray> host_defined_options;
+ if (script_details.host_defined_options.ToHandle(&host_defined_options)) {
+ script->set_host_defined_options(*host_defined_options);
+ }
+ return script;
+}
+
} // namespace
MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
- Handle<String> source, MaybeHandle<Object> maybe_script_name,
- int line_offset, int column_offset, ScriptOriginOptions resource_options,
- MaybeHandle<Object> maybe_source_map_url, Handle<Context> context,
- v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options,
- ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives,
- MaybeHandle<FixedArray> maybe_host_defined_options) {
+ Handle<String> source, const Compiler::ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, v8::Extension* extension,
+ ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
Isolate* isolate = source->GetIsolate();
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
if (compile_options == ScriptCompiler::kNoCompileOptions ||
compile_options == ScriptCompiler::kEagerCompile) {
cached_data = nullptr;
- } else if (compile_options == ScriptCompiler::kProduceParserCache ||
- ShouldProduceCodeCache(compile_options)) {
- DCHECK(cached_data && !*cached_data);
- DCHECK_NULL(extension);
- DCHECK(!isolate->debug()->is_loaded());
} else {
- DCHECK(compile_options == ScriptCompiler::kConsumeParserCache ||
- compile_options == ScriptCompiler::kConsumeCodeCache);
+ DCHECK(compile_options == ScriptCompiler::kConsumeCodeCache);
DCHECK(cached_data && *cached_data);
DCHECK_NULL(extension);
}
@@ -1513,7 +1659,6 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Do a lookup in the compilation cache but not for extensions.
MaybeHandle<SharedFunctionInfo> maybe_result;
- Handle<Cell> vector;
if (extension == nullptr) {
bool can_consume_code_cache =
compile_options == ScriptCompiler::kConsumeCodeCache &&
@@ -1523,10 +1668,13 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
// First check per-isolate compilation cache.
- InfoVectorPair pair = compilation_cache->LookupScript(
- source, maybe_script_name, line_offset, column_offset, resource_options,
- context, language_mode);
- if (can_consume_code_cache && !pair.has_shared()) {
+ maybe_result = compilation_cache->LookupScript(
+ source, script_details.name_obj, script_details.line_offset,
+ script_details.column_offset, origin_options, isolate->native_context(),
+ language_mode);
+ if (!maybe_result.is_null()) {
+ compile_timer.set_hit_isolate_cache();
+ } else if (can_consume_code_cache) {
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
@@ -1539,196 +1687,137 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
.ToHandle(&inner_result)) {
// Promote to per-isolate compilation cache.
DCHECK(inner_result->is_compiled());
- Handle<FeedbackVector> feedback_vector =
- FeedbackVector::New(isolate, inner_result);
- vector = isolate->factory()->NewCell(feedback_vector);
- compilation_cache->PutScript(source, context, language_mode,
- inner_result, vector);
+ compilation_cache->PutScript(source, isolate->native_context(),
+ language_mode, inner_result);
Handle<Script> script(Script::cast(inner_result->script()), isolate);
- isolate->debug()->OnAfterCompile(script);
if (isolate->NeedsSourcePositionsForProfiling()) {
Script::InitLineEnds(script);
}
- return inner_result;
- }
- // Deserializer failed. Fall through to compile.
- compile_timer.set_consuming_code_cache_failed();
- } else {
- if (pair.has_shared()) {
- maybe_result = MaybeHandle<SharedFunctionInfo>(pair.shared(), isolate);
- compile_timer.set_hit_isolate_cache();
- }
- if (pair.has_vector()) {
- vector = Handle<Cell>(pair.vector(), isolate);
+ maybe_result = inner_result;
+ } else {
+ // Deserializer failed. Fall through to compile.
+ compile_timer.set_consuming_code_cache_failed();
}
}
}
- base::ElapsedTimer timer;
- if (FLAG_profile_deserialization && ShouldProduceCodeCache(compile_options)) {
- timer.Start();
- }
+ if (maybe_result.is_null()) {
+ // No cache entry found, compile the script.
+ Handle<Script> script =
+ NewScript(isolate, source, script_details, origin_options, natives);
- if (maybe_result.is_null() || ShouldProduceCodeCache(compile_options)) {
- // No cache entry found, or embedder wants a code cache. Compile the script.
-
- // Create a script object describing the script to be compiled.
- Handle<Script> script = isolate->factory()->NewScript(source);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
- }
- if (natives == NATIVES_CODE) {
- script->set_type(Script::TYPE_NATIVE);
- } else if (natives == EXTENSION_CODE) {
- script->set_type(Script::TYPE_EXTENSION);
- } else if (natives == INSPECTOR_CODE) {
- script->set_type(Script::TYPE_INSPECTOR);
- }
- Handle<Object> script_name;
- if (maybe_script_name.ToHandle(&script_name)) {
- script->set_name(*script_name);
- script->set_line_offset(line_offset);
- script->set_column_offset(column_offset);
- }
- script->set_origin_options(resource_options);
- Handle<Object> source_map_url;
- if (maybe_source_map_url.ToHandle(&source_map_url)) {
- script->set_source_mapping_url(*source_map_url);
- }
- Handle<FixedArray> host_defined_options;
- if (maybe_host_defined_options.ToHandle(&host_defined_options)) {
- script->set_host_defined_options(*host_defined_options);
- }
-
- // Compile the function and add it to the cache.
+ // Compile the function and add it to the isolate cache.
ParseInfo parse_info(script);
Zone compile_zone(isolate->allocator(), ZONE_NAME);
- if (resource_options.IsModule()) parse_info.set_module();
- if (compile_options != ScriptCompiler::kNoCompileOptions) {
- parse_info.set_cached_data(cached_data);
- }
- parse_info.set_compile_options(compile_options);
+ if (origin_options.IsModule()) parse_info.set_module();
parse_info.set_extension(extension);
- if (!context->IsNativeContext()) {
- parse_info.set_outer_scope_info(handle(context->scope_info()));
- }
- parse_info.set_eager(
- (compile_options == ScriptCompiler::kProduceFullCodeCache) ||
- (compile_options == ScriptCompiler::kEagerCompile));
+ parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
parse_info.set_language_mode(
stricter_language_mode(parse_info.language_mode(), language_mode));
maybe_result = CompileToplevel(&parse_info, isolate);
Handle<SharedFunctionInfo> result;
if (extension == nullptr && maybe_result.ToHandle(&result)) {
- // We need a feedback vector.
DCHECK(result->is_compiled());
- Handle<FeedbackVector> feedback_vector =
- FeedbackVector::New(isolate, result);
- vector = isolate->factory()->NewCell(feedback_vector);
- compilation_cache->PutScript(source, context, language_mode, result,
- vector);
- if (ShouldProduceCodeCache(compile_options) &&
- !script->ContainsAsmModule()) {
- compile_timer.set_producing_code_cache();
-
- HistogramTimerScope histogram_timer(
- isolate->counters()->compile_serialize());
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileSerialize);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompileSerialize");
- *cached_data = CodeSerializer::Serialize(isolate, result, source);
- if (FLAG_profile_deserialization) {
- PrintF("[Compiling and serializing took %0.3f ms]\n",
- timer.Elapsed().InMillisecondsF());
- }
- }
+ compilation_cache->PutScript(source, isolate->native_context(),
+ language_mode, result);
+ } else if (maybe_result.is_null() && natives != EXTENSION_CODE &&
+ natives != NATIVES_CODE) {
+ isolate->ReportPendingMessages();
}
+ }
- if (maybe_result.is_null()) {
- if (natives != EXTENSION_CODE && natives != NATIVES_CODE) {
- isolate->ReportPendingMessages();
- }
- } else {
- isolate->debug()->OnAfterCompile(script);
- }
+ // On success, report script compilation to the debugger.
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ isolate->debug()->OnAfterCompile(handle(Script::cast(result->script())));
}
+
return maybe_result;
}
-std::unique_ptr<CompilationJob> Compiler::CompileTopLevelOnBackgroundThread(
- ParseInfo* parse_info, AccountingAllocator* allocator,
- CompilationJobList* inner_function_jobs) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompileCodeBackground");
- RuntimeCallTimerScope runtimeTimer(
- parse_info->runtime_call_stats(),
- parse_info->is_eval() ? RuntimeCallCounterId::kCompileBackgroundEval
- : RuntimeCallCounterId::kCompileBackgroundScript);
-
- LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- parse_info->set_language_mode(
- stricter_language_mode(parse_info->language_mode(), language_mode));
-
- // Can't access scope info data off-main-thread.
- DCHECK(!parse_info->consumed_preparsed_scope_data()->HasData());
-
- // Generate the unoptimized bytecode or asm-js data.
- std::unique_ptr<CompilationJob> outer_function_job(
- GenerateUnoptimizedCode(parse_info, allocator, inner_function_jobs));
- return outer_function_job;
+ScriptCompiler::ScriptStreamingTask* Compiler::NewBackgroundCompileTask(
+ ScriptStreamingData* source, Isolate* isolate) {
+ return new BackgroundCompileTask(source, isolate);
}
-Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForBackgroundCompile(
- Handle<Script> script, ParseInfo* parse_info, int source_length,
- CompilationJob* outer_function_job,
- CompilationJobList* inner_function_jobs) {
- Isolate* isolate = script->GetIsolate();
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForStreamedScript(
+ Handle<String> source, const ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, ScriptStreamingData* streaming_data) {
+ Isolate* isolate = source->GetIsolate();
ScriptCompileTimerScope compile_timer(
isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
PostponeInterruptsScope postpone(isolate);
- // TODO(titzer): increment the counters in caller.
+ int source_length = source->length();
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
- if (outer_function_job == nullptr) {
- // Compilation failed on background thread - throw an exception.
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return Handle<SharedFunctionInfo>();
- }
-
- Handle<SharedFunctionInfo> result;
- if (FinalizeTopLevel(parse_info, isolate, outer_function_job,
- inner_function_jobs)
- .ToHandle(&result)) {
- isolate->debug()->OnAfterCompile(script);
- }
- return result;
-}
+ ParseInfo* parse_info = streaming_data->info.get();
+ parse_info->UpdateBackgroundParseStatisticsOnMainThread(isolate);
-Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
- Handle<Script> script, ParseInfo* parse_info, int source_length) {
- Isolate* isolate = script->GetIsolate();
- ScriptCompileTimerScope compile_timer(
- isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
- // TODO(titzer): increment the counters in caller.
- isolate->counters()->total_load_size()->Increment(source_length);
- isolate->counters()->total_compile_size()->Increment(source_length);
+ // Check if the compilation cache already holds the SFI; if so, there is no
+ // need to finalize the code compiled on the background thread.
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ MaybeHandle<SharedFunctionInfo> maybe_result =
+ compilation_cache->LookupScript(
+ source, script_details.name_obj, script_details.line_offset,
+ script_details.column_offset, origin_options,
+ isolate->native_context(), parse_info->language_mode());
+ if (!maybe_result.is_null()) {
+ compile_timer.set_hit_isolate_cache();
+ }
+
+ if (maybe_result.is_null()) {
+ // No cache entry found, finalize compilation of the script and add it to
+ // the isolate cache.
+ Handle<Script> script = NewScript(isolate, source, script_details,
+ origin_options, NOT_NATIVES_CODE);
+ parse_info->set_script(script);
+ streaming_data->parser->UpdateStatistics(isolate, script);
+ streaming_data->parser->HandleSourceURLComments(isolate, script);
+
+ if (parse_info->literal() == nullptr) {
+ // Parsing has failed - report error messages.
+ parse_info->pending_error_handler()->ReportErrors(
+ isolate, script, parse_info->ast_value_factory());
+ } else {
+ // Parsing has succeeded - finalize compilation.
+ if (i::FLAG_background_compile) {
+ // Finalize background compilation.
+ if (streaming_data->outer_function_job) {
+ maybe_result = FinalizeTopLevel(
+ parse_info, isolate, streaming_data->outer_function_job.get(),
+ &streaming_data->inner_function_jobs);
+ } else {
+ // Compilation failed on background thread - throw an exception.
+ FailWithPendingException(
+ isolate, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
+ }
+ } else {
+ // Compilation on main thread.
+ maybe_result = CompileToplevel(parse_info, isolate);
+ }
+ }
- LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- parse_info->set_language_mode(
- stricter_language_mode(parse_info->language_mode(), language_mode));
+ // Add compiled code to the isolate cache.
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ compilation_cache->PutScript(source, isolate->native_context(),
+ parse_info->language_mode(), result);
+ }
+ }
+ // On success, report script compilation to the debugger.
Handle<SharedFunctionInfo> result;
- if (CompileToplevel(parse_info, isolate).ToHandle(&result)) {
- isolate->debug()->OnAfterCompile(script);
+ if (maybe_result.ToHandle(&result)) {
+ isolate->debug()->OnAfterCompile(handle(Script::cast(result->script())));
}
- return result;
+
+ streaming_data->Release();
+ return maybe_result;
}
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
@@ -1789,8 +1878,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!shared->optimization_disabled() && !shared->HasAsmWasmData() &&
shared->is_compiled()) {
- // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
- JSFunction::EnsureLiterals(function);
+ JSFunction::EnsureFeedbackVector(function);
if (!function->IsOptimized()) {
// Only mark for optimization if we don't already have optimized code.
@@ -1801,8 +1889,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
}
if (shared->is_compiled() && !shared->HasAsmWasmData()) {
- // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
- JSFunction::EnsureLiterals(function);
+ JSFunction::EnsureFeedbackVector(function);
Code* code = function->feedback_vector()->optimized_code();
if (code != nullptr) {
@@ -1814,5 +1901,22 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
}
}
+// ----------------------------------------------------------------------------
+// Implementation of ScriptStreamingData
+
+ScriptStreamingData::ScriptStreamingData(
+ ScriptCompiler::ExternalSourceStream* source_stream,
+ ScriptCompiler::StreamedSource::Encoding encoding)
+ : source_stream(source_stream), encoding(encoding) {}
+
+ScriptStreamingData::~ScriptStreamingData() {}
+
+void ScriptStreamingData::Release() {
+ parser.reset();
+ info.reset();
+ outer_function_job.reset();
+ inner_function_jobs.clear();
+}
+
} // namespace internal
} // namespace v8
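Taken together, these compiler.cc hunks move the whole streaming pipeline behind two entry points: NewBackgroundCompileTask (parse, plus compile when --background-compile is on, off the main thread) and GetSharedFunctionInfoForStreamedScript (finalize on the main thread, consulting the isolate's compilation cache first). From the embedder's side the public API is unchanged; a hedged sketch of that flow, assuming an initialized isolate and context, with a placeholder stream class and script name of our own and error handling elided:

#include <memory>
#include <v8.h>

// Trivial stream that immediately reports end-of-data; a real embedder
// returns script chunks from GetMoreData as they arrive, e.g. off the network.
class EmptyStream : public v8::ScriptCompiler::ExternalSourceStream {
 public:
  size_t GetMoreData(const uint8_t** src) override {
    *src = nullptr;
    return 0;
  }
};

v8::MaybeLocal<v8::Script> StreamAndCompile(v8::Isolate* isolate,
                                            v8::Local<v8::Context> context,
                                            v8::Local<v8::String> full_source) {
  // StreamedSource takes ownership of the stream; internally it is now backed
  // by ScriptStreamingData, filled in by a BackgroundCompileTask.
  v8::ScriptCompiler::StreamedSource source(
      new EmptyStream(), v8::ScriptCompiler::StreamedSource::UTF8);
  std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task(
      v8::ScriptCompiler::StartStreamingScript(isolate, &source));
  task->Run();  // a real embedder runs this on a worker thread
  // Finalization happens here, on the main thread.
  v8::ScriptOrigin origin(v8::String::NewFromUtf8(isolate, "stream.js",
                                                  v8::NewStringType::kNormal)
                              .ToLocalChecked());
  return v8::ScriptCompiler::Compile(context, &source, full_source, origin);
}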
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index b84134c14e..ca6b0893d0 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -13,6 +13,7 @@
#include "src/code-events.h"
#include "src/contexts.h"
#include "src/isolate.h"
+#include "src/unicode-cache.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -23,11 +24,9 @@ class CompilationInfo;
class CompilationJob;
class JavaScriptFrame;
class ParseInfo;
+class Parser;
class ScriptData;
-template <typename T>
-class ThreadedList;
-template <typename T>
-class ThreadedListZoneEntry;
+struct ScriptStreamingData;
typedef std::forward_list<std::unique_ptr<CompilationJob>> CompilationJobList;
@@ -57,11 +56,12 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
- // Compile top level code on a background thread. Should be finalized by
- // GetSharedFunctionInfoForBackgroundCompile.
- static std::unique_ptr<CompilationJob> CompileTopLevelOnBackgroundThread(
- ParseInfo* parse_info, AccountingAllocator* allocator,
- CompilationJobList* inner_function_jobs);
+ // Creates a new task that, when run, will parse and compile the streamed
+ // script associated with |streaming_data|; the result can be finalized with
+ // Compiler::GetSharedFunctionInfoForStreamedScript.
+ // Note: does not take ownership of |streaming_data|.
+ static ScriptCompiler::ScriptStreamingTask* NewBackgroundCompileTask(
+ ScriptStreamingData* streaming_data, Isolate* isolate);
// Generate and install code from previously queued compilation job.
static bool FinalizeCompilationJob(CompilationJob* job, Isolate* isolate);
@@ -71,17 +71,12 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// offer this chance, optimized closure instantiation will not call this.
static void PostInstantiation(Handle<JSFunction> function, PretenureFlag);
- typedef ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>
- EagerInnerFunctionLiterals;
-
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(ParseInfo* parse_info,
Handle<SharedFunctionInfo> shared_info,
Isolate* isolate);
- // Rewrite, analyze scopes, and renumber. If |eager_literals| is non-null, it
- // is appended with inner function literals which should be eagerly compiled.
- static bool Analyze(ParseInfo* parse_info,
- EagerInnerFunctionLiterals* eager_literals = nullptr);
+ // Rewrite and analyze scopes.
+ static bool Analyze(ParseInfo* parse_info);
// ===========================================================================
// The following family of methods instantiates new functions for scripts or
@@ -120,28 +115,34 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Handle<Context> context, Handle<String> source,
ParseRestriction restriction, int parameters_end_pos);
- // Create a shared function info object for a String source within a context.
+ struct ScriptDetails {
+ ScriptDetails() : line_offset(0), column_offset(0) {}
+ explicit ScriptDetails(Handle<Object> script_name)
+ : line_offset(0), column_offset(0), name_obj(script_name) {}
+
+ int line_offset;
+ int column_offset;
+ i::MaybeHandle<i::Object> name_obj;
+ i::MaybeHandle<i::Object> source_map_url;
+ i::MaybeHandle<i::FixedArray> host_defined_options;
+ };
+
+ // Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
- Handle<String> source, MaybeHandle<Object> maybe_script_name,
- int line_offset, int column_offset, ScriptOriginOptions resource_options,
- MaybeHandle<Object> maybe_source_map_url, Handle<Context> context,
- v8::Extension* extension, ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options,
+ Handle<String> source, const ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, v8::Extension* extension,
+ ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason,
- NativesFlag is_natives_code,
- MaybeHandle<FixedArray> maybe_host_defined_options);
-
- // Create a shared function info object for a Script that has already been
- // parsed while the script was being loaded from a streamed source.
- static Handle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
- Handle<Script> script, ParseInfo* info, int source_length);
+ NativesFlag is_natives_code);
- // Create a shared function info object for a Script that has already been
- // compiled on a background thread.
- static Handle<SharedFunctionInfo> GetSharedFunctionInfoForBackgroundCompile(
- Handle<Script> script, ParseInfo* parse_info, int source_length,
- CompilationJob* outer_function_job,
- CompilationJobList* inner_function_jobs);
+ // Create a shared function info object for a Script source that has already
+ // been parsed and possibly compiled on a background thread while being loaded
+ // from a streamed source. On return, the data held by |streaming_data| will
+ // have been released; the object itself, however, is not freed and is still
+ // owned by the caller.
+ static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
+ Handle<String> source, const ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, ScriptStreamingData* streaming_data);
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
@@ -246,6 +247,34 @@ class V8_EXPORT_PRIVATE CompilationJob {
}
};
+// Contains all data which needs to be transmitted between threads for
+// background parsing and compiling, and for finalizing it on the main thread.
+struct ScriptStreamingData {
+ ScriptStreamingData(ScriptCompiler::ExternalSourceStream* source_stream,
+ ScriptCompiler::StreamedSource::Encoding encoding);
+ ~ScriptStreamingData();
+
+ void Release();
+
+ // Internal implementation of v8::ScriptCompiler::StreamedSource.
+ std::unique_ptr<ScriptCompiler::ExternalSourceStream> source_stream;
+ ScriptCompiler::StreamedSource::Encoding encoding;
+ std::unique_ptr<ScriptCompiler::CachedData> cached_data;
+
+ // Data needed for parsing, and data needed to be passed between threads
+ // between parsing and compilation. These need to be initialized before the
+ // compilation starts.
+ UnicodeCache unicode_cache;
+ std::unique_ptr<ParseInfo> info;
+ std::unique_ptr<Parser> parser;
+
+ // Data needed for finalizing compilation after background compilation.
+ std::unique_ptr<CompilationJob> outer_function_job;
+ CompilationJobList inner_function_jobs;
+
+ DISALLOW_COPY_AND_ASSIGN(ScriptStreamingData);
+};
+
} // namespace internal
} // namespace v8
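Two smaller patterns in this header are worth noting: the long parameter list of GetSharedFunctionInfoForScript collapses into a ScriptDetails parameter object, and ScriptStreamingData::Release frees the heavy members while leaving the object itself alive for its owner, which is exactly the contract documented on GetSharedFunctionInfoForStreamedScript. A minimal sketch of that release-but-keep pattern, with stand-in members:

#include <memory>

struct Payload {};  // stand-in for ParseInfo / Parser / CompilationJob

struct StreamingDataSketch {
  std::unique_ptr<Payload> info;
  std::unique_ptr<Payload> parser;

  // Drops the transferred data; the struct stays valid and caller-owned, so
  // the owner can still destroy or reuse it after finalization.
  void Release() {
    info.reset();
    parser.reset();
  }
};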
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 2e9052e0c3..f250db84b9 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -5,16 +5,16 @@ jarin@chromium.org
mstarzinger@chromium.org
titzer@chromium.org
danno@chromium.org
+sigurds@chromium.org
tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
# For backend
bbudge@chromium.org
-mtrofin@chromium.org
+gdeepti@chromium.org
per-file wasm-*=ahaas@chromium.org
-per-file wasm-*=bbudge@chromium.org
per-file wasm-*=bradnelson@chromium.org
per-file wasm-*=clemensh@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 13d6801c32..e187d7170c 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -164,8 +164,8 @@ FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
}
// static
-FieldAccess AccessBuilder::ForJSFunctionFeedbackVector() {
- FieldAccess access = {kTaggedBase, JSFunction::kFeedbackVectorOffset,
+FieldAccess AccessBuilder::ForJSFunctionFeedbackCell() {
+ FieldAccess access = {kTaggedBase, JSFunction::kFeedbackCellOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -289,12 +289,12 @@ FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectQueue() {
}
// static
-FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise() {
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectIsAwaiting() {
FieldAccess access = {
- kTaggedBase, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
+ kTaggedBase, JSAsyncGeneratorObject::kIsAwaitingOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
@@ -1001,6 +1001,10 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
MachineType::Float64(), kNoWriteBarrier};
return access;
}
+ case kExternalBigInt64Array:
+ case kExternalBigUint64Array:
+ // TODO(neis/jkummerow): Define appropriate types.
+ UNIMPLEMENTED();
}
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index a2ce1f800b..fb8535c167 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -70,8 +70,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSFunction::shared() field.
static FieldAccess ForJSFunctionSharedFunctionInfo();
- // Provides access to JSFunction::feedback_vector() field.
- static FieldAccess ForJSFunctionFeedbackVector();
+ // Provides access to JSFunction::feedback_cell() field.
+ static FieldAccess ForJSFunctionFeedbackCell();
// Provides access to JSFunction::code() field.
static FieldAccess ForJSFunctionCode();
@@ -109,8 +109,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSAsyncGeneratorObject::queue() field.
static FieldAccess ForJSAsyncGeneratorObjectQueue();
- // Provides access to JSAsyncGeneratorObject::awaited_promise() field.
- static FieldAccess ForJSAsyncGeneratorObjectAwaitedPromise();
+ // Provides access to JSAsyncGeneratorObject::is_awaiting() field.
+ static FieldAccess ForJSAsyncGeneratorObjectIsAwaiting();
// Provides access to JSArray::length() field.
static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 9b0c4b41b1..c1254e4cdb 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -25,7 +25,11 @@ bool CanInlineElementAccess(Handle<Map> map) {
if (map->has_indexed_interceptor()) return false;
ElementsKind const elements_kind = map->elements_kind();
if (IsFastElementsKind(elements_kind)) return true;
- if (IsFixedTypedArrayElementsKind(elements_kind)) return true;
+ if (IsFixedTypedArrayElementsKind(elements_kind) &&
+ elements_kind != BIGUINT64_ELEMENTS &&
+ elements_kind != BIGINT64_ELEMENTS) {
+ return true;
+ }
return false;
}
@@ -533,6 +537,18 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return false;
}
+bool AccessInfoFactory::ComputePropertyAccessInfo(
+ MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
+ PropertyAccessInfo* access_info) {
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ if (ComputePropertyAccessInfos(maps, name, access_mode, &access_infos) &&
+ access_infos.size() == 1) {
+ *access_info = access_infos.front();
+ return true;
+ }
+ return false;
+}
+
bool AccessInfoFactory::ComputePropertyAccessInfos(
MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos) {
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index dcdb0f35f0..54d402738b 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -149,6 +149,9 @@ class AccessInfoFactory final {
bool ComputePropertyAccessInfo(Handle<Map> map, Handle<Name> name,
AccessMode access_mode,
PropertyAccessInfo* access_info);
+ bool ComputePropertyAccessInfo(MapHandles const& maps, Handle<Name> name,
+ AccessMode access_mode,
+ PropertyAccessInfo* access_info);
bool ComputePropertyAccessInfos(MapHandles const& maps, Handle<Name> name,
AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos);
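The new overload declared above is a thin convenience wrapper: it reuses the multi-map ComputePropertyAccessInfos and succeeds only when all maps agree on a single PropertyAccessInfo, as the access_infos.size() == 1 check in the .cc hunk shows. The same wrapper shape, reduced to standard C++ with stand-in types:

#include <string>
#include <vector>

struct AccessInfo {};

// Stand-in for the vector-producing overload.
bool ComputeInfos(const std::vector<int>& maps, const std::string& name,
                  std::vector<AccessInfo>* infos);

// Succeeds only if every map collapses to one common access info.
bool ComputeSingleInfo(const std::vector<int>& maps, const std::string& name,
                       AccessInfo* out) {
  std::vector<AccessInfo> infos;
  if (ComputeInfos(maps, name, &infos) && infos.size() == 1) {
    *out = infos.front();
    return true;
  }
  return false;
}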
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index a238cf29d4..8636c639e0 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -21,9 +21,6 @@ namespace compiler {
#define __ tasm()->
-#define kScratchReg r9
-
-
// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter final : public InstructionOperandConverter {
public:
@@ -33,7 +30,9 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
+ case kFlags_branch_and_poison:
case kFlags_deoptimize:
+ case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
return SetCC;
@@ -44,21 +43,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
}
Operand InputImmediate(size_t index) {
- Constant constant = ToConstant(instr_->InputAt(index));
- switch (constant.type()) {
- case Constant::kInt32:
- return Operand(constant.ToInt32());
- case Constant::kFloat32:
- return Operand::EmbeddedNumber(constant.ToFloat32());
- case Constant::kFloat64:
- return Operand::EmbeddedNumber(constant.ToFloat64().value());
- case Constant::kInt64:
- case Constant::kExternalReference:
- case Constant::kHeapObject:
- case Constant::kRpoNumber:
- break;
- }
- UNREACHABLE();
+ return ToImmediate(instr_->InputAt(index));
}
Operand InputOperand2(size_t first_index) {
@@ -124,6 +109,30 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return InputOffset(&first_index);
}
+ Operand ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ if (RelocInfo::IsWasmReference(constant.rmode())) {
+ return Operand(constant.ToInt32(), constant.rmode());
+ } else {
+ return Operand(constant.ToInt32());
+ }
+ case Constant::kFloat32:
+ return Operand::EmbeddedNumber(constant.ToFloat32());
+ case Constant::kFloat64:
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
+ case Constant::kExternalReference:
+ return Operand(constant.ToExternalReference());
+ case Constant::kInt64:
+ case Constant::kHeapObject:
+ // TODO(dcarney): loading RPO constants on arm.
+ case Constant::kRpoNumber:
+ break;
+ }
+ UNREACHABLE();
+ }
+
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
@@ -314,6 +323,17 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ ArmOperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
+ }
+}
+
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -571,28 +591,54 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ ComputeCodeStartAddress(scratch);
+ __ cmp(scratch, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. compute the offset of the {CodeDataContainer} from our current location
-// and load it.
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- int pc_offset = __ pc_offset();
- int offset = Code::kCodeDataContainerOffset -
- (Code::kHeaderSize + pc_offset + TurboAssembler::kPcLoadDelta);
- // We can use the register pc - 8 for the address of the current instruction.
- __ ldr_pcrel(ip, offset);
- __ ldr(ip, FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
- __ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ Jump(code, RelocInfo::CODE_TARGET, ne);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+
+ // Set a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ __ ComputeCodeStartAddress(scratch);
+ __ cmp(kJavaScriptCallCodeStartRegister, scratch);
+ __ mov(kSpeculationPoisonRegister, Operand(-1), SBit::LeaveCC, eq);
+ __ mov(kSpeculationPoisonRegister, Operand(0), SBit::LeaveCC, ne);
+ __ csdb();
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ and_(sp, sp, kSpeculationPoisonRegister);
+}
+
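+// GenerateSpeculationPoison above builds a Spectre-style poison mask without
+// branching on the comparison result: all bits set when
+// kJavaScriptCallCodeStartRegister matches the freshly computed code start,
+// all bits clear otherwise, with csdb() serializing speculative data flow.
+// EmitWordLoadPoisoningIfNeeded and AssembleRegisterArgumentPoisoning then
+// AND values with the mask, so a mis-speculated path only ever observes
+// zeros. The mask computation in portable form (a sketch of the idea, not
+// the emitted ARM sequence):
+//
+//   #include <cstdint>
+//
+//   // All-ones when the addresses match, all-zeros when they do not.
+//   uintptr_t SpeculationPoison(uintptr_t expected_code_start,
+//                               uintptr_t actual_code_start) {
+//     uintptr_t equal =
+//         static_cast<uintptr_t>(expected_code_start == actual_code_start);
+//     return ~(equal - 1);  // 1 -> all ones, 0 -> all zeros
+//   }
+//
+//   // Poison a speculatively loaded word before it can feed a dependent
+//   // load, as EmitWordLoadPoisoningIfNeeded does.
+//   uintptr_t PoisonLoad(uintptr_t loaded_value, uintptr_t poison) {
+//     return loaded_value & poison;
+//   }
+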
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -611,9 +657,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ add(ip, i.InputRegister(0),
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ add(scratch, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ __ Call(scratch);
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -657,9 +705,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
- __ add(ip, i.InputRegister(0),
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ add(scratch, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ __ Jump(scratch);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
unwinding_info_writer_.MarkBlockWillExit();
@@ -701,14 +751,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
// Check the function's context matches the context argument.
- __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
- __ cmp(cp, kScratchReg);
+ __ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, scratch);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
- __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
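+      // The callee expects its code start in kJavaScriptCallCodeStartRegister
+      // (r2 on ARM): BailoutIfDeoptimized above reads the CodeDataContainer
+      // relative to that register, so the call must go through r2.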
+ __ ldr(r2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r2);
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
frame_access_state()->ClearSPDelta();
@@ -1154,7 +1207,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.InputInt32(2));
} else {
__ LslPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2));
}
break;
}
@@ -1166,7 +1219,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.InputInt32(2));
} else {
__ LsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2));
}
break;
}
@@ -1178,7 +1231,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.InputInt32(2));
} else {
__ AsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), kScratchReg, i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2));
}
break;
}
@@ -1354,35 +1407,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF32S32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF32U32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64S32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64U32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtS32F32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
@@ -1393,7 +1451,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtU32F32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
@@ -1404,14 +1463,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtS32F64: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtU32F64: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister scratch = temps.AcquireS();
__ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1453,10 +1514,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmStrb:
__ strb(i.InputRegister(0), i.InputOffset(1));
@@ -1464,9 +1527,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmStrh:
__ strh(i.InputRegister(0), i.InputOffset(1));
@@ -1474,6 +1539,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArmStr:
__ str(i.InputRegister(0), i.InputOffset(1));
@@ -1629,6 +1695,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArmDsbIsb: {
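+      // DSB SY drains outstanding memory accesses and ISB SY flushes the
+      // pipeline; together they act as a full speculation barrier.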
+ __ dsb(SY);
+ __ isb(SY);
+ break;
+ }
case kArmF32x4Splat: {
int src_code = i.InputFloatRegister(0).code();
__ vdup(Neon32, i.OutputSimd128Register(),
@@ -2202,41 +2273,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon32, dst, kScratchQuadReg); // dst = [0, 2, 4, 6]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon32, dst, scratch); // dst = [0, 2, 4, 6]
break;
}
case kArmS32x4UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon32, kScratchQuadReg, dst); // dst = [1, 3, 5, 7]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon32, scratch, dst); // dst = [1, 3, 5, 7]
break;
}
case kArmS32x4TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
DCHECK(dst == i.InputSimd128Register(0));
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
// src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon32, dst, kScratchQuadReg); // dst = [0, 4, 2, 6]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon32, dst, scratch); // dst = [0, 4, 2, 6]
break;
}
case kArmS32x4Shuffle: {
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
// Check for in-place shuffles.
// If dst == src0 == src1, then the shuffle is unary and we only use src0.
if (dst == src0) {
- __ vmov(kScratchQuadReg, src0);
- src0 = kScratchQuadReg;
+ Simd128Register scratch = temps.AcquireQ();
+ __ vmov(scratch, src0);
+ src0 = scratch;
} else if (dst == src1) {
- __ vmov(kScratchQuadReg, src1);
- src1 = kScratchQuadReg;
+ Simd128Register scratch = temps.AcquireQ();
+ __ vmov(scratch, src1);
+ src1 = scratch;
}
// Perform shuffle as a vmov per lane.
int dst_code = dst.code() * 4;
@@ -2258,10 +2338,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS32x4TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon32, kScratchQuadReg, dst); // dst = [1, 5, 3, 7]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon32, scratch, dst); // dst = [1, 5, 3, 7]
break;
}
case kArmS16x8ZipLeft: {
@@ -2285,37 +2367,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS16x8UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon16, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 14]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon16, dst, scratch); // dst = [0, 2, 4, 6, ... 14]
break;
}
case kArmS16x8UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon16, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 15]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon16, scratch, dst); // dst = [1, 3, 5, 7, ... 15]
break;
}
case kArmS16x8TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon16, dst, kScratchQuadReg); // dst = [0, 8, 2, 10, ... 14]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon16, dst, scratch); // dst = [0, 8, 2, 10, ... 14]
break;
}
case kArmS16x8TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon16, kScratchQuadReg, dst); // dst = [1, 9, 3, 11, ... 15]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon16, scratch, dst); // dst = [1, 9, 3, 11, ... 15]
break;
}
case kArmS8x16ZipLeft: {
@@ -2339,37 +2429,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmS8x16UnzipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon8, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 30]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon8, dst, scratch); // dst = [0, 2, 4, 6, ... 30]
break;
}
case kArmS8x16UnzipRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
- __ vmov(kScratchQuadReg, src1);
- __ vuzp(Neon8, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 31]
+ __ vmov(scratch, src1);
+ __ vuzp(Neon8, scratch, dst); // dst = [1, 3, 5, 7, ... 31]
break;
}
case kArmS8x16TransposeLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon8, dst, kScratchQuadReg); // dst = [0, 16, 2, 18, ... 30]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon8, dst, scratch); // dst = [0, 16, 2, 18, ... 30]
break;
}
case kArmS8x16TransposeRight: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
DCHECK(dst == i.InputSimd128Register(0));
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
- __ vmov(kScratchQuadReg, src1);
- __ vtrn(Neon8, kScratchQuadReg, dst); // dst = [1, 17, 3, 19, ... 31]
+ __ vmov(scratch, src1);
+ __ vtrn(Neon8, scratch, dst); // dst = [1, 17, 3, 19, ... 31]
break;
}
case kArmS8x16Concat: {
@@ -2382,12 +2480,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
DwVfpRegister table_base = src0.low();
+ UseScratchRegisterScope temps(tasm());
+ Simd128Register scratch = temps.AcquireQ();
// If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
// src1. They must be consecutive.
int table_size = src0 == src1 ? 2 : 4;
DCHECK_IMPLIES(src0 != src1, src0.code() + 1 == src1.code());
- // The shuffle lane mask is a byte mask, materialize in kScratchQuadReg.
- int scratch_s_base = kScratchQuadReg.code() * 4;
+      // The shuffle lane mask is a byte mask; materialize it in scratch.
+ int scratch_s_base = scratch.code() * 4;
for (int j = 0; j < 4; j++) {
uint32_t four_lanes = i.InputUint32(2 + j);
// Ensure byte indices are in [0, 31] so masks are never NaNs.
@@ -2397,12 +2497,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
NeonListOperand table(table_base, table_size);
if (dst != src0 && dst != src1) {
- __ vtbl(dst.low(), table, kScratchQuadReg.low());
- __ vtbl(dst.high(), table, kScratchQuadReg.high());
+ __ vtbl(dst.low(), table, scratch.low());
+ __ vtbl(dst.high(), table, scratch.high());
} else {
- __ vtbl(kScratchQuadReg.low(), table, kScratchQuadReg.low());
- __ vtbl(kScratchQuadReg.high(), table, kScratchQuadReg.high());
- __ vmov(dst, kScratchQuadReg);
+ __ vtbl(scratch.low(), table, scratch.low());
+ __ vtbl(scratch.high(), table, scratch.high());
+ __ vmov(dst, scratch);
}
break;
}
@@ -2432,149 +2532,156 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmS1x4AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmax(NeonU32, kScratchDoubleReg, src.low(), src.high());
- __ vpmax(NeonU32, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
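+      // Two rounds of pairwise vpmax fold the four 32-bit lanes into lane 0,
+      // so the word extracted below is non-zero iff any input lane was
+      // non-zero. (The AllTrue cases reduce with vpmin in the same way.)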
+ __ vpmax(NeonU32, scratch, src.low(), src.high());
+ __ vpmax(NeonU32, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
break;
}
case kArmS1x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmin(NeonU32, kScratchDoubleReg, src.low(), src.high());
- __ vpmin(NeonU32, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmin(NeonU32, scratch, src.low(), src.high());
+ __ vpmin(NeonU32, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
break;
}
case kArmS1x8AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmax(NeonU16, kScratchDoubleReg, src.low(), src.high());
- __ vpmax(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ vpmax(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS16, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmax(NeonU16, scratch, src.low(), src.high());
+ __ vpmax(NeonU16, scratch, scratch, scratch);
+ __ vpmax(NeonU16, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
break;
}
case kArmS1x8AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmin(NeonU16, kScratchDoubleReg, src.low(), src.high());
- __ vpmin(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ vpmin(NeonU16, kScratchDoubleReg, kScratchDoubleReg,
- kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS16, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmin(NeonU16, scratch, src.low(), src.high());
+ __ vpmin(NeonU16, scratch, scratch, scratch);
+ __ vpmin(NeonU16, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
break;
}
case kArmS1x16AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmax(NeonU8, kScratchDoubleReg, src.low(), src.high());
- __ vpmax(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- // vtst to detect any bits in the bottom 32 bits of kScratchDoubleReg.
+ UseScratchRegisterScope temps(tasm());
+ QwNeonRegister q_scratch = temps.AcquireQ();
+ DwVfpRegister d_scratch = q_scratch.low();
+ __ vpmax(NeonU8, d_scratch, src.low(), src.high());
+ __ vpmax(NeonU8, d_scratch, d_scratch, d_scratch);
+ // vtst to detect any bits in the bottom 32 bits of d_scratch.
// This saves an instruction vs. the naive sequence of vpmax.
// kDoubleRegZero is not changed, since it is 0.
- __ vtst(Neon32, kScratchQuadReg, kScratchQuadReg, kScratchQuadReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS32, 0);
+ __ vtst(Neon32, q_scratch, q_scratch, q_scratch);
+ __ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0);
break;
}
case kArmS1x16AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
- __ vpmin(NeonU8, kScratchDoubleReg, src.low(), src.high());
- __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpmin(NeonU8, kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS8, 0);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister scratch = temps.AcquireD();
+ __ vpmin(NeonU8, scratch, src.low(), src.high());
+ __ vpmin(NeonU8, scratch, scratch, scratch);
+ __ vpmin(NeonU8, scratch, scratch, scratch);
+ __ vpmin(NeonU8, scratch, scratch, scratch);
+ __ ExtractLane(i.OutputRegister(), scratch, NeonS8, 0);
break;
}
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(str);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
- case kAtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
break;
- case kAtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
break;
- case kAtomicCompareExchangeWord32:
+ case kWord32AtomicCompareExchangeWord32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
break;
ATOMIC_BINOP_CASE(Add, add)
@@ -2607,6 +2714,20 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ Operand(kSpeculationPoisonRegister), SBit::LeaveCC,
+ FlagsConditionToCondition(condition));
+ __ csdb();
+}
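+// If the negated condition holds (execution took the path the branch resolved
+// against), the predicated EOR zeroes the poison register, since x ^ x == 0;
+// otherwise it is left intact. The trailing `csdb` keeps speculation from
+// running ahead of that update.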
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -2654,8 +2775,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -2725,9 +2847,9 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
frame->AlignSavedCalleeRegisterSlots();
}
@@ -2741,7 +2863,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
(kDoubleSize / kPointerSize));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
frame->AllocateSavedCalleeRegisterSlots(base::bits::CountPopulation(saves));
@@ -2749,14 +2871,14 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ Push(lr, fp);
__ mov(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -2766,8 +2888,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -2780,10 +2902,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ InitializePoisonForLoadsIfNeeded();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
if (info()->IsWasm()) {
@@ -2861,8 +2984,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -2871,13 +2994,13 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ ldm(ia_w, sp, saves);
}
// Restore FP registers.
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
@@ -2889,7 +3012,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
unwinding_info_writer_.MarkBlockWillExit();
ArmOperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
@@ -2922,281 +3045,253 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(g.ToRegister(destination), src);
- } else {
- __ str(src, g.ToMemOperand(destination));
- }
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- MemOperand src = g.ToMemOperand(source);
- if (destination->IsRegister()) {
- __ ldr(g.ToRegister(destination), src);
- } else {
- Register temp = kScratchReg;
- __ ldr(temp, src);
- __ str(temp, g.ToMemOperand(destination));
- }
- } else if (source->IsConstant()) {
- Constant src = g.ToConstant(source);
- if (destination->IsRegister() || destination->IsStackSlot()) {
- Register dst =
- destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
- switch (src.type()) {
- case Constant::kInt32:
- if (RelocInfo::IsWasmReference(src.rmode())) {
- __ mov(dst, Operand(src.ToInt32(), src.rmode()));
- } else {
- __ mov(dst, Operand(src.ToInt32()));
- }
- break;
- case Constant::kInt64:
- UNREACHABLE();
- break;
- case Constant::kFloat32:
- __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
- break;
- case Constant::kFloat64:
- __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
- break;
- case Constant::kExternalReference:
- __ mov(dst, Operand(src.ToExternalReference()));
- break;
- case Constant::kHeapObject: {
- Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
- if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
- } else {
- __ Move(dst, src_object);
- }
- break;
- }
- case Constant::kRpoNumber:
- UNREACHABLE(); // TODO(dcarney): loading RPO constants on arm.
- break;
- }
- if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
- } else if (src.type() == Constant::kFloat32) {
- if (destination->IsFloatStackSlot()) {
- MemOperand dst = g.ToMemOperand(destination);
- Register temp = kScratchReg;
- __ mov(temp, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ str(temp, dst);
+ // Helper function to write the given constant to the dst register.
+ auto MoveConstantToRegister = [&](Register dst, Constant src) {
+ if (src.type() == Constant::kHeapObject) {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
} else {
- SwVfpRegister dst = g.ToFloatRegister(destination);
- __ vmov(dst, Float32::FromBits(src.ToFloat32AsInt()));
+ __ Move(dst, src_object);
}
} else {
- DCHECK_EQ(Constant::kFloat64, src.type());
- DwVfpRegister dst = destination->IsFPRegister()
- ? g.ToDoubleRegister(destination)
- : kScratchDoubleReg;
- __ vmov(dst, src.ToFloat64(), kScratchReg);
- if (destination->IsDoubleStackSlot()) {
- __ vstr(dst, g.ToMemOperand(destination));
- }
+ __ mov(dst, g.ToImmediate(source));
}
- } else if (source->IsFPRegister()) {
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- __ vstr(src, g.ToMemOperand(destination));
- }
- } else if (rep == MachineRepresentation::kFloat32) {
- // GapResolver may give us reg codes that don't map to actual s-registers.
- // Generate code to work around those cases.
- int src_code = LocationOperand::cast(source)->register_code();
- if (destination->IsFloatRegister()) {
+ };
+ switch (MoveType::InferMove(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ mov(g.ToRegister(destination), g.ToRegister(source));
+ } else if (source->IsFloatRegister()) {
+ DCHECK(destination->IsFloatRegister());
+ // GapResolver may give us reg codes that don't map to actual
+ // s-registers. Generate code to work around those cases.
+ int src_code = LocationOperand::cast(source)->register_code();
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(dst_code, src_code);
+ } else if (source->IsDoubleRegister()) {
+ __ Move(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
} else {
- DCHECK(destination->IsFloatStackSlot());
- __ VmovExtended(g.ToMemOperand(destination), src_code);
+ __ Move(g.ToSimd128Register(destination), g.ToSimd128Register(source));
}
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- QwNeonRegister src = g.ToSimd128Register(source);
- if (destination->IsSimd128Register()) {
- QwNeonRegister dst = g.ToSimd128Register(destination);
- __ Move(dst, src);
+ return;
+ case MoveType::kRegisterToStack: {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (source->IsRegister()) {
+ __ str(g.ToRegister(source), dst);
+ } else if (source->IsFloatRegister()) {
+ // GapResolver may give us reg codes that don't map to actual
+ // s-registers. Generate code to work around those cases.
+ int src_code = LocationOperand::cast(source)->register_code();
+ __ VmovExtended(dst, src_code);
+ } else if (source->IsDoubleRegister()) {
+ __ vstr(g.ToDoubleRegister(source), dst);
} else {
- DCHECK(destination->IsSimd128StackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
- __ vst1(Neon8, NeonListOperand(src.low(), 2),
- NeonMemOperand(kScratchReg));
+ UseScratchRegisterScope temps(tasm());
+ Register temp = temps.Acquire();
+ QwNeonRegister src = g.ToSimd128Register(source);
+ __ add(temp, dst.rn(), Operand(dst.offset()));
+ __ vst1(Neon8, NeonListOperand(src.low(), 2), NeonMemOperand(temp));
}
- }
- } else if (source->IsFPStackSlot()) {
- MemOperand src = g.ToMemOperand(source);
- MachineRepresentation rep =
- LocationOperand::cast(destination)->representation();
- if (destination->IsFPRegister()) {
- if (rep == MachineRepresentation::kFloat64) {
- __ vldr(g.ToDoubleRegister(destination), src);
- } else if (rep == MachineRepresentation::kFloat32) {
+ return;
+ }
+ case MoveType::kStackToRegister: {
+ MemOperand src = g.ToMemOperand(source);
+ if (source->IsStackSlot()) {
+ __ ldr(g.ToRegister(destination), src);
+ } else if (source->IsFloatStackSlot()) {
+ DCHECK(destination->IsFloatRegister());
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(dst_code, src);
+ } else if (source->IsDoubleStackSlot()) {
+ __ vldr(g.ToDoubleRegister(destination), src);
} else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UseScratchRegisterScope temps(tasm());
+ Register temp = temps.Acquire();
QwNeonRegister dst = g.ToSimd128Register(destination);
- __ add(kScratchReg, src.rn(), Operand(src.offset()));
- __ vld1(Neon8, NeonListOperand(dst.low(), 2),
- NeonMemOperand(kScratchReg));
+ __ add(temp, src.rn(), Operand(src.offset()));
+ __ vld1(Neon8, NeonListOperand(dst.low(), 2), NeonMemOperand(temp));
}
- } else {
- DCHECK(destination->IsFPStackSlot());
- if (rep == MachineRepresentation::kFloat64) {
- DwVfpRegister temp = kScratchDoubleReg;
+ return;
+ }
+ case MoveType::kStackToStack: {
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ UseScratchRegisterScope temps(tasm());
+ if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+ SwVfpRegister temp = temps.AcquireS();
__ vldr(temp, src);
- __ vstr(temp, g.ToMemOperand(destination));
- } else if (rep == MachineRepresentation::kFloat32) {
- SwVfpRegister temp = kScratchDoubleReg.low();
+ __ vstr(temp, dst);
+ } else if (source->IsDoubleStackSlot()) {
+ DwVfpRegister temp = temps.AcquireD();
__ vldr(temp, src);
- __ vstr(temp, g.ToMemOperand(destination));
+ __ vstr(temp, dst);
+ } else {
+ DCHECK(source->IsSimd128StackSlot());
+ Register temp = temps.Acquire();
+ QwNeonRegister temp_q = temps.AcquireQ();
+ __ add(temp, src.rn(), Operand(src.offset()));
+ __ vld1(Neon8, NeonListOperand(temp_q.low(), 2), NeonMemOperand(temp));
+ __ add(temp, dst.rn(), Operand(dst.offset()));
+ __ vst1(Neon8, NeonListOperand(temp_q.low(), 2), NeonMemOperand(temp));
+ }
+ return;
+ }
+ case MoveType::kConstantToRegister: {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister()) {
+ MoveConstantToRegister(g.ToRegister(destination), src);
+ } else if (destination->IsFloatRegister()) {
+ __ vmov(g.ToFloatRegister(destination),
+ Float32::FromBits(src.ToFloat32AsInt()));
} else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- MemOperand dst = g.ToMemOperand(destination);
- __ add(kScratchReg, src.rn(), Operand(src.offset()));
- __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
- NeonMemOperand(kScratchReg));
- __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
- __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
- NeonMemOperand(kScratchReg));
+ // TODO(arm): Look into optimizing this further if possible. Supporting
+ // the NEON version of VMOV may help.
+ __ vmov(g.ToDoubleRegister(destination), src.ToFloat64());
}
+ return;
+ }
+ case MoveType::kConstantToStack: {
+ Constant src = g.ToConstant(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ if (destination->IsStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ // Acquire a S register instead of a general purpose register in case
+ // `vstr` needs one to compute the address of `dst`.
+ SwVfpRegister s_temp = temps.AcquireS();
+ {
+ // TODO(arm): This sequence could be optimized further if necessary by
+ // writing the constant directly into `s_temp`.
+ UseScratchRegisterScope temps(tasm());
+ Register temp = temps.Acquire();
+ MoveConstantToRegister(temp, src);
+ __ vmov(s_temp, temp);
+ }
+ __ vstr(s_temp, dst);
+ } else if (destination->IsFloatStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister temp = temps.AcquireS();
+ __ vmov(temp, Float32::FromBits(src.ToFloat32AsInt()));
+ __ vstr(temp, dst);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister temp = temps.AcquireD();
+ // TODO(arm): Look into optimizing this further if possible. Supporting
+ // the NEON version of VMOV may help.
+ __ vmov(temp, src.ToFloat64());
+ __ vstr(temp, g.ToMemOperand(destination));
+ }
+ return;
}
- } else {
- UNREACHABLE();
}
+ UNREACHABLE();
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- // Register-register.
- Register temp = kScratchReg;
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ mov(temp, src);
- __ ldr(src, dst);
- __ str(temp, dst);
- }
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsStackSlot());
- Register temp_0 = kScratchReg;
- SwVfpRegister temp_1 = kScratchDoubleReg.low();
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ ldr(temp_0, src);
- __ vldr(temp_1, dst);
- __ str(temp_0, dst);
- __ vstr(temp_1, src);
- } else if (source->IsFPRegister()) {
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- LowDwVfpRegister temp = kScratchDoubleReg;
- if (rep == MachineRepresentation::kFloat64) {
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Swap(src, dst);
- } else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ vldr(src, dst);
- __ vstr(temp, dst);
- }
- } else if (rep == MachineRepresentation::kFloat32) {
- int src_code = LocationOperand::cast(source)->register_code();
- if (destination->IsFPRegister()) {
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ Swap(g.ToRegister(source), g.ToRegister(destination));
+ } else if (source->IsFloatRegister()) {
+ DCHECK(destination->IsFloatRegister());
+ // GapResolver may give us reg codes that don't map to actual
+ // s-registers. Generate code to work around those cases.
+ UseScratchRegisterScope temps(tasm());
+ LowDwVfpRegister temp = temps.AcquireLowD();
+ int src_code = LocationOperand::cast(source)->register_code();
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(temp.low().code(), src_code);
__ VmovExtended(src_code, dst_code);
__ VmovExtended(dst_code, temp.low().code());
+ } else if (source->IsDoubleRegister()) {
+ __ Swap(g.ToDoubleRegister(source), g.ToDoubleRegister(destination));
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
+ __ Swap(g.ToSimd128Register(source), g.ToSimd128Register(destination));
+ }
+ return;
+ case MoveType::kRegisterToStack: {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister temp = temps.AcquireS();
+ __ vmov(temp, src);
+ __ ldr(src, dst);
+ __ vstr(temp, dst);
+ } else if (source->IsFloatRegister()) {
+ int src_code = LocationOperand::cast(source)->register_code();
+ UseScratchRegisterScope temps(tasm());
+ LowDwVfpRegister temp = temps.AcquireLowD();
__ VmovExtended(temp.low().code(), src_code);
__ VmovExtended(src_code, dst);
__ vstr(temp.low(), dst);
- }
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- QwNeonRegister src = g.ToSimd128Register(source);
- if (destination->IsFPRegister()) {
- QwNeonRegister dst = g.ToSimd128Register(destination);
- __ Swap(src, dst);
+ } else if (source->IsDoubleRegister()) {
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister temp = temps.AcquireD();
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ __ Move(temp, src);
+ __ vldr(src, dst);
+ __ vstr(temp, dst);
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(kScratchQuadReg, src);
- __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
- __ vld1(Neon8, NeonListOperand(src.low(), 2),
- NeonMemOperand(kScratchReg));
- __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
- NeonMemOperand(kScratchReg));
+ QwNeonRegister src = g.ToSimd128Register(source);
+ UseScratchRegisterScope temps(tasm());
+ Register temp = temps.Acquire();
+ QwNeonRegister temp_q = temps.AcquireQ();
+ __ Move(temp_q, src);
+ __ add(temp, dst.rn(), Operand(dst.offset()));
+ __ vld1(Neon8, NeonListOperand(src.low(), 2), NeonMemOperand(temp));
+ __ vst1(Neon8, NeonListOperand(temp_q.low(), 2), NeonMemOperand(temp));
}
+ return;
}
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPStackSlot());
- Register temp_0 = kScratchReg;
- LowDwVfpRegister temp_1 = kScratchDoubleReg;
- MemOperand src0 = g.ToMemOperand(source);
- MemOperand dst0 = g.ToMemOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
- MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
- __ vldr(temp_1, dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ ldr(temp_0, src1);
- __ str(temp_0, dst1);
- __ vstr(temp_1, src0);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ vstr(temp_1.low(), src0);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- MemOperand src1(src0.rn(), src0.offset() + kDoubleSize);
- MemOperand dst1(dst0.rn(), dst0.offset() + kDoubleSize);
- __ vldr(kScratchQuadReg.low(), dst0);
- __ vldr(kScratchQuadReg.high(), src0);
- __ vstr(kScratchQuadReg.low(), src0);
- __ vstr(kScratchQuadReg.high(), dst0);
- __ vldr(kScratchQuadReg.low(), dst1);
- __ vldr(kScratchQuadReg.high(), src1);
- __ vstr(kScratchQuadReg.low(), src1);
- __ vstr(kScratchQuadReg.high(), dst1);
+ case MoveType::kStackToStack: {
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ if (source->IsStackSlot() || source->IsFloatStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ SwVfpRegister temp_0 = temps.AcquireS();
+ SwVfpRegister temp_1 = temps.AcquireS();
+ __ vldr(temp_0, dst);
+ __ vldr(temp_1, src);
+ __ vstr(temp_0, src);
+ __ vstr(temp_1, dst);
+ } else if (source->IsDoubleStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister temp_0 = temps.AcquireD();
+ DwVfpRegister temp_1 = temps.AcquireD();
+ __ vldr(temp_0, dst);
+ __ vldr(temp_1, src);
+ __ vstr(temp_0, src);
+ __ vstr(temp_1, dst);
+ } else {
+ DCHECK(source->IsSimd128StackSlot());
+ MemOperand src0 = src;
+ MemOperand dst0 = dst;
+ MemOperand src1(src.rn(), src.offset() + kDoubleSize);
+ MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
+ UseScratchRegisterScope temps(tasm());
+ DwVfpRegister temp_0 = temps.AcquireD();
+ DwVfpRegister temp_1 = temps.AcquireD();
+ __ vldr(temp_0, dst0);
+ __ vldr(temp_1, src0);
+ __ vstr(temp_0, src0);
+ __ vstr(temp_1, dst0);
+ __ vldr(temp_0, dst1);
+ __ vldr(temp_1, src1);
+ __ vstr(temp_0, src1);
+ __ vstr(temp_1, dst1);
+ }
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -3206,7 +3301,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
#undef __
-#undef kScratchReg
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index a7cf80450a..a9f9be38ef 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -125,6 +125,7 @@ namespace compiler {
V(ArmPush) \
V(ArmPoke) \
V(ArmPeek) \
+ V(ArmDsbIsb) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index a592515179..e538020f69 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -274,6 +274,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmStr:
case kArmPush:
case kArmPoke:
+ case kArmDsbIsb:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index f94d114d07..ef81c98716 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -460,11 +460,17 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
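+  // kMemoryAccessPoisoned travels in MiscField; the ARM code generator picks
+  // it up via EmitWordLoadPoisoningIfNeeded, which the kArmLdr* cases now
+  // invoke after each load.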
InstructionOperand output = g.DefineAsRegister(node);
EmitLoad(this, opcode, &output, base, index);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1121,7 +1127,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmDsbIsb, g.NoOutput());
+}
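+// Example: a SpeculationFence between a bounds check and its dependent load
+// now lowers to a single kArmDsbIsb, which the scheduler marks as having side
+// effects (see instruction-scheduler-arm.cc above) so it cannot be reordered.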
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
@@ -1157,6 +1166,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
}
+ break;
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher mleft(m.left().node());
@@ -1175,6 +1185,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
return;
}
}
+ break;
}
default:
break;
@@ -1211,6 +1222,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
g.UseRegister(mright.left().node()), g.TempImmediate(0));
return;
}
+ break;
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher mright(m.right().node());
@@ -1229,6 +1241,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
return;
}
}
+ break;
}
default:
break;
@@ -1395,6 +1408,7 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
#define RR_VISITOR_V8(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1403,6 +1417,7 @@ RR_OP_LIST(RR_VISITOR)
}
RR_OP_LIST_V8(RR_VISITOR_V8)
#undef RR_VISITOR_V8
+#undef RR_OP_LIST_V8
#define RRR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1410,6 +1425,7 @@ RR_OP_LIST_V8(RR_VISITOR_V8)
}
RRR_OP_LIST(RRR_VISITOR)
#undef RRR_VISITOR
+#undef RRR_OP_LIST
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
@@ -1500,14 +1516,14 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
ArmOperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1529,9 +1545,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
ArmOperandGenerator g(this);
int reverse_slot = 0;
@@ -1539,7 +1555,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
if (output.node != nullptr) {
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
@@ -1806,13 +1822,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, opcode, cont);
}
+} // namespace
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1821,41 +1837,41 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1867,21 +1883,21 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (!result || selector->IsDefined(result)) {
+ if (!result || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kArmAdd, kArmAdd, cont);
+ return VisitBinop(this, node, kArmAdd, kArmAdd, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
+ return VisitBinop(this, node, kArmSub, kArmRsb, cont);
case IrOpcode::kInt32MulWithOverflow:
// ARM doesn't set the overflow flag for multiplication, so we
// need to test on kNotEqual. Here is the code sequence used:
// smull resultlow, resulthigh, left, right
// cmp resulthigh, Operand(resultlow, ASR, 31)
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return EmitInt32MulWithOverflow(selector, node, cont);
+ return EmitInt32MulWithOverflow(this, node, cont);
default:
break;
}
@@ -1889,112 +1905,79 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Add:
- return VisitWordCompare(selector, value, kArmCmn, cont);
+ return VisitWordCompare(this, value, kArmCmn, cont);
case IrOpcode::kInt32Sub:
- return VisitWordCompare(selector, value, kArmCmp, cont);
+ return VisitWordCompare(this, value, kArmCmp, cont);
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kArmTst, cont);
+ return VisitWordCompare(this, value, kArmTst, cont);
case IrOpcode::kWord32Or:
- return VisitBinop(selector, value, kArmOrr, kArmOrr, cont);
+ return VisitBinop(this, value, kArmOrr, kArmOrr, cont);
case IrOpcode::kWord32Xor:
- return VisitWordCompare(selector, value, kArmTeq, cont);
+ return VisitWordCompare(this, value, kArmTeq, cont);
case IrOpcode::kWord32Sar:
- return VisitShift(selector, value, TryMatchASR, cont);
+ return VisitShift(this, value, TryMatchASR, cont);
case IrOpcode::kWord32Shl:
- return VisitShift(selector, value, TryMatchLSL, cont);
+ return VisitShift(this, value, TryMatchLSL, cont);
case IrOpcode::kWord32Shr:
- return VisitShift(selector, value, TryMatchLSR, cont);
+ return VisitShift(this, value, TryMatchLSR, cont);
case IrOpcode::kWord32Ror:
- return VisitShift(selector, value, TryMatchROR, cont);
+ return VisitShift(this, value, TryMatchROR, cont);
default:
break;
}
}
if (user->opcode() == IrOpcode::kWord32Equal) {
- return VisitWordCompare(selector, user, cont);
+ return VisitWordCompare(this, user, cont);
}
// Continuation could not be combined with a compare, emit compare against 0.
- ArmOperandGenerator g(selector);
+ ArmOperandGenerator g(this);
InstructionCode const opcode =
cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ Emit(opcode, g.NoOutput(), value_operand, value_operand,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
+ EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
- value_operand);
+ Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+ value_operand);
} else {
DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
- g.UseImmediate(cont->trap_id()));
+ Emit(opcode, g.NoOutput(), value_operand, value_operand,
+ g.UseImmediate(cont->trap_id()));
}
}
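For reference, the smull/cmp sequence used for Int32MulWithOverflow above works because a signed 32-bit product fits in 32 bits exactly when the high word of the 64-bit result equals the sign-extension of the low word. A minimal C++ sketch of the same check (names are illustrative, not V8 API):

    #include <cstdint>

    // Mirrors: smull lo, hi, a, b; cmp hi, Operand(lo, ASR, 31)
    bool Int32MulOverflows(int32_t a, int32_t b) {
      int64_t product = static_cast<int64_t>(a) * b;     // smull
      int32_t lo = static_cast<int32_t>(product);        // resultlow
      int32_t hi = static_cast<int32_t>(product >> 32);  // resulthigh
      return hi != (lo >> 31);  // lo >> 31 is 0 or -1, the sign extension
    }

This is also why the continuation tests kNotEqual rather than kOverflow: the cmp sets Z exactly when the product did fit.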
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
ArmOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
- index_operand, value_operand, g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
+ index_operand, value_operand, g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
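The jump-table block is now gated on enable_switch_jump_table_, but the cost model is unchanged: emit a table when its size-plus-3x-time cost does not exceed the lookup chain's. Concretely, four cases spanning a value range of 8 give a table cost of (4 + 8) + 3 * 3 = 21 against a lookup cost of (3 + 2 * 4) + 3 * 4 = 23, so the table wins; the same four cases spread over a range of 200 push the table side to 204 + 9, and the lookup chain is chosen instead. A sketch of the decision, omitting the min-value and range guards (illustrative only):

    #include <cstddef>

    // Weighs code size plus three times the expected dispatch time.
    bool PreferTableSwitch(size_t case_count, size_t value_range) {
      size_t table_cost = (4 + value_range) + 3 * 3;
      size_t lookup_cost = (3 + 2 * case_count) + 3 * case_count;
      return case_count > 0 && table_cost <= lookup_cost;
    }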
@@ -2006,7 +1989,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, &cont);
}
@@ -2137,7 +2120,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2145,13 +2128,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2161,7 +2146,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2170,13 +2155,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2193,7 +2178,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2201,15 +2186,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2228,7 +2213,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2237,15 +2222,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2304,11 +2289,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2323,11 +2309,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4, 4) \
- V(16x8, 8) \
- V(8x16, 16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kArmF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArmF32x4UConvertI32x4) \
@@ -2460,6 +2441,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
}
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
+#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2467,6 +2449,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
}
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
+#undef SIMD_UNOP_LIST
#define SIMD_VISIT_SHIFT_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2474,6 +2457,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
+#undef SIMD_SHIFT_OP_LIST
#define SIMD_VISIT_BINOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2481,6 +2465,7 @@ SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
}
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
+#undef SIMD_BINOP_LIST
void InstructionSelector::VisitS128Select(Node* node) {
ArmOperandGenerator g(this);
@@ -2631,6 +2616,18 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmSxtb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmSxth, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
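These two visitors lower the new sign-extension operators straight to ARM's sxtb/sxth; the trailing TempImmediate(0) appears to supply the instruction's optional rotate amount (none here). Semantically they are just narrowing casts followed by sign-extending widening:

    #include <cstdint>
    int32_t SignExtendWord8(int32_t x)  { return static_cast<int8_t>(x); }   // sxtb
    int32_t SignExtendWord16(int32_t x) { return static_cast<int16_t>(x); }  // sxth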
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
@@ -2642,7 +2639,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- MachineOperatorBuilder::Flags flags;
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(SUDIV)) {
// The sdiv and udiv instructions correctly return 0 if the divisor is 0,
// but the fall-back implementation does not.
@@ -2676,6 +2674,9 @@ InstructionSelector::AlignmentRequirements() {
SomeUnalignedAccessUnsupported(req_aligned, req_aligned);
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
index d47ca083ae..a741121e32 100644
--- a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
+++ b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
-#define V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
+#ifndef V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
+#define V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
#include "src/eh-frame.h"
@@ -69,4 +69,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_ARM_UNWINDING_INFO_WRITER_ARM_H_
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 147d85a171..a07236b859 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -256,8 +256,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
offset = FrameOffset::FromStackPointer(from_sp);
}
}
- return MemOperand(offset.from_stack_pointer() ? tasm->StackPointer() : fp,
- offset.offset());
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -297,8 +296,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr, padreg);
- unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
- __ StackPointer());
+ unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
@@ -374,6 +372,19 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
+ : kSpeculationPoisonRegister.W();
+ codegen->tasm()->And(value, value, Operand(poison));
+ }
+}
+
} // namespace
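EmitWordLoadPoisoningIfNeeded is the load half of the speculative-execution mitigation: when an instruction carries kMemoryAccessPoisoned in its MiscField, the freshly loaded value is ANDed with kSpeculationPoisonRegister, which the surrounding machinery keeps at all-ones on the architecturally correct path and all-zeros under misspeculation. In effect (an illustrative sketch, not the V8 API):

    #include <cstdint>

    // poison is ~0 on the correct path and 0 down a misspeculated path,
    // so a wrong-path load can only ever observe zero.
    uint64_t PoisonLoad(uint64_t loaded_value, uint64_t poison) {
      return loaded_value & poison;
    }

The Is64Bits() check just matches the poison register's width (X or W) to the load's output register.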
#define ASSEMBLE_SHIFT(asm_instr, width) \
@@ -455,7 +466,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
- __ Mov(csp, fp);
+ __ Mov(sp, fp);
__ Pop(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -535,29 +546,27 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
}
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ __ ComputeCodeStartAddress(scratch);
+ __ cmp(scratch, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. compute the offset of the {CodeDataContainer} from our current location
-// and load it.
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
- {
- // Since we always emit a bailout check at the very beginning we can be
- // certain that the distance between here and the {CodeDataContainer} is
- // fixed and always in range of a load.
- int data_container_offset =
- (Code::kCodeDataContainerOffset - Code::kHeaderSize) - __ pc_offset();
- DCHECK_GE(0, data_container_offset);
- DCHECK_EQ(0, data_container_offset % 4);
- InstructionAccurateScope scope(tasm());
- __ ldr_pcrel(scratch, data_container_offset >> 2);
- }
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
Label not_deoptimized;
@@ -568,6 +577,29 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+
+ // Set a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ __ ComputeCodeStartAddress(scratch);
+ __ Cmp(kJavaScriptCallCodeStartRegister, scratch);
+ __ Csetm(kSpeculationPoisonRegister, eq);
+ __ Csdb();
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+
+ __ Mov(scratch, sp);
+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ And(scratch, scratch, kSpeculationPoisonRegister);
+ __ Mov(sp, scratch);
+}
+
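GenerateSpeculationPoison is where that mask comes from: the code start address is recomputed and compared against kJavaScriptCallCodeStartRegister, Csetm sets the poison register to all-ones on equality and all-zeros otherwise, and Csdb keeps the conditional select itself from being speculated past. AssembleRegisterArgumentPoisoning then ANDs the mask into the JS function, context, and stack pointer, so a frame entered with a misspeculated PC runs on zeroed state. The mask computation, roughly (illustrative, not the V8 API):

    #include <cstdint>

    // csetm: eq -> all ones, ne -> zero; csdb is the speculation barrier.
    uint64_t ComputePoison(uintptr_t code_start, uintptr_t expected_start) {
      return code_start == expected_start ? ~uint64_t{0} : uint64_t{0};
    }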
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -680,9 +712,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(cp, temp);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
- __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ Add(x10, x10, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(x10);
+ static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+ __ Ldr(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(x2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -799,7 +832,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
- __ mov(i.OutputRegister(), tasm()->StackPointer());
+ __ mov(i.OutputRegister(), sp);
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
@@ -844,7 +877,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- Register base = offset.from_stack_pointer() ? __ StackPointer() : fp;
+ Register base = offset.from_stack_pointer() ? sp : fp;
__ Add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
@@ -1161,6 +1194,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Sxth32:
__ Sxth(i.OutputRegister32(), i.InputRegister32(0));
break;
+ case kArm64Sxtb:
+ __ Sxtb(i.OutputRegister(), i.InputRegister32(0));
+ break;
+ case kArm64Sxth:
+ __ Sxth(i.OutputRegister(), i.InputRegister32(0));
+ break;
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
@@ -1190,12 +1229,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64CompareAndBranch32:
case kArm64CompareAndBranch:
- // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
+ // Pseudo instruction handled in AssembleArchBranch.
break;
case kArm64Claim: {
int count = i.InputInt32(0);
DCHECK_EQ(count % 2, 0);
- __ AssertCspAligned();
+ __ AssertSpAligned();
if (count > 0) {
__ Claim(count);
frame_access_state()->IncreaseSPDelta(count);
@@ -1493,33 +1532,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strb:
__ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrh:
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strh:
__ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrsw:
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
__ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
break;
case kArm64Ldr:
__ Ldr(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Str:
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
@@ -1542,82 +1588,86 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrQ:
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
- case kAtomicLoadInt8:
+ case kArm64DsbIsb:
+ __ Dsb(FullSystem, BarrierAll);
+ __ Isb();
+ break;
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr);
break;
- case kAtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB);
break;
- case kAtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH);
break;
- case kAtomicCompareExchangeWord32:
+ case kWord32AtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
__ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \
break;
ATOMIC_BINOP_CASE(Add, Add)
@@ -2097,6 +2147,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
+ DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
@@ -2108,6 +2159,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64CompareAndBranch) {
+ DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister64(0), tlabel);
@@ -2119,6 +2171,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
+ DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
@@ -2130,6 +2183,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
+ DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
@@ -2147,6 +2201,19 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ CmovX(kSpeculationPoisonRegister, xzr,
+ FlagsConditionToCondition(condition));
+ __ Csdb();
+}
+
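AssembleBranchPoisoning extends the scheme to branches: right after the flag-setting compare, CmovX clears the poison register under the negated condition, so the successor reached by wrong-path speculation sees a zero mask while the architecturally taken path keeps all-ones. In pseudo-assembly, assuming a preceding cmp has set the flags for condition cc:

    csel poison, poison, xzr, cc   ; keep mask if cc holds, else zero it
    csdb                           ; barrier for the select itself

Unordered float conditions are deliberately skipped for now, per the TODO above.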
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -2189,13 +2256,13 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
pop_count += (pop_count & 1); // align
__ Drop(pop_count);
__ Ret();
} else {
- DCHECK(csp.Is(__ StackPointer()));
gen_->AssembleSourcePosition(instr_);
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
@@ -2267,11 +2334,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
void CodeGenerator::FinishFrame(Frame* frame) {
frame->AlignFrame(16);
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
+ call_descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
@@ -2281,7 +2348,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
+ call_descriptor->CalleeSavedRegisters());
saved_count = saves.Count();
if (saved_count != 0) {
DCHECK_EQ(saved_count % 2, 0);
@@ -2290,29 +2357,29 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- __ AssertCspAligned();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ __ AssertSpAligned();
// The frame has been previously padded in CodeGenerator::FinishFrame().
DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
+ call_descriptor->CalleeSavedRegisters());
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
+ call_descriptor->CalleeSavedFPRegisters());
// The number of slots for returns has to be even to ensure the correct stack
// alignment.
const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
if (frame_access_state()->has_frame()) {
// Link the frame
- if (descriptor->IsJSFunctionCall()) {
+ if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
} else {
__ Push(lr, fp);
- __ Mov(fp, __ StackPointer());
+ __ Mov(fp, sp);
}
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -2328,6 +2395,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ InitializePoisonForLoadsIfNeeded();
}
if (info()->IsWasm() && shrink_slots > 128) {
@@ -2346,7 +2414,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ isolate())));
__ Ldr(scratch, MemOperand(scratch));
__ Add(scratch, scratch, shrink_slots * kPointerSize);
- __ Cmp(__ StackPointer(), scratch);
+ __ Cmp(sp, scratch);
__ B(hs, &done);
}
@@ -2356,8 +2424,6 @@ void CodeGenerator::AssembleConstructFrame() {
// runtime call.
__ EnterFrame(StackFrame::WASM_COMPILED);
}
- DCHECK(__ StackPointer().Is(csp));
- __ AssertStackConsistency();
__ Mov(cp, Smi::kZero);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
@@ -2367,7 +2433,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_debug_code) {
__ Brk(0);
}
- __ AssertStackConsistency();
__ Bind(&done);
}
@@ -2380,9 +2445,9 @@ void CodeGenerator::AssembleConstructFrame() {
// frame-specific header information, i.e. claiming the extra slot that
// other platforms explicitly push for STUB (code object) frames and frames
// recording their argument count.
- switch (descriptor->kind()) {
+ switch (call_descriptor->kind()) {
case CallDescriptor::kCallJSFunction:
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Claim(shrink_slots + 1); // Claim extra slot for argc.
__ Str(kJavaScriptCallArgCountRegister,
MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
@@ -2424,7 +2489,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
@@ -2434,19 +2499,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore registers.
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
+ call_descriptor->CalleeSavedRegisters());
__ PopCPURegList(saves);
// Restore fp registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
+ call_descriptor->CalleeSavedFPRegisters());
__ PopCPURegList(saves_fp);
unwinding_info_writer_.MarkBlockWillExit();
Arm64OperandConverter g(this, nullptr);
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
- if (descriptor->IsCFunctionCall()) {
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
@@ -2473,7 +2538,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ DropArguments(pop_reg);
}
- __ AssertCspAligned();
+ __ AssertSpAligned();
__ Ret();
}
@@ -2482,195 +2547,195 @@ void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
Arm64OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- __ Mov(g.ToRegister(destination), src);
+ // Helper function to write the given constant to the dst register.
+ auto MoveConstantToRegister = [&](Register dst, Constant src) {
+ if (src.type() == Constant::kHeapObject) {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Mov(dst, src_object);
+ }
} else {
- __ Str(src, g.ToMemOperand(destination, tasm()));
+ __ Mov(dst, g.ToImmediate(source));
}
- } else if (source->IsStackSlot()) {
- MemOperand src = g.ToMemOperand(source, tasm());
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- if (destination->IsRegister()) {
- __ Ldr(g.ToRegister(destination), src);
- } else {
- UseScratchRegisterScope scope(tasm());
- Register temp = scope.AcquireX();
- __ Ldr(temp, src);
- __ Str(temp, g.ToMemOperand(destination, tasm()));
- }
- } else if (source->IsConstant()) {
- Constant src = g.ToConstant(ConstantOperand::cast(source));
- if (destination->IsRegister() || destination->IsStackSlot()) {
- UseScratchRegisterScope scope(tasm());
- Register dst = destination->IsRegister() ? g.ToRegister(destination)
- : scope.AcquireX();
- if (src.type() == Constant::kHeapObject) {
- Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
- if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
- } else {
- __ Mov(dst, src_object);
- }
+ };
+ switch (MoveType::InferMove(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ Mov(g.ToRegister(destination), g.ToRegister(source));
+ } else if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ __ Mov(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
} else {
- __ Mov(dst, g.ToImmediate(source));
- }
- if (destination->IsStackSlot()) {
- __ Str(dst, g.ToMemOperand(destination, tasm()));
+ DCHECK(source->IsSimd128Register());
+ __ Mov(g.ToDoubleRegister(destination).Q(),
+ g.ToDoubleRegister(source).Q());
}
- } else if (src.type() == Constant::kFloat32) {
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination).S();
- __ Fmov(dst, src.ToFloat32());
+ return;
+ case MoveType::kRegisterToStack: {
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (source->IsRegister()) {
+ __ Str(g.ToRegister(source), dst);
} else {
- DCHECK(destination->IsFPStackSlot());
- if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
- __ Str(wzr, g.ToMemOperand(destination, tasm()));
+ VRegister src = g.ToDoubleRegister(source);
+ if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ __ Str(src, dst);
} else {
- UseScratchRegisterScope scope(tasm());
- VRegister temp = scope.AcquireS();
- __ Fmov(temp, src.ToFloat32());
- __ Str(temp, g.ToMemOperand(destination, tasm()));
+ DCHECK(source->IsSimd128Register());
+ __ Str(src.Q(), dst);
}
}
- } else {
- DCHECK_EQ(Constant::kFloat64, src.type());
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination);
- __ Fmov(dst, src.ToFloat64().value());
+ return;
+ }
+ case MoveType::kStackToRegister: {
+ MemOperand src = g.ToMemOperand(source, tasm());
+ if (destination->IsRegister()) {
+ __ Ldr(g.ToRegister(destination), src);
} else {
- DCHECK(destination->IsFPStackSlot());
- if (src.ToFloat64().AsUint64() == 0) {
- __ Str(xzr, g.ToMemOperand(destination, tasm()));
+ VRegister dst = g.ToDoubleRegister(destination);
+ if (destination->IsFloatRegister() || destination->IsDoubleRegister()) {
+ __ Ldr(dst, src);
} else {
- UseScratchRegisterScope scope(tasm());
- VRegister temp = scope.AcquireD();
- __ Fmov(temp, src.ToFloat64().value());
- __ Str(temp, g.ToMemOperand(destination, tasm()));
+ DCHECK(destination->IsSimd128Register());
+ __ Ldr(dst.Q(), src);
}
}
+ return;
}
- } else if (source->IsFPRegister()) {
- VRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination);
- if (destination->IsSimd128Register()) {
- __ Mov(dst.Q(), src.Q());
- } else {
- __ Mov(dst, src);
- }
- } else {
- DCHECK(destination->IsFPStackSlot());
+ case MoveType::kStackToStack: {
+ MemOperand src = g.ToMemOperand(source, tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
- if (destination->IsSimd128StackSlot()) {
- __ Str(src.Q(), dst);
+ if (source->IsSimd128StackSlot()) {
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireQ();
+ __ Ldr(temp, src);
+ __ Str(temp, dst);
} else {
- __ Str(src, dst);
+ UseScratchRegisterScope scope(tasm());
+ Register temp = scope.AcquireX();
+ __ Ldr(temp, src);
+ __ Str(temp, dst);
}
+ return;
}
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- MemOperand src = g.ToMemOperand(source, tasm());
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination);
- if (destination->IsSimd128Register()) {
- __ Ldr(dst.Q(), src);
+ case MoveType::kConstantToRegister: {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister()) {
+ MoveConstantToRegister(g.ToRegister(destination), src);
} else {
- __ Ldr(dst, src);
+ VRegister dst = g.ToDoubleRegister(destination);
+ if (destination->IsFloatRegister()) {
+ __ Fmov(dst.S(), src.ToFloat32());
+ } else {
+ DCHECK(destination->IsDoubleRegister());
+ __ Fmov(dst, src.ToFloat64().value());
+ }
}
- } else {
- UseScratchRegisterScope scope(tasm());
- VRegister temp = scope.AcquireD();
+ return;
+ }
+ case MoveType::kConstantToStack: {
+ Constant src = g.ToConstant(source);
MemOperand dst = g.ToMemOperand(destination, tasm());
- if (destination->IsSimd128StackSlot()) {
- __ Ldr(temp.Q(), src);
- __ Str(temp.Q(), dst);
- } else {
- __ Ldr(temp, src);
+ if (destination->IsStackSlot()) {
+ UseScratchRegisterScope scope(tasm());
+ Register temp = scope.AcquireX();
+ MoveConstantToRegister(temp, src);
__ Str(temp, dst);
+ } else if (destination->IsFloatStackSlot()) {
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ Str(wzr, dst);
+ } else {
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireS();
+ __ Fmov(temp, src.ToFloat32());
+ __ Str(temp, dst);
+ }
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ if (src.ToFloat64().AsUint64() == 0) {
+ __ Str(xzr, dst);
+ } else {
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireD();
+ __ Fmov(temp, src.ToFloat64().value());
+ __ Str(temp, dst);
+ }
}
+ return;
}
- } else {
- UNREACHABLE();
}
+ UNREACHABLE();
}
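The rewritten AssembleMove replaces the nested source/destination kind tests with one switch over MoveType::InferMove, so the move pair is classified once and each arm only distinguishes register class (general, float/double, SIMD) and constant type, with the trailing UNREACHABLE() catching combinations the register allocator never produces. The classification step itself is simple; a self-contained sketch (illustrative names, not the V8 API):

    enum class MoveKind { kRegToReg, kRegToStack, kStackToReg,
                          kStackToStack, kConstToReg, kConstToStack };

    // Classify a move once up front; callers then switch on the result.
    MoveKind Classify(bool src_const, bool src_reg, bool dst_reg) {
      if (src_const) return dst_reg ? MoveKind::kConstToReg
                                    : MoveKind::kConstToStack;
      if (src_reg)   return dst_reg ? MoveKind::kRegToReg
                                    : MoveKind::kRegToStack;
      return dst_reg ? MoveKind::kStackToReg : MoveKind::kStackToStack;
    }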
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
Arm64OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- // Register-register.
- UseScratchRegisterScope scope(tasm());
- Register temp = scope.AcquireX();
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Mov(temp, src);
- __ Mov(src, dst);
- __ Mov(dst, temp);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination, tasm());
- __ Mov(temp, src);
- __ Ldr(src, dst);
- __ Str(temp, dst);
- }
- } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
- UseScratchRegisterScope scope(tasm());
- VRegister temp_0 = scope.AcquireD();
- VRegister temp_1 = scope.AcquireD();
- MemOperand src = g.ToMemOperand(source, tasm());
- MemOperand dst = g.ToMemOperand(destination, tasm());
- if (source->IsSimd128StackSlot()) {
- __ Ldr(temp_0.Q(), src);
- __ Ldr(temp_1.Q(), dst);
- __ Str(temp_0.Q(), dst);
- __ Str(temp_1.Q(), src);
- } else {
- __ Ldr(temp_0, src);
- __ Ldr(temp_1, dst);
- __ Str(temp_0, dst);
- __ Str(temp_1, src);
- }
- } else if (source->IsFPRegister()) {
- UseScratchRegisterScope scope(tasm());
- VRegister temp = scope.AcquireD();
- VRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- VRegister dst = g.ToDoubleRegister(destination);
- if (source->IsSimd128Register()) {
- __ Mov(temp.Q(), src.Q());
- __ Mov(src.Q(), dst.Q());
- __ Mov(dst.Q(), temp.Q());
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ Swap(g.ToRegister(source), g.ToRegister(destination));
} else {
- __ Mov(temp, src);
- __ Mov(src, dst);
- __ Mov(dst, temp);
+ VRegister src = g.ToDoubleRegister(source);
+ VRegister dst = g.ToDoubleRegister(destination);
+ if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ __ Swap(src, dst);
+ } else {
+ DCHECK(source->IsSimd128Register());
+ __ Swap(src.Q(), dst.Q());
+ }
}
- } else {
- DCHECK(destination->IsFPStackSlot());
+ return;
+ case MoveType::kRegisterToStack: {
+ UseScratchRegisterScope scope(tasm());
MemOperand dst = g.ToMemOperand(destination, tasm());
- if (source->IsSimd128Register()) {
- __ Mov(temp.Q(), src.Q());
- __ Ldr(src.Q(), dst);
- __ Str(temp.Q(), dst);
- } else {
+ if (source->IsRegister()) {
+ Register temp = scope.AcquireX();
+ Register src = g.ToRegister(source);
__ Mov(temp, src);
__ Ldr(src, dst);
__ Str(temp, dst);
+ } else {
+ UseScratchRegisterScope scope(tasm());
+ VRegister src = g.ToDoubleRegister(source);
+ if (source->IsFloatRegister() || source->IsDoubleRegister()) {
+ VRegister temp = scope.AcquireD();
+ __ Mov(temp, src);
+ __ Ldr(src, dst);
+ __ Str(temp, dst);
+ } else {
+ DCHECK(source->IsSimd128Register());
+ VRegister temp = scope.AcquireQ();
+ __ Mov(temp, src.Q());
+ __ Ldr(src.Q(), dst);
+ __ Str(temp, dst);
+ }
}
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ case MoveType::kStackToStack: {
+ UseScratchRegisterScope scope(tasm());
+ MemOperand src = g.ToMemOperand(source, tasm());
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ VRegister temp_0 = scope.AcquireD();
+ VRegister temp_1 = scope.AcquireD();
+ if (source->IsSimd128StackSlot()) {
+ __ Ldr(temp_0.Q(), src);
+ __ Ldr(temp_1.Q(), dst);
+ __ Str(temp_0.Q(), dst);
+ __ Str(temp_1.Q(), src);
+ } else {
+ __ Ldr(temp_0, src);
+ __ Ldr(temp_1, dst);
+ __ Str(temp_0, dst);
+ __ Str(temp_1, src);
+ }
+ return;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -2680,7 +2745,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
UNREACHABLE();
}
-
#undef __
} // namespace compiler
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 820b55a99d..72218ce8fd 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -67,6 +67,8 @@ namespace compiler {
V(Arm64Mov32) \
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
+ V(Arm64Sxtb) \
+ V(Arm64Sxth) \
V(Arm64Sxtw) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
@@ -155,6 +157,7 @@ namespace compiler {
V(Arm64StrW) \
V(Arm64Ldr) \
V(Arm64Str) \
+ V(Arm64DsbIsb) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index c2b0a4e386..5378cb2f9c 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -67,7 +67,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ror:
case kArm64Ror32:
case kArm64Mov32:
+ case kArm64Sxtb:
case kArm64Sxtb32:
+ case kArm64Sxth:
case kArm64Sxth32:
case kArm64Sxtw:
case kArm64Sbfx32:
@@ -306,6 +308,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Strh:
case kArm64StrW:
case kArm64Str:
+ case kArm64DsbIsb:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index d6082c9f0a..0787ccdc0f 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -629,9 +629,16 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
EmitLoad(this, node, opcode, immediate_mode, rep);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1238,6 +1245,7 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
#define RRR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1245,6 +1253,7 @@ RR_OP_LIST(RR_VISITOR)
}
RRR_OP_LIST(RRR_VISITOR)
#undef RRR_VISITOR
+#undef RRR_OP_LIST
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
@@ -1258,7 +1267,10 @@ void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64DsbIsb, g.NoOutput());
+}
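Instead of being unreachable, SpeculationFence now lowers to the new kArm64DsbIsb opcode, whose code-generator case (in code-generator-arm64.cc above) emits a data synchronization barrier followed by an instruction synchronization barrier:

    dsb sy   ; drain outstanding memory accesses, full system, all types
    isb      ; flush the pipeline so younger instructions re-fetch

Together these stop instructions after the fence from executing speculatively ahead of it, which is what the kSpeculationFence machine-operator flag promises.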
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
@@ -1642,6 +1654,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
default:
break;
}
+ break;
}
default:
break;
@@ -1680,7 +1693,7 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
Arm64OperandGenerator g(this);
@@ -1692,7 +1705,7 @@ void InstructionSelector::EmitPrepareArguments(
// Bump the stack pointer(s).
if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
- // TODO(titzer): it would be better to bump the csp here only
+ // TODO(titzer): it would be better to bump the sp here only
// and emit paired stores with increment for non c frames.
Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
}
@@ -1720,9 +1733,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
Arm64OperandGenerator g(this);
int reverse_slot = 0;
@@ -1731,7 +1744,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
reverse_slot += output.location.GetSizeInPointers();
// Skip any alignment holes in nodes.
if (output.node == nullptr) continue;
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
@@ -1939,6 +1952,9 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
// against {value}, depending on the condition.
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
+ // Branch poisoning requires flags to be set, so when it's enabled for
+ // a particular branch, we shouldn't be applying the cbz/tbz optimization.
+ DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
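The new DCHECK records a real constraint rather than a stylistic one: cbz/cbnz and tbz/tbnz fold the test into the branch without updating NZCV, but branch poisoning needs live flags for its conditional select. Side by side, in pseudo-assembly:

    ; fused form, no flags: fine when the branch is not poisoned
    cbz   w0, target

    ; flag-setting form required under poisoning
    cmp   w0, #0
    b.eq  target      ; NZCV stays valid for the csel/csdb that follows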
@@ -1991,7 +2007,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
return true;
}
}
- } // Fall through.
+ V8_FALLTHROUGH;
+ }
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan: {
if (value != 0) return false;
@@ -2010,16 +2027,18 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
- if (m.right().HasValue()) {
- if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
- cond, cont)) {
- return;
- }
- } else if (m.left().HasValue()) {
- FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
- commuted_cond, cont)) {
- return;
+ if (!cont->IsPoisoned()) {
+ if (m.right().HasValue()) {
+ if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
+ cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasValue()) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
+ commuted_cond, cont)) {
+ return;
+ }
}
}
ArchOpcode opcode = kArm64Cmp32;
@@ -2092,7 +2111,7 @@ bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Matcher m(node);
- if (cont->IsBranch() && m.right().HasValue() &&
+ if (cont->IsBranch() && !cont->IsPoisoned() && m.right().HasValue() &&
base::bits::IsPowerOfTwo(m.right().Value())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
@@ -2142,12 +2161,13 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
}
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- Arm64OperandGenerator g(selector);
+} // namespace
+
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(this);
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -2156,84 +2176,83 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
Node* const left = m.left().node();
- if (selector->CanCover(value, left) &&
- left->opcode() == IrOpcode::kWord64And) {
+ if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) {
// Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
// into a tbz/tbnz instruction.
if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- selector, left, cont)) {
+ this, left, cont)) {
return;
}
- return VisitWordCompare(selector, left, kArm64Tst, cont, true,
+ return VisitWordCompare(this, left, kArm64Tst, cont, true,
kLogical64Imm);
}
// Merge the Word64Equal(x, 0) comparison into a cbz instruction.
- if (cont->IsBranch() || cont->IsDeoptimize()) {
- EmitBranchOrDeoptimize(selector,
- cont->Encode(kArm64CompareAndBranch),
+ if ((cont->IsBranch() || cont->IsDeoptimize()) &&
+ !cont->IsPoisoned()) {
+ EmitBranchOrDeoptimize(this, cont->Encode(kArm64CompareAndBranch),
g.UseRegister(left), cont);
return;
}
}
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
+ return VisitWordCompare(this, value, kArm64Cmp, cont, false,
kArithmeticImm);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kFloatLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -2245,30 +2264,30 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(
- selector, node, kArm64Add32, kArithmeticImm, cont);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
+ kArithmeticImm, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(
- selector, node, kArm64Sub32, kArithmeticImm, cont);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
+ kArithmeticImm, cont);
case IrOpcode::kInt32MulWithOverflow:
// ARM64 doesn't set the overflow flag for multiplication, so we
// need to test on kNotEqual. Here is the code sequence used:
// smull result, left, right
// cmp result.X(), Operand(result, SXTW)
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return EmitInt32MulWithOverflow(selector, node, cont);
+ return EmitInt32MulWithOverflow(this, node, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
kArithmeticImm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
kArithmeticImm, cont);
default:
break;
@@ -2277,23 +2296,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Add:
- return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
+ return VisitWordCompare(this, value, kArm64Cmn32, cont, true,
kArithmeticImm);
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
- selector, value, cont)) {
+ this, value, cont)) {
return;
}
- return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
+ return VisitWordCompare(this, value, kArm64Tst32, cont, true,
kLogical32Imm);
case IrOpcode::kWord64And:
if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- selector, value, cont)) {
+ this, value, cont)) {
return;
}
- return VisitWordCompare(selector, value, kArm64Tst, cont, true,
+ return VisitWordCompare(this, value, kArm64Tst, cont, true,
kLogical64Imm);
default:
break;
@@ -2302,80 +2321,52 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
  // Branch could not be combined with a compare; compare against 0 and branch.
if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
- g.UseRegister(value), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ if (cont->IsPoisoned()) {
+ // We need an instruction that sets flags for poisoning to work.
+ Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
+ g.UseRegister(value), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+ g.UseRegister(value), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ }
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
- g.UseRegister(value), g.UseRegister(value),
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
+ EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
+ g.UseRegister(value), g.UseRegister(value), cont->kind(),
+ cont->reason(), cont->feedback(), cont->frame_state());
} else {
DCHECK(cont->IsTrap());
- selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
- g.UseRegister(value), g.UseRegister(value),
- g.UseImmediate(cont->trap_id()));
+ Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
+ g.UseRegister(value), g.UseImmediate(cont->trap_id()));
}
}
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Arm64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kArm64Sub32, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArm64Sub32, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
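
The jump-table heuristic being wrapped above is unchanged: it trades table size against a chain of compares, weighting time three times as heavily as space. As a standalone predicate it reads as follows (a minimal sketch using the same costs as the code above; the function name is illustrative, not V8 API):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Returns true when a jump table beats a chain of conditional jumps
    // under the 3x time weighting used in VisitSwitch.
    bool ShouldUseTableSwitch(size_t case_count, size_t value_range,
                              int32_t min_value) {
      static const size_t kMaxTableSwitchValueRange = 2 << 16;
      size_t table_space_cost = 4 + value_range;  // one slot per value in range
      size_t table_time_cost = 3;                 // bounds check + indirect jump
      size_t lookup_space_cost = 3 + 2 * case_count;
      size_t lookup_time_cost = case_count;       // linear compare chain
      return case_count > 0 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost &&
             min_value > std::numeric_limits<int32_t>::min() &&
             value_range <= kMaxTableSwitchValueRange;
    }
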
@@ -2622,7 +2613,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2630,13 +2621,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2650,7 +2643,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2659,13 +2652,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2683,7 +2676,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs, arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2691,15 +2684,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2718,7 +2711,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2727,15 +2720,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2793,11 +2786,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
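
Expanding the renamed macro by hand shows the visitor shape it now generates; for VISIT_ATOMIC_BINOP(Add) the expansion is (whitespace added):

    void InstructionSelector::VisitWord32AtomicAdd(Node* node) {
      VisitAtomicBinaryOperation(
          node, kWord32AtomicAddInt8, kWord32AtomicAddUint8,
          kWord32AtomicAddInt16, kWord32AtomicAddUint16,
          kWord32AtomicAddWord32);
    }
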
@@ -2820,11 +2814,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4, 4) \
- V(16x8, 8) \
- V(8x16, 16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
@@ -2957,6 +2946,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
}
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
+#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2964,6 +2954,7 @@ SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
}
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
+#undef SIMD_UNOP_LIST
#define SIMD_VISIT_SHIFT_OP(Name) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2971,6 +2962,7 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
}
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
+#undef SIMD_SHIFT_OP_LIST
#define SIMD_VISIT_BINOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -2978,6 +2970,7 @@ SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
}
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
+#undef SIMD_BINOP_LIST
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
@@ -3132,6 +3125,26 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ VisitRR(this, kArm64Sxtb32, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ VisitRR(this, kArm64Sxth32, node);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxtb, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxth, node);
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ VisitRR(this, kArm64Sxtw, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -3148,7 +3161,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kWord32ReverseBits |
- MachineOperatorBuilder::kWord64ReverseBits;
+ MachineOperatorBuilder::kWord64ReverseBits |
+ MachineOperatorBuilder::kSpeculationFence;
}
// static
@@ -3158,6 +3172,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
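
The new kSpeculationFence flag and SupportsSpeculationPoisoning() hook tie into the poisoning logic visible earlier in this file: a branch that feeds the poison mask must set the condition flags, which is why a poisoned continuation emits kArm64Tst32 instead of a compare-and-branch. A rough C++ analogue of the masking idea, under the assumption that the mask is derived from the same condition the branch tests (illustrative only, not V8 code):

    #include <cstdint>

    // On the architecturally taken path poison_mask is all ones; under
    // misspeculation the hardware-derived mask is all zeros, so the
    // dependent load sees index 0 instead of attacker-controlled data.
    uint64_t LoadWithPoison(const uint64_t* table, uint64_t index,
                            uint64_t bound, uint64_t poison_mask) {
      if (index < bound) {
        return table[index & poison_mask];
      }
      return 0;
    }
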
diff --git a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
index a532851d84..25c4fcf77f 100644
--- a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
+++ b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
-#define V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
+#ifndef V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
+#define V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
#include "src/eh-frame.h"
@@ -69,4 +69,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.h b/deps/v8/src/compiler/basic-block-instrumentor.h
index 074f19b308..3a5b729966 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.h
+++ b/deps/v8/src/compiler/basic-block-instrumentor.h
@@ -29,4 +29,4 @@ class BasicBlockInstrumentor : public AllStatic {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 53c3435b55..3d71e98a12 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -16,7 +16,8 @@ BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
Zone* zone)
: AdvancedReducer(editor),
jsgraph_(js_graph),
- node_conditions_(zone, js_graph->graph()->NodeCount()),
+ node_conditions_(js_graph->graph()->NodeCount(), zone),
+ reduced_(js_graph->graph()->NodeCount(), zone),
zone_(zone),
dead_(js_graph->Dead()) {}
@@ -55,26 +56,32 @@ Reduction BranchElimination::Reduce(Node* node) {
Reduction BranchElimination::ReduceBranch(Node* node) {
Node* condition = node->InputAt(0);
Node* control_input = NodeProperties::GetControlInput(node, 0);
- const ControlPathConditions* from_input = node_conditions_.Get(control_input);
- if (from_input != nullptr) {
- Maybe<bool> condition_value = from_input->LookupCondition(condition);
- // If we know the condition we can discard the branch.
- if (condition_value.IsJust()) {
- bool known_value = condition_value.FromJust();
- for (Node* const use : node->uses()) {
- switch (use->opcode()) {
- case IrOpcode::kIfTrue:
- Replace(use, known_value ? control_input : dead());
- break;
- case IrOpcode::kIfFalse:
- Replace(use, known_value ? dead() : control_input);
- break;
- default:
- UNREACHABLE();
- }
+ ControlPathConditions from_input = node_conditions_.Get(control_input);
+ Node* branch;
+ bool condition_value;
+  // If we know the condition, we can discard the branch.
+ if (from_input.LookupCondition(condition, &branch, &condition_value)) {
+    // Mark the branch as a safety check, but skip {branch} if it is dead:
+    // we might have a stale side-table entry for it.
+ if (IsSafetyCheckOf(node->op()) == IsSafetyCheck::kSafetyCheck &&
+ !branch->IsDead()) {
+ NodeProperties::ChangeOp(branch,
+ common()->MarkAsSafetyCheck(branch->op()));
+ }
+
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ Replace(use, condition_value ? control_input : dead());
+ break;
+ case IrOpcode::kIfFalse:
+ Replace(use, condition_value ? dead() : control_input);
+ break;
+ default:
+ UNREACHABLE();
}
- return Replace(dead());
}
+ return Replace(dead());
}
return TakeConditionsFromFirstControl(node);
}
@@ -88,45 +95,53 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
Node* frame_state = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- ControlPathConditions const* conditions = node_conditions_.Get(control);
// If we do not know anything about the predecessor, do not propagate just
// yet because we will have to recompute anyway once we compute the
// predecessor.
- if (conditions == nullptr) {
- return UpdateConditions(node, conditions);
- }
- Maybe<bool> condition_value = conditions->LookupCondition(condition);
- if (condition_value.IsJust()) {
+ if (!reduced_.Get(control)) {
+ return NoChange();
+ }
+
+ ControlPathConditions conditions = node_conditions_.Get(control);
+ bool condition_value;
+ Node* branch;
+ if (conditions.LookupCondition(condition, &branch, &condition_value)) {
+ // Mark the branch as a safety check.
+ if (p.is_safety_check() == IsSafetyCheck::kSafetyCheck) {
+ NodeProperties::ChangeOp(branch,
+ common()->MarkAsSafetyCheck(branch->op()));
+ }
+
    // If we know the condition, we can discard the branch.
- if (condition_is_true == condition_value.FromJust()) {
+ if (condition_is_true == condition_value) {
// We don't update the conditions here, because we're replacing {node}
// with the {control} node that already contains the right information.
ReplaceWithValue(node, dead(), effect, control);
} else {
control = graph()->NewNode(
- common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
- frame_state, effect, control);
+ common()->Deoptimize(p.kind(), p.reason(), p.feedback()), frame_state,
+ effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
}
return Replace(dead());
}
- return UpdateConditions(node, conditions, condition, condition_is_true);
+ return UpdateConditions(node, conditions, condition, node, condition_is_true);
}
Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
// Add the condition to the list arriving from the input branch.
Node* branch = NodeProperties::GetControlInput(node, 0);
- const ControlPathConditions* from_branch = node_conditions_.Get(branch);
+ ControlPathConditions from_branch = node_conditions_.Get(branch);
// If we do not know anything about the predecessor, do not propagate just
// yet because we will have to recompute anyway once we compute the
// predecessor.
- if (from_branch == nullptr) {
- return UpdateConditions(node, nullptr);
+ if (!reduced_.Get(branch)) {
+ return NoChange();
}
Node* condition = branch->InputAt(0);
- return UpdateConditions(node, from_branch, condition, is_true_branch);
+ return UpdateConditions(node, from_branch, condition, branch, is_true_branch);
}
@@ -143,8 +158,8 @@ Reduction BranchElimination::ReduceMerge(Node* node) {
// input.
Node::Inputs inputs = node->inputs();
for (Node* input : inputs) {
- if (node_conditions_.Get(input) == nullptr) {
- return UpdateConditions(node, nullptr);
+ if (!reduced_.Get(input)) {
+ return NoChange();
}
}
@@ -152,42 +167,23 @@ Reduction BranchElimination::ReduceMerge(Node* node) {
DCHECK_GT(inputs.count(), 0);
- const ControlPathConditions* first = node_conditions_.Get(*input_it);
+ ControlPathConditions conditions = node_conditions_.Get(*input_it);
++input_it;
- // Make a copy of the first input's conditions and merge with the conditions
- // from other inputs.
- ControlPathConditions* conditions =
- new (zone_->New(sizeof(ControlPathConditions)))
- ControlPathConditions(*first);
+ // Merge the first input's conditions with the conditions from the other
+ // inputs.
auto input_end = inputs.end();
for (; input_it != input_end; ++input_it) {
- conditions->Merge(*(node_conditions_.Get(*input_it)));
+ // Change the current condition list to a longest common tail
+ // of this condition list and the other list. (The common tail
+ // should correspond to the list from the common dominator.)
+ conditions.ResetToCommonAncestor(node_conditions_.Get(*input_it));
}
-
return UpdateConditions(node, conditions);
}
Reduction BranchElimination::ReduceStart(Node* node) {
- return UpdateConditions(node, ControlPathConditions::Empty(zone_));
-}
-
-const BranchElimination::ControlPathConditions*
-BranchElimination::PathConditionsForControlNodes::Get(Node* node) const {
- if (static_cast<size_t>(node->id()) < info_for_node_.size()) {
- return info_for_node_[node->id()];
- }
- return nullptr;
-}
-
-
-void BranchElimination::PathConditionsForControlNodes::Set(
- Node* node, const ControlPathConditions* conditions) {
- size_t index = static_cast<size_t>(node->id());
- if (index >= info_for_node_.size()) {
- info_for_node_.resize(index + 1, nullptr);
- }
- info_for_node_[index] = conditions;
+ return UpdateConditions(node, {});
}
@@ -200,157 +196,58 @@ Reduction BranchElimination::ReduceOtherControl(Node* node) {
Reduction BranchElimination::TakeConditionsFromFirstControl(Node* node) {
// We just propagate the information from the control input (ideally,
// we would only revisit control uses if there is change).
- const ControlPathConditions* from_input =
- node_conditions_.Get(NodeProperties::GetControlInput(node, 0));
- return UpdateConditions(node, from_input);
+ Node* input = NodeProperties::GetControlInput(node, 0);
+ if (!reduced_.Get(input)) return NoChange();
+ return UpdateConditions(node, node_conditions_.Get(input));
}
-
Reduction BranchElimination::UpdateConditions(
- Node* node, const ControlPathConditions* conditions) {
- const ControlPathConditions* original = node_conditions_.Get(node);
+ Node* node, ControlPathConditions conditions) {
// Only signal that the node has Changed if the condition information has
// changed.
- if (conditions != original) {
- if (conditions == nullptr || original == nullptr ||
- *conditions != *original) {
- node_conditions_.Set(node, conditions);
- return Changed(node);
- }
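+  // Note: bitwise | rather than || so that both Set() calls always execute;
+  // each returns whether the stored value changed.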
+ if (reduced_.Set(node, true) | node_conditions_.Set(node, conditions)) {
+ return Changed(node);
}
return NoChange();
}
Reduction BranchElimination::UpdateConditions(
- Node* node, const ControlPathConditions* prev_conditions,
- Node* current_condition, bool is_true_branch) {
- const ControlPathConditions* original = node_conditions_.Get(node);
- DCHECK(prev_conditions != nullptr && current_condition != nullptr);
+ Node* node, ControlPathConditions prev_conditions, Node* current_condition,
+ Node* current_branch, bool is_true_branch) {
+ ControlPathConditions original = node_conditions_.Get(node);
// The control path for the node is the path obtained by appending the
- // current_condition to the prev_conditions. Check if this new control path
- // would be the same as the already recorded path (original).
- if (original == nullptr || !prev_conditions->EqualsAfterAddingCondition(
- original, current_condition, is_true_branch)) {
- // If this is the first visit or if the control path is different from the
- // recorded path create the new control path and record it.
- const ControlPathConditions* new_condition =
- prev_conditions->AddCondition(zone_, current_condition, is_true_branch);
- node_conditions_.Set(node, new_condition);
- return Changed(node);
- }
- return NoChange();
-}
-
-// static
-const BranchElimination::ControlPathConditions*
-BranchElimination::ControlPathConditions::Empty(Zone* zone) {
- return new (zone->New(sizeof(ControlPathConditions)))
- ControlPathConditions(nullptr, 0);
-}
-
-
-void BranchElimination::ControlPathConditions::Merge(
- const ControlPathConditions& other) {
- // Change the current condition list to a longest common tail
- // of this condition list and the other list. (The common tail
- // should correspond to the list from the common dominator.)
-
- // First, we throw away the prefix of the longer list, so that
- // we have lists of the same length.
- size_t other_size = other.condition_count_;
- BranchCondition* other_condition = other.head_;
- while (other_size > condition_count_) {
- other_condition = other_condition->next;
- other_size--;
- }
- while (condition_count_ > other_size) {
- head_ = head_->next;
- condition_count_--;
- }
-
- // Then we go through both lists in lock-step until we find
- // the common tail.
- while (head_ != other_condition) {
- DCHECK_LT(0, condition_count_);
- condition_count_--;
- other_condition = other_condition->next;
- head_ = head_->next;
- }
-}
-
-
-const BranchElimination::ControlPathConditions*
-BranchElimination::ControlPathConditions::AddCondition(Zone* zone,
- Node* condition,
- bool is_true) const {
- DCHECK(LookupCondition(condition).IsNothing());
-
- BranchCondition* new_head = new (zone->New(sizeof(BranchCondition)))
- BranchCondition(condition, is_true, head_);
-
- ControlPathConditions* conditions =
- new (zone->New(sizeof(ControlPathConditions)))
- ControlPathConditions(new_head, condition_count_ + 1);
- return conditions;
-}
-
-
-Maybe<bool> BranchElimination::ControlPathConditions::LookupCondition(
- Node* condition) const {
- for (BranchCondition* current = head_; current != nullptr;
- current = current->next) {
- if (current->condition == condition) {
- return Just<bool>(current->is_true);
+ // current_condition to the prev_conditions. Use the original control path as
+ // a hint to avoid allocations.
+ prev_conditions.AddCondition(zone_, current_condition, current_branch,
+ is_true_branch, original);
+ return UpdateConditions(node, prev_conditions);
+}
+
+void BranchElimination::ControlPathConditions::AddCondition(
+ Zone* zone, Node* condition, Node* branch, bool is_true,
+ ControlPathConditions hint) {
+ DCHECK_EQ(false, LookupCondition(condition, nullptr, nullptr));
+ PushFront({condition, branch, is_true}, zone, hint);
+}
+
+bool BranchElimination::ControlPathConditions::LookupCondition(
+ Node* condition, Node** branch, bool* is_true) const {
+ for (BranchCondition element : *this) {
+ if (element.condition == condition) {
+ *is_true = element.is_true;
+ *branch = element.branch;
+ return true;
}
}
- return Nothing<bool>();
-}
-
-bool BranchElimination::ControlPathConditions::IsSamePath(
- BranchCondition* this_condition, BranchCondition* other_condition) const {
- while (true) {
- if (this_condition == other_condition) return true;
- if (this_condition->condition != other_condition->condition ||
- this_condition->is_true != other_condition->is_true) {
- return false;
- }
- this_condition = this_condition->next;
- other_condition = other_condition->next;
+ return false;
}
- UNREACHABLE();
-}
-bool BranchElimination::ControlPathConditions::operator==(
- const ControlPathConditions& other) const {
- if (condition_count_ != other.condition_count_) return false;
- return IsSamePath(head_, other.head_);
-}
+ Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
-bool BranchElimination::ControlPathConditions::EqualsAfterAddingCondition(
- const ControlPathConditions* other, const Node* new_condition,
- bool new_branch_direction) const {
- // When an extra condition is added to the current chain, the count of
- // the resulting chain would increase by 1. Quick check to see if counts
- // match.
- if (other->condition_count_ != condition_count_ + 1) return false;
-
- // Check if the head of the other chain is same as the new condition that
- // would be added.
- if (other->head_->condition != new_condition ||
- other->head_->is_true != new_branch_direction) {
- return false;
+ CommonOperatorBuilder* BranchElimination::common() const {
+ return jsgraph()->common();
}
- // Check if the rest of the path is the same as the prev_condition.
- return IsSamePath(other->head_->next, head_);
-}
-
-Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
-
-CommonOperatorBuilder* BranchElimination::common() const {
- return jsgraph()->common();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
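
ResetToCommonAncestor, used in ReduceMerge above, replaces the old Merge() routine but keeps its algorithm: trim the longer list to the length of the shorter one, then walk both in lock-step until they share a node. A self-contained sketch over a bare cons list (the types are stand-ins, not the FunctionalList implementation):

    #include <cstddef>

    struct Cons { int value; Cons* next; };

    // Returns the longest common tail of two lists whose lengths are known,
    // i.e. the conditions recorded at the common dominator.
    Cons* CommonTail(Cons* a, size_t a_len, Cons* b, size_t b_len) {
      while (a_len > b_len) { a = a->next; --a_len; }  // drop longer prefix
      while (b_len > a_len) { b = b->next; --b_len; }
      while (a != b) { a = a->next; b = b->next; }     // lock-step walk
      return a;
    }
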
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index d78933e734..de3b9e5b2e 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
-#define V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+#ifndef V8_COMPILER_BRANCH_ELIMINATION_H_
+#define V8_COMPILER_BRANCH_ELIMINATION_H_
#include "src/base/compiler-specific.h"
+#include "src/compiler/functional-list.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/node-aux-data.h"
#include "src/globals.h"
namespace v8 {
@@ -30,56 +32,27 @@ class V8_EXPORT_PRIVATE BranchElimination final
private:
struct BranchCondition {
Node* condition;
+ Node* branch;
bool is_true;
- BranchCondition* next;
- BranchCondition(Node* condition, bool is_true, BranchCondition* next)
- : condition(condition), is_true(is_true), next(next) {}
+ bool operator==(BranchCondition other) const {
+ return condition == other.condition && branch == other.branch &&
+ is_true == other.is_true;
+ }
+ bool operator!=(BranchCondition other) const { return !(*this == other); }
};
// Class for tracking information about branch conditions.
// At the moment it is a linked list of conditions and their values
// (true or false).
- class ControlPathConditions {
+ class ControlPathConditions : public FunctionalList<BranchCondition> {
public:
- Maybe<bool> LookupCondition(Node* condition) const;
-
- const ControlPathConditions* AddCondition(Zone* zone, Node* condition,
- bool is_true) const;
- static const ControlPathConditions* Empty(Zone* zone);
- void Merge(const ControlPathConditions& other);
-
- bool IsSamePath(BranchCondition* first, BranchCondition* second) const;
- bool EqualsAfterAddingCondition(const ControlPathConditions* other,
- const Node* new_condition,
- bool new_branch_condition) const;
- bool operator==(const ControlPathConditions& other) const;
- bool operator!=(const ControlPathConditions& other) const {
- return !(*this == other);
- }
+ bool LookupCondition(Node* condition, Node** branch, bool* is_true) const;
+ void AddCondition(Zone* zone, Node* condition, Node* branch, bool is_true,
+ ControlPathConditions hint);
private:
- ControlPathConditions(BranchCondition* head, size_t condition_count)
- : head_(head), condition_count_(condition_count) {}
-
- BranchCondition* head_;
- // We keep track of the list length so that we can find the longest
- // common tail easily.
- size_t condition_count_;
- };
-
- // Maps each control node to the condition information known about the node.
- // If the information is nullptr, then we have not calculated the information
- // yet.
- class PathConditionsForControlNodes {
- public:
- PathConditionsForControlNodes(Zone* zone, size_t size_hint)
- : info_for_node_(size_hint, nullptr, zone) {}
- const ControlPathConditions* Get(Node* node) const;
- void Set(Node* node, const ControlPathConditions* conditions);
-
- private:
- ZoneVector<const ControlPathConditions*> info_for_node_;
+ using FunctionalList<BranchCondition>::PushFront;
};
Reduction ReduceBranch(Node* node);
@@ -91,11 +64,10 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction ReduceOtherControl(Node* node);
Reduction TakeConditionsFromFirstControl(Node* node);
- Reduction UpdateConditions(Node* node,
- const ControlPathConditions* conditions);
- Reduction UpdateConditions(Node* node,
- const ControlPathConditions* prev_conditions,
- Node* current_condition, bool is_true_branch);
+ Reduction UpdateConditions(Node* node, ControlPathConditions conditions);
+ Reduction UpdateConditions(Node* node, ControlPathConditions prev_conditions,
+ Node* current_condition, Node* current_branch,
+ bool is_true_branch);
Node* dead() const { return dead_; }
Graph* graph() const;
@@ -103,7 +75,12 @@ class V8_EXPORT_PRIVATE BranchElimination final
CommonOperatorBuilder* common() const;
JSGraph* const jsgraph_;
- PathConditionsForControlNodes node_conditions_;
+
+ // Maps each control node to the condition information known about the node.
+  // A node that is not marked in {reduced_} has not been calculated yet, so
+  // its entry in {node_conditions_} is not meaningful.
+ NodeAuxData<ControlPathConditions> node_conditions_;
+ NodeAuxData<bool> reduced_;
Zone* zone_;
Node* dead_;
};
@@ -112,4 +89,4 @@ class V8_EXPORT_PRIVATE BranchElimination final
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+#endif // V8_COMPILER_BRANCH_ELIMINATION_H_
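
The header change above makes ControlPathConditions a value type over FunctionalList<BranchCondition>: lists share tails, so copying a ControlPathConditions is cheap and PushFront never mutates another node's view. A minimal sketch of that shape (zone allocation and the re-use hint are omitted; this is an assumption-level stand-in, not src/compiler/functional-list.h):

    #include <cstddef>

    template <typename T>
    class PersistentList {
      struct Node { T value; Node* next; };
      Node* head_ = nullptr;
      size_t size_ = 0;  // tracked so common tails can be found cheaply

     public:
      // Prepends by allocating a new head; lists sharing the old tail are
      // unaffected, which is what makes per-control-node copies cheap.
      void PushFront(T value) {
        head_ = new Node{value, head_};  // zone-allocated in the real code
        ++size_;
      }
      size_t Size() const { return size_; }
    };
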
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 4ee30bcdf2..980869ccd3 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -61,6 +61,22 @@ bool BytecodeLoopAssignments::ContainsLocal(int index) const {
return bit_vector_->Contains(parameter_count_ + index);
}
+ResumeJumpTarget::ResumeJumpTarget(int suspend_id, int target_offset,
+ int final_target_offset)
+ : suspend_id_(suspend_id),
+ target_offset_(target_offset),
+ final_target_offset_(final_target_offset) {}
+
+ResumeJumpTarget ResumeJumpTarget::Leaf(int suspend_id, int target_offset) {
+ return ResumeJumpTarget(suspend_id, target_offset, target_offset);
+}
+
+ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
+ const ResumeJumpTarget& next) {
+ return ResumeJumpTarget(next.suspend_id(), loop_header_offset,
+ next.target_offset());
+}
+
BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
Zone* zone, bool do_liveness_analysis)
: bytecode_array_(bytecode_array),
@@ -68,6 +84,7 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
zone_(zone),
loop_stack_(zone),
loop_end_index_queue_(zone),
+ resume_jump_targets_(zone),
end_to_header_(zone),
header_to_info_(zone),
osr_entry_point_(-1),
@@ -80,6 +97,21 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+ // Special case Suspend and Resume to just pass through liveness.
+ if (bytecode == Bytecode::kSuspendGenerator) {
+ // The generator object has to be live.
+ in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index());
+ // Suspend additionally reads and returns the accumulator
+ DCHECK(Bytecodes::ReadsAccumulator(bytecode));
+ in_liveness.MarkAccumulatorLive();
+ return;
+ }
+ if (bytecode == Bytecode::kResumeGenerator) {
+ // The generator object has to be live.
+ in_liveness.MarkRegisterLive(accessor.GetRegisterOperand(0).index());
+ return;
+ }
+
if (Bytecodes::WritesAccumulator(bytecode)) {
in_liveness.MarkAccumulatorDead();
}
@@ -175,6 +207,13 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
int current_offset = accessor.current_offset();
const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
+ // Special case Suspend and Resume to just pass through liveness.
+ if (bytecode == Bytecode::kSuspendGenerator ||
+ bytecode == Bytecode::kResumeGenerator) {
+ out_liveness.Union(*next_bytecode_in_liveness);
+ return;
+ }
+
// Update from jump target (if any). Skip loops, we update these manually in
// the liveness iterations.
if (Bytecodes::IsForwardJump(bytecode)) {
@@ -197,9 +236,9 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
int handler_context;
// TODO(leszeks): We should look up this range only once per entry.
- HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
+ HandlerTable table(*bytecode_array);
int handler_offset =
- table->LookupRange(current_offset, &handler_context, nullptr);
+ table.LookupRange(current_offset, &handler_context, nullptr);
if (handler_offset != -1) {
bool was_accumulator_live = out_liveness.AccumulatorIsLive();
@@ -221,6 +260,18 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
}
}
+void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+ BytecodeLivenessState** next_bytecode_in_liveness,
+ const interpreter::BytecodeArrayAccessor& accessor,
+ const BytecodeLivenessMap& liveness_map) {
+ UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness,
+ accessor, liveness_map);
+ liveness.in->CopyFrom(*liveness.out);
+ UpdateInLiveness(bytecode, *liveness.in, accessor);
+
+ *next_bytecode_in_liveness = liveness.in;
+}
+
void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
@@ -260,14 +311,21 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
- int osr_loop_end_offset =
- osr_bailout_id.IsNone() ? -1 : osr_bailout_id.ToInt();
+ bool is_osr = !osr_bailout_id.IsNone();
+ int osr_loop_end_offset = is_osr ? osr_bailout_id.ToInt() : -1;
+
+ int generator_switch_index = -1;
interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
Bytecode bytecode = iterator.current_bytecode();
int current_offset = iterator.current_offset();
+ if (bytecode == Bytecode::kSwitchOnGeneratorState) {
+ DCHECK_EQ(generator_switch_index, -1);
+ generator_switch_index = iterator.current_index();
+ }
+
if (bytecode == Bytecode::kJumpLoop) {
// Every byte up to and including the last byte within the backwards jump
// instruction is considered part of the loop, set loop end accordingly.
@@ -298,32 +356,84 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
// information we currently have.
UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
+      // Record resume targets for this loop's suspends, though only if not
+      // OSR.
+ if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ int suspend_id = iterator.GetUnsignedImmediateOperand(3);
+ int resume_offset = current_offset + iterator.current_bytecode_size();
+ current_loop_info->AddResumeTarget(
+ ResumeJumpTarget::Leaf(suspend_id, resume_offset));
+ }
+
+ // If we've reached the header of the loop, pop it off the stack.
if (current_offset == current_loop.header_offset) {
loop_stack_.pop();
if (loop_stack_.size() > 1) {
- // Propagate inner loop assignments to outer loop.
- loop_stack_.top().loop_info->assignments().Union(
+ // If there is still an outer loop, propagate inner loop assignments.
+ LoopInfo* parent_loop_info = loop_stack_.top().loop_info;
+
+ parent_loop_info->assignments().Union(
current_loop_info->assignments());
+
+ // Also, propagate resume targets. Instead of jumping to the target
+ // itself, the outer loop will jump to this loop header for any
+ // targets that are inside the current loop, so that this loop stays
+ // reducible. Hence, a nested loop of the form:
+ //
+ // switch (#1 -> suspend1, #2 -> suspend2)
+ // loop {
+ // suspend1: suspend #1
+ // loop {
+ // suspend2: suspend #2
+ // }
+ // }
+ //
+ // becomes:
+ //
+ // switch (#1 -> loop1, #2 -> loop1)
+ // loop1: loop {
+ // switch (#1 -> suspend1, #2 -> loop2)
+ // suspend1: suspend #1
+ // loop2: loop {
+ // switch (#2 -> suspend2)
+ // suspend2: suspend #2
+ // }
+ // }
+ for (const auto& target : current_loop_info->resume_jump_targets()) {
+ parent_loop_info->AddResumeTarget(
+ ResumeJumpTarget::AtLoopHeader(current_offset, target));
+ }
+
+ } else {
+ // Otherwise, just propagate inner loop suspends to top-level.
+ for (const auto& target : current_loop_info->resume_jump_targets()) {
+ resume_jump_targets_.push_back(
+ ResumeJumpTarget::AtLoopHeader(current_offset, target));
+ }
}
}
+ } else if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ // If we're not in a loop, we still need to look for suspends.
+ // TODO(leszeks): It would be nice to de-duplicate this with the in-loop
+    // case.
+ int suspend_id = iterator.GetUnsignedImmediateOperand(3);
+ int resume_offset = current_offset + iterator.current_bytecode_size();
+ resume_jump_targets_.push_back(
+ ResumeJumpTarget::Leaf(suspend_id, resume_offset));
}
if (do_liveness_analysis_) {
BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
current_offset, bytecode_array()->register_count(), zone());
-
- UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
- iterator, liveness_map_);
- liveness.in->CopyFrom(*liveness.out);
- UpdateInLiveness(bytecode, *liveness.in, iterator);
-
- next_bytecode_in_liveness = liveness.in;
+ UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
+ liveness_map_);
}
}
DCHECK_EQ(loop_stack_.size(), 1u);
DCHECK_EQ(loop_stack_.top().header_offset, -1);
+ DCHECK(ResumeJumpTargetsAreValid());
+
if (!do_liveness_analysis_) return;
// At this point, every bytecode has a valid in and out liveness, except for
@@ -374,16 +484,11 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
--iterator;
for (; iterator.current_offset() > header_offset; --iterator) {
Bytecode bytecode = iterator.current_bytecode();
-
int current_offset = iterator.current_offset();
BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
- UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
- iterator, liveness_map_);
- liveness.in->CopyFrom(*liveness.out);
- UpdateInLiveness(bytecode, *liveness.in, iterator);
-
- next_bytecode_in_liveness = liveness.in;
+ UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
+ liveness_map_);
}
// Now we are at the loop header. Since the in-liveness of the header
// can't change, we need only to update the out-liveness.
@@ -391,6 +496,47 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
next_bytecode_in_liveness, iterator, liveness_map_);
}
+ // Process the generator switch statement separately, once the loops are done.
+ // This has to be a separate pass because the generator switch can jump into
+ // the middle of loops (and is the only kind of jump that can jump across a
+ // loop header).
+ if (generator_switch_index != -1) {
+ iterator.GoToIndex(generator_switch_index);
+ DCHECK_EQ(iterator.current_bytecode(), Bytecode::kSwitchOnGeneratorState);
+
+ int current_offset = iterator.current_offset();
+ BytecodeLiveness& switch_liveness =
+ liveness_map_.GetLiveness(current_offset);
+
+ bool any_changed = false;
+ for (const auto& entry : iterator.GetJumpTableTargetOffsets()) {
+ if (switch_liveness.out->UnionIsChanged(
+ *liveness_map_.GetInLiveness(entry.target_offset))) {
+ any_changed = true;
+ }
+ }
+
+ // If the switch liveness changed, we have to propagate it up the remaining
+ // bytecodes before it.
+ if (any_changed) {
+ switch_liveness.in->CopyFrom(*switch_liveness.out);
+ UpdateInLiveness(Bytecode::kSwitchOnGeneratorState, *switch_liveness.in,
+ iterator);
+ next_bytecode_in_liveness = switch_liveness.in;
+ for (--iterator; iterator.IsValid(); --iterator) {
+ Bytecode bytecode = iterator.current_bytecode();
+ int current_offset = iterator.current_offset();
+ BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+ // There shouldn't be any more loops.
+ DCHECK_NE(bytecode, Bytecode::kJumpLoop);
+
+ UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
+ liveness_map_);
+ }
+ }
+ }
+
DCHECK(LivenessIsValid());
}
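
The extra pass above is one more relaxation step of the usual backwards dataflow: the switch's out-liveness is the union of the in-liveness of every jump-table target, and only if that union grew does the change need to be pushed back to the start of the bytecode. A small sketch of the union step, with a bitset standing in for BytecodeLivenessState (illustrative types, not V8's):

    #include <bitset>
    #include <vector>

    using Liveness = std::bitset<64>;  // stand-in for register liveness

    // Unions every target's in-liveness into the switch's out-liveness and
    // reports whether anything changed (mirrors UnionIsChanged above).
    bool UnionTargets(Liveness* out, const std::vector<Liveness>& target_ins) {
      Liveness before = *out;
      for (const Liveness& in : target_ins) *out |= in;
      return *out != before;
    }
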
@@ -497,6 +643,154 @@ std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
}
#if DEBUG
+bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
+ bool valid = true;
+
+ // Find the generator switch.
+ interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+ for (iterator.GoToStart(); iterator.IsValid(); ++iterator) {
+ if (iterator.current_bytecode() == Bytecode::kSwitchOnGeneratorState) {
+ break;
+ }
+ }
+
+ // If the iterator is invalid, we've reached the end without finding the
+ // generator switch. Similarly, if we are OSR-ing, we're not resuming, so we
+ // need no jump targets. So, ensure there are no jump targets and exit.
+ if (!iterator.IsValid() || HasOsrEntryPoint()) {
+ // Check top-level.
+ if (!resume_jump_targets().empty()) {
+ PrintF(stderr,
+ "Found %zu top-level resume targets but no resume switch\n",
+ resume_jump_targets().size());
+ valid = false;
+ }
+ // Check loops.
+ for (const std::pair<int, LoopInfo>& loop_info : header_to_info_) {
+ if (!loop_info.second.resume_jump_targets().empty()) {
+ PrintF(stderr,
+ "Found %zu resume targets at loop at offset %d, but no resume "
+ "switch\n",
+ loop_info.second.resume_jump_targets().size(), loop_info.first);
+ valid = false;
+ }
+ }
+
+ return valid;
+ }
+
+ // Otherwise, we've found the resume switch. Check that the top level jumps
+ // only to leaves and loop headers, then check that each loop header handles
+ // all the unresolved jumps, also jumping only to leaves and inner loop
+ // headers.
+
+ // First collect all required suspend ids.
+ std::map<int, int> unresolved_suspend_ids;
+ for (const interpreter::JumpTableTargetOffset& offset :
+ iterator.GetJumpTableTargetOffsets()) {
+ int suspend_id = offset.case_value;
+ int resume_offset = offset.target_offset;
+
+ unresolved_suspend_ids[suspend_id] = resume_offset;
+ }
+
+ // Check top-level.
+ if (!ResumeJumpTargetLeavesResolveSuspendIds(-1, resume_jump_targets(),
+ &unresolved_suspend_ids)) {
+ valid = false;
+ }
+ // Check loops.
+ for (const std::pair<int, LoopInfo>& loop_info : header_to_info_) {
+ if (!ResumeJumpTargetLeavesResolveSuspendIds(
+ loop_info.first, loop_info.second.resume_jump_targets(),
+ &unresolved_suspend_ids)) {
+ valid = false;
+ }
+ }
+
+ // Check that everything is resolved.
+ if (!unresolved_suspend_ids.empty()) {
+ PrintF(stderr,
+ "Found suspend ids that are not resolved by a final leaf resume "
+ "jump:\n");
+
+ for (const std::pair<int, int>& target : unresolved_suspend_ids) {
+ PrintF(stderr, " %d -> %d\n", target.first, target.second);
+ }
+ valid = false;
+ }
+
+ return valid;
+}
+
+bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds(
+ int parent_offset, const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ std::map<int, int>* unresolved_suspend_ids) {
+ bool valid = true;
+ for (const ResumeJumpTarget& target : resume_jump_targets) {
+ std::map<int, int>::iterator it =
+ unresolved_suspend_ids->find(target.suspend_id());
+ if (it == unresolved_suspend_ids->end()) {
+ PrintF(
+ stderr,
+ "No unresolved suspend found for resume target with suspend id %d\n",
+ target.suspend_id());
+ valid = false;
+ continue;
+ }
+ int expected_target = it->second;
+
+ if (target.is_leaf()) {
+ // Leaves should have the expected target as their target.
+ if (target.target_offset() != expected_target) {
+ PrintF(
+ stderr,
+ "Expected leaf resume target for id %d to have target offset %d, "
+ "but had %d\n",
+ target.suspend_id(), expected_target, target.target_offset());
+ valid = false;
+ } else {
+        // Make sure we're resuming at a ResumeGenerator bytecode.
+        interpreter::BytecodeArrayAccessor accessor(bytecode_array(),
+                                                     target.target_offset());
+        if (accessor.current_bytecode() != Bytecode::kResumeGenerator) {
+          PrintF(stderr,
+                 "Expected resume target for id %d, offset %d, to be "
+                 "ResumeGenerator, but found %s\n",
+                 target.suspend_id(), target.target_offset(),
+                 Bytecodes::ToString(accessor.current_bytecode()));
+
+ valid = false;
+ }
+ }
+ // We've resolved this suspend id, so erase it to make sure we don't
+ // resolve it twice.
+ unresolved_suspend_ids->erase(it);
+ } else {
+ // Non-leaves should have a direct inner loop header as their target.
+ if (!IsLoopHeader(target.target_offset())) {
+ PrintF(stderr,
+ "Expected non-leaf resume target for id %d to have a loop "
+ "header at target offset %d\n",
+ target.suspend_id(), target.target_offset());
+ valid = false;
+ } else {
+ LoopInfo loop_info = GetLoopInfoFor(target.target_offset());
+ if (loop_info.parent_offset() != parent_offset) {
+ PrintF(stderr,
+ "Expected non-leaf resume target for id %d to have a direct "
+ "inner loop at target offset %d\n",
+ target.suspend_id(), target.target_offset());
+ valid = false;
+ }
+ // If the target loop is a valid inner loop, we'll check its validity
+ // when we analyze its resume targets.
+ }
+ }
+ }
+ return valid;
+}
+
bool BytecodeAnalysis::LivenessIsValid() {
interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
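
The trampolining described in the nested-loop comment earlier in this file can be mimicked in plain C++: every loop header re-dispatches on the suspend id, so no jump ever crosses a loop boundary directly and each loop remains reducible. A compilable sketch (suspend ids and targets are invented for illustration):

    #include <cstdio>

    void Resume(int suspend_id) {
      // The top-level switch only knows which outermost loop to enter.
      if (suspend_id == 1 || suspend_id == 2) goto loop1;
      return;
    loop1:
      while (true) {
        // The outer loop header re-dispatches: either resume here, or
        // forward id #2 to the inner loop's header.
        if (suspend_id == 1) goto suspend1;
        goto loop2;
      suspend1:
        std::printf("resumed at suspend #1\n");
        return;
      loop2:
        while (true) {
          std::printf("resumed at suspend #2\n");  // the suspend2 target
          return;
        }
      }
    }
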
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index f6cd6e3cab..6ff9ed021a 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -39,15 +39,49 @@ class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
BitVector* bit_vector_;
};
+// Jump targets for resuming a suspended generator.
+class V8_EXPORT_PRIVATE ResumeJumpTarget {
+ public:
+ // Create a resume jump target representing an actual resume.
+ static ResumeJumpTarget Leaf(int suspend_id, int target_offset);
+
+ // Create a resume jump target at a loop header, which will have another
+ // resume jump after the loop header is crossed.
+ static ResumeJumpTarget AtLoopHeader(int loop_header_offset,
+ const ResumeJumpTarget& next);
+
+ int suspend_id() const { return suspend_id_; }
+ int target_offset() const { return target_offset_; }
+ bool is_leaf() const { return target_offset_ == final_target_offset_; }
+
+ private:
+ // The suspend id of the resume.
+ int suspend_id_;
+ // The target offset of this resume jump.
+ int target_offset_;
+ // The final offset of this resume, which may be across multiple jumps.
+ int final_target_offset_;
+
+ ResumeJumpTarget(int suspend_id, int target_offset, int final_target_offset);
+};
+
struct V8_EXPORT_PRIVATE LoopInfo {
public:
LoopInfo(int parent_offset, int parameter_count, int register_count,
Zone* zone)
: parent_offset_(parent_offset),
- assignments_(parameter_count, register_count, zone) {}
+ assignments_(parameter_count, register_count, zone),
+ resume_jump_targets_(zone) {}
int parent_offset() const { return parent_offset_; }
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets() const {
+ return resume_jump_targets_;
+ }
+ void AddResumeTarget(const ResumeJumpTarget& target) {
+ resume_jump_targets_.push_back(target);
+ }
+
BytecodeLoopAssignments& assignments() { return assignments_; }
const BytecodeLoopAssignments& assignments() const { return assignments_; }
@@ -55,6 +89,7 @@ struct V8_EXPORT_PRIVATE LoopInfo {
// The offset to the parent loop, or -1 if there is no parent.
int parent_offset_;
BytecodeLoopAssignments assignments_;
+ ZoneVector<ResumeJumpTarget> resume_jump_targets_;
};
class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
@@ -78,10 +113,16 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
// Get the loop info of the loop header at {header_offset}.
const LoopInfo& GetLoopInfoFor(int header_offset) const;
+ // Get the top-level resume jump targets.
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets() const {
+ return resume_jump_targets_;
+ }
+
// True if the current analysis has an OSR entry point.
bool HasOsrEntryPoint() const { return osr_entry_point_ != -1; }
int osr_entry_point() const { return osr_entry_point_; }
+
// Gets the in-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetInLivenessFor(int offset) const;
@@ -99,6 +140,12 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
void PushLoop(int loop_header, int loop_end);
#if DEBUG
+ bool ResumeJumpTargetsAreValid();
+ bool ResumeJumpTargetLeavesResolveSuspendIds(
+ int parent_offset,
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ std::map<int, int>* unresolved_suspend_ids);
+
bool LivenessIsValid();
#endif
@@ -112,6 +159,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
ZoneStack<LoopStackEntry> loop_stack_;
ZoneVector<int> loop_end_index_queue_;
+ ZoneVector<ResumeJumpTarget> resume_jump_targets_;
ZoneMap<int, int> end_to_header_;
ZoneMap<int, LoopInfo> header_to_info_;
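
A hypothetical use of the helpers declared above, showing how a leaf resume is re-targeted as it is propagated out across a loop header (the offsets are invented for illustration):

    // A suspend with id 2 resumes at bytecode offset 123.
    ResumeJumpTarget leaf = ResumeJumpTarget::Leaf(/*suspend_id=*/2,
                                                   /*target_offset=*/123);
    // Propagating past a loop header at offset 40 redirects the next jump
    // to the header while remembering the final resume offset.
    ResumeJumpTarget hop = ResumeJumpTarget::AtLoopHeader(40, leaf);
    // hop.target_offset() == 40 and hop.is_leaf() == false; the eventual
    // target (123) is preserved as the final target offset.
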
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 54a924fce4..3b2a3eb252 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -9,6 +9,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/interpreter/bytecodes.h"
@@ -40,6 +41,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* LookupAccumulator() const;
Node* LookupRegister(interpreter::Register the_register) const;
+ Node* LookupGeneratorState() const;
void BindAccumulator(Node* node,
FrameStateAttachmentMode mode = kDontAttachFrameState);
@@ -48,6 +50,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
void BindRegistersToProjections(
interpreter::Register first_reg, Node* node,
FrameStateAttachmentMode mode = kDontAttachFrameState);
+ void BindGeneratorState(Node* node);
void RecordAfterState(Node* node,
FrameStateAttachmentMode mode = kDontAttachFrameState);
@@ -108,6 +111,7 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* effect_dependency_;
NodeVector values_;
Node* parameters_state_values_;
+ Node* generator_state_;
int register_base_;
int accumulator_base_;
};
@@ -138,7 +142,8 @@ BytecodeGraphBuilder::Environment::Environment(
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
values_(builder->local_zone()),
- parameters_state_values_(nullptr) {
+ parameters_state_values_(nullptr),
+ generator_state_(nullptr) {
// The layout of values_ is:
//
// [receiver] [parameters] [registers] [accumulator]
@@ -191,6 +196,7 @@ BytecodeGraphBuilder::Environment::Environment(
effect_dependency_(other->effect_dependency_),
values_(other->zone()),
parameters_state_values_(other->parameters_state_values_),
+ generator_state_(other->generator_state_),
register_base_(other->register_base_),
accumulator_base_(other->accumulator_base_) {
values_ = other->values_;
@@ -210,6 +216,10 @@ Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
return values()->at(accumulator_base_);
}
+Node* BytecodeGraphBuilder::Environment::LookupGeneratorState() const {
+ DCHECK_NOT_NULL(generator_state_);
+ return generator_state_;
+}
Node* BytecodeGraphBuilder::Environment::LookupRegister(
interpreter::Register the_register) const {
@@ -231,6 +241,10 @@ void BytecodeGraphBuilder::Environment::BindAccumulator(
values()->at(accumulator_base_) = node;
}
+void BytecodeGraphBuilder::Environment::BindGeneratorState(Node* node) {
+ generator_state_ = node;
+}
+
void BytecodeGraphBuilder::Environment::BindRegister(
interpreter::Register the_register, Node* node,
FrameStateAttachmentMode mode) {
@@ -291,9 +305,18 @@ void BytecodeGraphBuilder::Environment::Merge(
for (int i = 0; i < register_count(); i++) {
int index = register_base() + i;
if (liveness == nullptr || liveness->RegisterIsLive(i)) {
- DCHECK_NE(values_[index], builder()->jsgraph()->OptimizedOutConstant());
- DCHECK_NE(other->values_[index],
- builder()->jsgraph()->OptimizedOutConstant());
+#if DEBUG
+ // We only do these DCHECKs when we are not in the resume path of a
+ // generator -- that is, when either there is no generator state at all,
+ // or the generator state is the constant "executing" value.
+ if (generator_state_ == nullptr ||
+ NumberMatcher(generator_state_)
+ .Is(JSGeneratorObject::kGeneratorExecuting)) {
+ DCHECK_NE(values_[index], builder()->jsgraph()->OptimizedOutConstant());
+ DCHECK_NE(other->values_[index],
+ builder()->jsgraph()->OptimizedOutConstant());
+ }
+#endif
values_[index] =
builder()->MergeValue(values_[index], other->values_[index], control);
@@ -315,6 +338,12 @@ void BytecodeGraphBuilder::Environment::Merge(
} else {
values_[accumulator_base()] = builder()->jsgraph()->OptimizedOutConstant();
}
+
+ if (generator_state_ != nullptr) {
+ DCHECK_NOT_NULL(other->generator_state_);
+ generator_state_ = builder()->MergeValue(generator_state_,
+ other->generator_state_, control);
+ }
}
void BytecodeGraphBuilder::Environment::PrepareForLoop(
@@ -345,6 +374,10 @@ void BytecodeGraphBuilder::Environment::PrepareForLoop(
// The accumulator should not be live on entry.
DCHECK_IMPLIES(liveness != nullptr, !liveness->AccumulatorIsLive());
+ if (generator_state_ != nullptr) {
+ generator_state_ = builder()->NewPhi(1, generator_state_, control);
+ }
+
// Connect to the loop end.
Node* terminate = builder()->graph()->NewNode(
builder()->common()->Terminate(), effect, control);
@@ -423,6 +456,11 @@ void BytecodeGraphBuilder::Environment::PrepareForLoopExit(
values_[accumulator_base()], loop_exit);
values_[accumulator_base()] = rename;
}
+
+ if (generator_state_ != nullptr) {
+ generator_state_ = graph()->NewNode(common()->LoopExitValue(),
+ generator_state_, loop_exit);
+ }
}
void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
@@ -483,8 +521,6 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
bytecode_array_(handle(shared_info->bytecode_array())),
- exception_handler_table_(
- handle(HandlerTable::cast(bytecode_array()->handler_table()))),
feedback_vector_(feedback_vector),
type_hint_lowering_(jsgraph, feedback_vector, flags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
@@ -498,6 +534,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
currently_peeled_loop_offset_(-1),
stack_check_(stack_check),
merge_environments_(local_zone),
+ generator_merge_environments_(local_zone),
exception_handlers_(local_zone),
current_exception_handler_(0),
input_buffer_size_(0),
@@ -529,7 +566,7 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
}
VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
- return VectorSlotPair(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ return VectorSlotPair(feedback_vector(), FeedbackVector::ToSlot(slot_id));
}
void BytecodeGraphBuilder::CreateGraph() {
@@ -847,6 +884,11 @@ void BytecodeGraphBuilder::VisitBytecodes() {
bytecode_analysis.PrintLivenessTo(of);
}
+ if (!bytecode_analysis.resume_jump_targets().empty()) {
+ environment()->BindGeneratorState(
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+ }
+
if (bytecode_analysis.HasOsrEntryPoint()) {
// We peel the OSR loop and any outer loop containing it except that we
// leave the nodes corresponding to the whole outermost loop (including
@@ -1393,14 +1435,17 @@ void BytecodeGraphBuilder::VisitPopContext() {
void BytecodeGraphBuilder::VisitCreateClosure() {
Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
- int const slot_id = bytecode_iterator().GetIndexOperand(1);
- VectorSlotPair pair = CreateVectorSlotPair(slot_id);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
+ FeedbackNexus nexus(feedback_vector(), slot);
PretenureFlag tenured =
interpreter::CreateClosureFlags::PretenuredBit::decode(
bytecode_iterator().GetFlagOperand(2))
? TENURED
: NOT_TENURED;
- const Operator* op = javascript()->CreateClosure(shared_info, pair, tenured);
+ const Operator* op = javascript()->CreateClosure(
+ shared_info, nexus.GetFeedbackCell(),
+ handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy)),
+ tenured);
Node* closure = NewNode(op);
environment()->BindAccumulator(closure);
}
@@ -1540,12 +1585,21 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
Handle<TemplateObjectDescription> description =
Handle<TemplateObjectDescription>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
- // It's not observable when the template object is created, so we
- // can just create it eagerly during graph building and bake in
- // the JSArray constant here.
- Node* template_object =
- jsgraph()->HeapConstant(TemplateObjectDescription::GetTemplateObject(
- description, native_context()));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
+ FeedbackNexus nexus(feedback_vector(), slot);
+
+ Handle<JSArray> cached_value;
+ if (nexus.GetFeedback() == Smi::kZero) {
+ // It's not observable when the template object is created, so we
+ // can just create it eagerly during graph building and bake in
+ // the JSArray constant here.
+ cached_value = TemplateObjectDescription::CreateTemplateObject(description);
+ nexus.vector()->Set(slot, *cached_value);
+ } else {
+ cached_value = handle(JSArray::cast(nexus.GetFeedback()));
+ }
+
+ Node* template_object = jsgraph()->HeapConstant(cached_value);
environment()->BindAccumulator(template_object);
}
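The caching logic above is small but easy to misread: the feedback slot starts out holding Smi::kZero, the first graph build creates the template object and writes it back, and every later build reuses the cached JSArray, so the baked-in constant stays identical across recompiles. A minimal standalone sketch of that slot discipline, with a plain map standing in for the feedback vector:

#include <map>

using SlotId = int;          // stand-in for FeedbackSlot
using TemplateObject = int;  // stand-in for the cached JSArray

// Absence of a key models the Smi::kZero "uninitialized" sentinel.
std::map<SlotId, TemplateObject> feedback_slots;
TemplateObject next_object = 1;

TemplateObject GetTemplateObjectCached(SlotId slot) {
  auto it = feedback_slots.find(slot);
  if (it == feedback_slots.end()) {
    // First compilation: create eagerly and cache in the feedback slot.
    TemplateObject created = next_object++;
    feedback_slots[slot] = created;
    return created;
  }
  // Later compilations reuse the cached value.
  return it->second;
}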
@@ -2015,8 +2069,8 @@ void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
PrepareEagerCheckpoint();
Node* operand = environment()->LookupAccumulator();
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kUnaryOperationHintIndex));
+ FeedbackSlot slot =
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedUnaryOp(op, operand, slot);
if (lowering.IsExit()) return;
@@ -2038,8 +2092,8 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kBinaryOperationHintIndex));
+ FeedbackSlot slot =
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedBinaryOp(op, left, right, slot);
if (lowering.IsExit()) return;
@@ -2059,28 +2113,23 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
// feedback.
BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
int operand_index) {
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(operand_index));
- DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot));
- BinaryOpICNexus nexus(feedback_vector(), slot);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
+ FeedbackNexus nexus(feedback_vector(), slot);
return nexus.GetBinaryOperationFeedback();
}
// Helper function to create compare operation hint from the recorded type
// feedback.
CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
- int slot_index = bytecode_iterator().GetIndexOperand(1);
- FeedbackSlot slot = feedback_vector()->ToSlot(slot_index);
- DCHECK_EQ(FeedbackSlotKind::kCompareOp, feedback_vector()->GetKind(slot));
- CompareICNexus nexus(feedback_vector(), slot);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
+ FeedbackNexus nexus(feedback_vector(), slot);
return nexus.GetCompareOperationFeedback();
}
// Helper function to create for-in mode from the recorded type feedback.
ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(operand_index));
- ForInICNexus nexus(feedback_vector(), slot);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
+ FeedbackNexus nexus(feedback_vector(), slot);
switch (nexus.GetForInFeedback()) {
case ForInHint::kNone:
case ForInHint::kEnumCacheKeysAndIndices:
@@ -2095,13 +2144,13 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
if (invocation_frequency_.IsUnknown()) return CallFrequency();
- CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
return CallFrequency(nexus.ComputeCallFrequency() *
invocation_frequency_.value());
}
SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
- CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
return nexus.GetSpeculationMode();
}
@@ -2173,8 +2222,8 @@ void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
Node* left = environment()->LookupAccumulator();
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
- FeedbackSlot slot = feedback_vector()->ToSlot(
- bytecode_iterator().GetIndexOperand(kBinaryOperationSmiHintIndex));
+ FeedbackSlot slot =
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedBinaryOp(op, left, right, slot);
if (lowering.IsExit()) return;
@@ -2288,8 +2337,7 @@ void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
- int slot_index = bytecode_iterator().GetIndexOperand(1);
- FeedbackSlot slot = feedback_vector()->ToSlot(slot_index);
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedBinaryOp(op, left, right, slot);
if (lowering.IsExit()) return;
@@ -2452,8 +2500,7 @@ void BytecodeGraphBuilder::VisitToNumber() {
PrepareEagerCheckpoint();
Node* object = environment()->LookupAccumulator();
- FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(0));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(0);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedToNumber(object, slot);
@@ -2474,8 +2521,7 @@ void BytecodeGraphBuilder::VisitToNumeric() {
// If we have some kind of Number feedback, we do the same lowering as for
// ToNumber.
- FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(0));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(0);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedToNumber(object, slot);
@@ -2591,15 +2637,19 @@ void BytecodeGraphBuilder::VisitSetPendingMessage() {
environment()->BindAccumulator(previous_message);
}
-void BytecodeGraphBuilder::VisitReturn() {
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
- bytecode_iterator().current_offset()));
+void BytecodeGraphBuilder::BuildReturn(const BytecodeLivenessState* liveness) {
+ BuildLoopExitsForFunctionExit(liveness);
Node* pop_node = jsgraph()->ZeroConstant();
Node* control =
NewNode(common()->Return(), pop_node, environment()->LookupAccumulator());
MergeControlToLeaveFunction(control);
}
+void BytecodeGraphBuilder::VisitReturn() {
+ BuildReturn(bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset()));
+}
+
void BytecodeGraphBuilder::VisitDebugger() {
PrepareEagerCheckpoint();
Node* call = NewNode(javascript()->Debugger());
@@ -2633,8 +2683,7 @@ void BytecodeGraphBuilder::VisitForInPrepare() {
PrepareEagerCheckpoint();
Node* enumerator = environment()->LookupAccumulator();
- FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
JSTypeHintLowering::LoweringResult lowering =
TryBuildSimplifiedForInPrepare(enumerator, slot);
if (lowering.IsExit()) return;
@@ -2675,8 +2724,7 @@ void BytecodeGraphBuilder::VisitForInNext() {
environment()->GetControlDependency());
environment()->UpdateEffectDependency(index);
- FeedbackSlot slot =
- feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(3));
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(3);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedForInNext(
receiver, cache_array, cache_type, index, slot);
if (lowering.IsExit()) return;
@@ -2714,54 +2762,135 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
jsgraph()->Constant(bytecode_iterator().current_offset() +
(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ const BytecodeLivenessState* liveness = bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset());
+
+ // This may overallocate the value list, since we don't know how many
+ // registers are live.
+ // TODO(leszeks): We could get this count from liveness rather than the
+ // register list.
int value_input_count = 3 + register_count;
Node** value_inputs = local_zone()->NewArray<Node*>(value_input_count);
value_inputs[0] = generator;
value_inputs[1] = suspend_id;
value_inputs[2] = offset;
+
+ int count_written = 0;
for (int i = 0; i < register_count; ++i) {
- value_inputs[3 + i] =
- environment()->LookupRegister(interpreter::Register(i));
+ if (liveness == nullptr || liveness->RegisterIsLive(i)) {
+ while (count_written < i) {
+ value_inputs[3 + count_written++] = jsgraph()->OptimizedOutConstant();
+ }
+ value_inputs[3 + count_written++] =
+ environment()->LookupRegister(interpreter::Register(i));
+ DCHECK_EQ(count_written, i + 1);
+ }
}
- MakeNode(javascript()->GeneratorStore(register_count), value_input_count,
+ // Use the actual written count rather than the register count to create the
+ // node.
+ MakeNode(javascript()->GeneratorStore(count_written), 3 + count_written,
value_inputs, false);
+
+ // TODO(leszeks): This over-approximates the liveness at exit; only the
+ // accumulator should be live by this point.
+ BuildReturn(bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset()));
}
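The packing loop above stores registers sparsely by liveness: dead registers below the last live one are written as the OptimizedOut placeholder so indices still line up, and everything after the last live register is dropped. A standalone sketch of that packing, with ints standing in for nodes:

#include <vector>

// |live[i]| says whether interpreter register i is live at the suspend point.
std::vector<int> PackLiveRegisters(const std::vector<int>& regs,
                                   const std::vector<bool>& live,
                                   int optimized_out_marker) {
  std::vector<int> packed;
  int count_written = 0;
  for (int i = 0; i < static_cast<int>(regs.size()); ++i) {
    if (live[i]) {
      // Backfill placeholders for dead registers below this live one.
      while (count_written < i) {
        packed.push_back(optimized_out_marker);
        ++count_written;
      }
      packed.push_back(regs[i]);
      ++count_written;
    }
  }
  // packed.size() equals the index of the last live register plus one.
  return packed;
}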
-void BytecodeGraphBuilder::VisitRestoreGeneratorState() {
- Node* generator = environment()->LookupRegister(
- bytecode_iterator().GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildSwitchOnGeneratorState(
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ bool allow_fallthrough_on_executing) {
+ Node* generator_state = environment()->LookupGeneratorState();
+
+ int extra_cases = allow_fallthrough_on_executing ? 2 : 1;
+ NewSwitch(generator_state,
+ static_cast<int>(resume_jump_targets.size() + extra_cases));
+ for (const ResumeJumpTarget& target : resume_jump_targets) {
+ SubEnvironment sub_environment(this);
+ NewIfValue(target.suspend_id());
+ if (target.is_leaf()) {
+ // Mark that we are resuming execution.
+ environment()->BindGeneratorState(
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+ }
+ // Jump to the target offset, whether it's a loop header or the resume.
+ MergeIntoSuccessorEnvironment(target.target_offset());
+ }
+
+ {
+ SubEnvironment sub_environment(this);
+ // We should never hit the default case (assuming generator state cannot be
+ // corrupted), so abort if we do.
+ // TODO(leszeks): Maybe only check this in debug mode, and otherwise use
+ // the default to represent one of the cases above/fallthrough below?
+ NewIfDefault();
+ NewNode(simplified()->RuntimeAbort(AbortReason::kInvalidJumpTableIndex));
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
+
+ if (allow_fallthrough_on_executing) {
+ // If we are executing (rather than resuming), and we allow it, just fall
+ // through to the actual loop body.
+ NewIfValue(JSGeneratorObject::kGeneratorExecuting);
+ } else {
+ // Otherwise, this environment is dead.
+ set_environment(nullptr);
+ }
+}
+
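BuildSwitchOnGeneratorState above is, in control-flow terms, an ordinary switch over the restored generator state: one case per suspend id, an abort for impossible states, and optionally a fall-through case for the "executing" constant. A standalone sketch under assumed offsets (the constants below are illustrative, not V8's):

#include <cstdlib>

constexpr int kExecutingModel = -1;  // stand-in for kGeneratorExecuting

// Returns the bytecode offset to continue at for a given state value.
int DispatchOnGeneratorState(int state, bool allow_fallthrough_on_executing) {
  switch (state) {
    case 0: return 100;  // resume target for suspend id 0 (assumed offset)
    case 1: return 200;  // resume target for suspend id 1 (assumed offset)
    case kExecutingModel:
      if (allow_fallthrough_on_executing) return 0;  // run the loop body
      std::abort();  // a resume-only dispatch never sees "executing"
    default:
      std::abort();  // corrupted generator state: unknown suspend id
  }
}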
+void BytecodeGraphBuilder::VisitSwitchOnGeneratorState() {
+ Node* generator =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+
+ Node* generator_is_undefined =
+ NewNode(simplified()->ReferenceEqual(), generator,
+ jsgraph()->UndefinedConstant());
- Node* state =
- NewNode(javascript()->GeneratorRestoreContinuation(), generator);
+ NewBranch(generator_is_undefined);
+ {
+ SubEnvironment resume_env(this);
+ NewIfFalse();
+
+ Node* generator_state =
+ NewNode(javascript()->GeneratorRestoreContinuation(), generator);
+ environment()->BindGeneratorState(generator_state);
+
+ Node* generator_context =
+ NewNode(javascript()->GeneratorRestoreContext(), generator);
+ environment()->SetContext(generator_context);
+
+ BuildSwitchOnGeneratorState(bytecode_analysis()->resume_jump_targets(),
+ false);
+ }
- environment()->BindAccumulator(state, Environment::kAttachFrameState);
+ // Fallthrough for the first-call case.
+ NewIfTrue();
}
void BytecodeGraphBuilder::VisitResumeGenerator() {
Node* generator =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- interpreter::Register generator_state_reg =
- bytecode_iterator().GetRegisterOperand(1);
- interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(2);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
// We assume we are restoring registers starting from index 0.
CHECK_EQ(0, first_reg.index());
- int register_count =
- static_cast<int>(bytecode_iterator().GetRegisterCountOperand(3));
+
+ const BytecodeLivenessState* liveness =
+ bytecode_analysis()->GetOutLivenessFor(
+ bytecode_iterator().current_offset());
// Bijection between registers and array indices must match that used in
// InterpreterAssembler::ExportRegisterFile.
- for (int i = 0; i < register_count; ++i) {
- Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
- environment()->BindRegister(interpreter::Register(i), value);
+ for (int i = 0; i < environment()->register_count(); ++i) {
+ if (liveness == nullptr || liveness->RegisterIsLive(i)) {
+ Node* value =
+ NewNode(javascript()->GeneratorRestoreRegister(i), generator);
+ environment()->BindRegister(interpreter::Register(i), value);
+ }
}
- // We're no longer resuming, so update the state register.
- environment()->BindRegister(
- generator_state_reg,
- jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
-
// Update the accumulator with the generator's input_or_debug_pos.
Node* input_or_debug_pos =
NewNode(javascript()->GeneratorRestoreInputOrDebugPos(), generator);
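Restoration is now liveness-filtered as well: only registers that are live after the resume point are re-materialized from the generator object, matching the sparse layout written at the suspend. A minimal sketch of the restore side, with indices assumed to correspond as in ExportRegisterFile:

#include <vector>

// |stored[i]| is register i's saved value (a placeholder if it was dead);
// only registers live on exit are copied back into the environment.
void RestoreLiveRegisters(std::vector<int>& env_regs,
                          const std::vector<int>& stored,
                          const std::vector<bool>& live_on_exit) {
  for (int i = 0; i < static_cast<int>(env_regs.size()) &&
                  i < static_cast<int>(stored.size()); ++i) {
    if (live_on_exit[i]) env_regs[i] = stored[i];
  }
}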
@@ -2803,12 +2932,29 @@ void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
const BytecodeLivenessState* liveness =
bytecode_analysis()->GetInLivenessFor(current_offset);
+ const auto& resume_jump_targets = loop_info.resume_jump_targets();
+ bool generate_suspend_switch = !resume_jump_targets.empty();
+
// Add loop header.
environment()->PrepareForLoop(loop_info.assignments(), liveness);
// Store a copy of the environment so we can connect merged back edge inputs
// to the loop header.
merge_environments_[current_offset] = environment()->Copy();
+
+ // If this loop contains resumes, create a new switch just after the loop
+ // for those resumes.
+ if (generate_suspend_switch) {
+ BuildSwitchOnGeneratorState(loop_info.resume_jump_targets(), true);
+
+ // TODO(leszeks): At this point we know we are executing rather than
+ // resuming, so we should be able to prune off the phis in the environment
+ // related to the resume path.
+
+ // Set the generator state to a known constant.
+ environment()->BindGeneratorState(
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+ }
}
}
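The loop-header switch completes the chain started at the function entry: a resume into a suspend nested inside loops first jumps to the outermost loop header, whose own switch sends it one level deeper, and so on until the leaf resume point. A standalone model of following that chain (illustrative types, not V8's):

#include <vector>

struct Hop {
  int suspend_id;
  int next_offset;  // a deeper loop header, or the resume point itself
  bool is_leaf;
};

// targets_per_level[d] holds the dispatch entries at nesting depth d.
int ResolveResume(const std::vector<std::vector<Hop>>& targets_per_level,
                  int suspend_id) {
  int offset = -1;
  for (const std::vector<Hop>& level : targets_per_level) {
    for (const Hop& hop : level) {
      if (hop.suspend_id == suspend_id) {
        offset = hop.next_offset;
        if (hop.is_leaf) return offset;  // reached the actual resume point
        break;  // continue at the next (deeper) loop header's switch
      }
    }
  }
  return offset;
}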
@@ -2874,7 +3020,7 @@ void BytecodeGraphBuilder::BuildJump() {
}
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
- NewBranch(condition);
+ NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -2884,7 +3030,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
- NewBranch(condition);
+ NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -2908,7 +3054,8 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
- NewBranch(environment()->LookupAccumulator());
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
+ IsSafetyCheck::kNoSafetyCheck);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -2920,7 +3067,8 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() {
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
- NewBranch(environment()->LookupAccumulator());
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
+ IsSafetyCheck::kNoSafetyCheck);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -3123,8 +3271,7 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
}
void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) {
- Handle<HandlerTable> table = exception_handler_table();
- int num_entries = table->NumberOfRangeEntries();
+ HandlerTable table(*bytecode_array());
// Potentially exit exception handlers.
while (!exception_handlers_.empty()) {
@@ -3134,12 +3281,13 @@ void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) {
}
// Potentially enter exception handlers.
+ int num_entries = table.NumberOfRangeEntries();
while (current_exception_handler_ < num_entries) {
- int next_start = table->GetRangeStart(current_exception_handler_);
+ int next_start = table.GetRangeStart(current_exception_handler_);
if (current_offset < next_start) break; // Not yet covered by range.
- int next_end = table->GetRangeEnd(current_exception_handler_);
- int next_handler = table->GetRangeHandler(current_exception_handler_);
- int context_register = table->GetRangeData(current_exception_handler_);
+ int next_end = table.GetRangeEnd(current_exception_handler_);
+ int next_handler = table.GetRangeHandler(current_exception_handler_);
+ int context_register = table.GetRangeData(current_exception_handler_);
exception_handlers_.push(
{next_start, next_end, next_handler, context_register});
current_exception_handler_++;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 91b857298c..75d464f79e 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -100,8 +100,9 @@ class BytecodeGraphBuilder {
Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
- return NewNode(common()->Branch(hint), condition);
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
+ return NewNode(common()->Branch(hint, is_safety_check), condition);
}
Node* NewSwitch(Node* condition, int control_output_count) {
return NewNode(common()->Switch(control_output_count), condition);
@@ -252,6 +253,9 @@ class BytecodeGraphBuilder {
void BuildJumpIfJSReceiver();
void BuildSwitchOnSmi(Node* condition);
+ void BuildSwitchOnGeneratorState(
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ bool allow_fallthrough_on_executing);
// Simulates control flow by forward-propagating environments.
void MergeIntoSuccessorEnvironment(int target_offset);
@@ -268,6 +272,9 @@ class BytecodeGraphBuilder {
void BuildLoopExitsUntilLoop(int loop_offset,
const BytecodeLivenessState* liveness);
+ // Helper for building a return (from an actual return or a suspend).
+ void BuildReturn(const BytecodeLivenessState* liveness);
+
// Simulates entry and exit of exception handlers.
void ExitThenEnterExceptionHandlers(int current_offset);
@@ -303,9 +310,6 @@ class BytecodeGraphBuilder {
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
}
- const Handle<HandlerTable>& exception_handler_table() const {
- return exception_handler_table_;
- }
const Handle<FeedbackVector>& feedback_vector() const {
return feedback_vector_;
}
@@ -366,7 +370,6 @@ class BytecodeGraphBuilder {
JSGraph* jsgraph_;
CallFrequency const invocation_frequency_;
Handle<BytecodeArray> bytecode_array_;
- Handle<HandlerTable> exception_handler_table_;
Handle<FeedbackVector> feedback_vector_;
const JSTypeHintLowering type_hint_lowering_;
const FrameStateFunctionInfo* frame_state_function_info_;
@@ -379,9 +382,18 @@ class BytecodeGraphBuilder {
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
- // values from all predecessors of the merge in question.
+ // values from all predecessors of the merge in question. They are indexed by
+ // the bytecode offset.
ZoneMap<int, Environment*> merge_environments_;
+ // Generator merge environments are snapshots of the current resume
+ // environment, tracing back through loop headers to the resume switch of a
+ // generator. They allow us to model a single resume jump as several switch
+ // statements across loop headers, keeping those loop headers reducible,
+ // without having to merge the "executing" environments of the generator into
+ // the "resuming" ones. They are indexed by the suspend id of the resume.
+ ZoneMap<int, Environment*> generator_merge_environments_;
+
// Exception handlers currently entered by the iteration.
ZoneStack<ExceptionHandler> exception_handlers_;
int current_exception_handler_;
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 330b19fac3..02b6f5fb3d 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -165,8 +165,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
#ifdef UNSUPPORTED_C_LINKAGE
// This method should not be called on unknown architectures.
- V8_Fatal(__FILE__, __LINE__,
- "requested C call descriptor on unsupported architecture");
+ FATAL("requested C call descriptor on unsupported architecture");
return nullptr;
#endif
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 071f8952db..4ad286c68c 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -6,6 +6,7 @@
#include <ostream>
+#include "src/builtins/constants-table-builder.h"
#include "src/code-factory.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
@@ -58,6 +59,8 @@ CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
Code::Kind kind, const char* name, size_t result_size, uint32_t stub_key,
int32_t builtin_index)
+ // TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
+ // bytecode handlers?
: CodeAssemblerState(
isolate, zone,
Linkage::GetStubCallDescriptor(
@@ -232,6 +235,33 @@ bool CodeAssembler::IsIntPtrAbsWithOverflowSupported() const {
: IsInt32AbsWithOverflowSupported();
}
+#ifdef V8_EMBEDDED_BUILTINS
+TNode<HeapObject> CodeAssembler::LookupConstant(Handle<HeapObject> object) {
+ DCHECK(isolate()->serializer_enabled());
+
+ // Ensure the given object is in the builtins constants table and fetch its
+ // index.
+ BuiltinsConstantsTableBuilder* builder =
+ isolate()->builtins_constants_table_builder();
+ uint32_t index = builder->AddObject(object);
+
+ // The builtins constants table is loaded through the root register on all
+ // supported platforms. This is checked by the
+ // VerifyBuiltinsIsolateIndependence cctest, which disallows embedded objects
+ // in isolate-independent builtins.
+ DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
+ Heap::kBuiltinsConstantsTableRootIndex));
+ TNode<FixedArray> builtins_constants_table = UncheckedCast<FixedArray>(
+ LoadRoot(Heap::kBuiltinsConstantsTableRootIndex));
+
+ // Generate the lookup.
+ const int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> offset = IntPtrConstant(header_size + kPointerSize * index);
+ return UncheckedCast<HeapObject>(
+ Load(MachineType::AnyTagged(), builtins_constants_table, offset));
+}
+#endif // V8_EMBEDDED_BUILTINS
+
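The offset arithmetic in LookupConstant is the usual tagged FixedArray element address: header size minus the heap-object tag, plus a pointer-sized stride per index. A standalone sketch with assumed 64-bit constants (the real values come from V8's globals):

#include <cstdint>

constexpr int kPointerSizeModel = 8;        // assumed 64-bit build
constexpr int kHeapObjectTagModel = 1;      // tagged-pointer bias
constexpr int kFixedArrayHeaderModel = 16;  // assumed header size

// Byte offset, relative to the tagged array pointer, of element |index|.
int64_t ConstantsTableElementOffset(uint32_t index) {
  return (kFixedArrayHeaderModel - kHeapObjectTagModel) +
         int64_t{kPointerSizeModel} * index;
}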
TNode<Int32T> CodeAssembler::Int32Constant(int32_t value) {
return UncheckedCast<Int32T>(raw_assembler()->Int32Constant(value));
}
@@ -264,12 +294,23 @@ TNode<Smi> CodeAssembler::SmiConstant(int value) {
TNode<HeapObject> CodeAssembler::UntypedHeapConstant(
Handle<HeapObject> object) {
+#ifdef V8_EMBEDDED_BUILTINS
+ // Root constants are simply loaded from the root list, while non-root
+ // constants must be looked up from the builtins constants table.
+ if (ShouldLoadConstantsFromRootList()) {
+ Heap::RootListIndex root_index;
+ if (!isolate()->heap()->IsRootHandle(object, &root_index)) {
+ return LookupConstant(object);
+ }
+ }
+#endif // V8_EMBEDDED_BUILTINS
return UncheckedCast<HeapObject>(raw_assembler()->HeapConstant(object));
}
TNode<String> CodeAssembler::StringConstant(const char* str) {
- return UncheckedCast<String>(
- HeapConstant(factory()->NewStringFromAsciiChecked(str, TENURED)));
+ Handle<String> internalized_string =
+ factory()->InternalizeOneByteString(OneByteVector(str));
+ return UncheckedCast<String>(HeapConstant(internalized_string));
}
TNode<Oddball> CodeAssembler::BooleanConstant(bool value) {
@@ -338,10 +379,10 @@ Node* CodeAssembler::Parameter(int value) {
}
TNode<Context> CodeAssembler::GetJSContextParameter() {
- CallDescriptor* desc = raw_assembler()->call_descriptor();
- DCHECK(desc->IsJSFunctionCall());
+ auto call_descriptor = raw_assembler()->call_descriptor();
+ DCHECK(call_descriptor->IsJSFunctionCall());
return CAST(Parameter(Linkage::GetJSCallContextParamIndex(
- static_cast<int>(desc->JSParameterCount()))));
+ static_cast<int>(call_descriptor->JSParameterCount()))));
}
void CodeAssembler::Return(SloppyTNode<Object> value) {
@@ -422,6 +463,10 @@ Node* CodeAssembler::LoadStackPointer() {
return raw_assembler()->LoadStackPointer();
}
+Node* CodeAssembler::SpeculationPoison() {
+ return raw_assembler()->SpeculationPoison();
+}
+
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
TNode<ResType> CodeAssembler::name(SloppyTNode<Arg1Type> a, \
SloppyTNode<Arg2Type> b) { \
@@ -950,13 +995,13 @@ Node* CodeAssembler::Projection(int index, Node* value) {
void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Variable* exception_var) {
- DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
-
if (if_exception == nullptr) {
// If no handler is supplied, don't add continuations
return;
}
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+
Label success(this), exception(this, Label::kDeferred);
success.MergeVariables();
exception.MergeVariables();
@@ -979,10 +1024,10 @@ TNode<Object> CodeAssembler::CallRuntimeImpl(Runtime::FunctionId function,
SloppyTNode<Object> context,
TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(desc->ReturnCount());
+ int return_count = static_cast<int>(call_descriptor->ReturnCount());
Node* centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
@@ -992,7 +1037,8 @@ TNode<Object> CodeAssembler::CallRuntimeImpl(Runtime::FunctionId function,
Node* nodes[] = {centry, args..., ref, arity, context};
CallPrologue();
- Node* return_value = raw_assembler()->CallN(desc, arraysize(nodes), nodes);
+ Node* return_value =
+ raw_assembler()->CallN(call_descriptor, arraysize(nodes), nodes);
CallEpilogue();
return UncheckedCast<Object>(return_value);
}
@@ -1009,10 +1055,10 @@ TNode<Object> CodeAssembler::TailCallRuntimeImpl(Runtime::FunctionId function,
SloppyTNode<Object> context,
TArgs... args) {
int argc = static_cast<int>(sizeof...(args));
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(desc->ReturnCount());
+ int return_count = static_cast<int>(call_descriptor->ReturnCount());
Node* centry =
HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
@@ -1022,7 +1068,7 @@ TNode<Object> CodeAssembler::TailCallRuntimeImpl(Runtime::FunctionId function,
Node* nodes[] = {centry, args..., ref, arity, context};
return UncheckedCast<Object>(
- raw_assembler()->TailCallN(desc, arraysize(nodes), nodes));
+ raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes));
}
// Instantiate TailCallRuntime() for argument counts used by CSA-generated code
@@ -1061,14 +1107,15 @@ Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
// Extra arguments not mentioned in the descriptor are passed on the stack.
int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, stack_parameter_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size,
pass_context ? Linkage::kPassContext : Linkage::kNoContext);
CallPrologue();
- Node* return_value = raw_assembler()->CallN(desc, input_count, inputs);
+ Node* return_value =
+ raw_assembler()->CallN(call_descriptor, input_count, inputs);
CallEpilogue();
return return_value;
}
@@ -1079,14 +1126,14 @@ Node* CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
TArgs... args) {
DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
size_t result_size = 1;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
Node* nodes[] = {target, args..., context};
CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes));
- return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
+ return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
// Instantiate TailCallStub() for argument counts used by CSA-generated code
@@ -1105,13 +1152,13 @@ Node* CodeAssembler::TailCallStubThenBytecodeDispatch(
int stack_parameter_count =
sizeof...(args) - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, stack_parameter_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), 0);
Node* nodes[] = {target, args..., context};
- return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
+ return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
// Instantiate TailCallJSAndBytecodeDispatch() for argument counts used by
@@ -1127,12 +1174,12 @@ template <class... TArgs>
Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
- CallDescriptor* desc = Linkage::GetBytecodeDispatchCallDescriptor(
+ auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount());
Node* nodes[] = {target, args...};
CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes));
- return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
+ return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes);
}
// Instantiate TailCallBytecodeDispatch() for argument counts used by
@@ -1143,8 +1190,8 @@ template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
int input_count, Node* const* inputs) {
- CallDescriptor* desc = Linkage::GetSimplifiedCDescriptor(zone(), signature);
- return raw_assembler()->CallN(desc, input_count, inputs);
+ auto call_descriptor = Linkage::GetSimplifiedCDescriptor(zone(), signature);
+ return raw_assembler()->CallN(call_descriptor, input_count, inputs);
}
Node* CodeAssembler::CallCFunction1(MachineType return_type,
@@ -1593,3 +1640,15 @@ Smi* CheckObjectType(Object* value, Smi* type, String* location) {
} // namespace internal
} // namespace v8
+
+#undef REPEAT_1_TO_2
+#undef REPEAT_1_TO_3
+#undef REPEAT_1_TO_4
+#undef REPEAT_1_TO_5
+#undef REPEAT_1_TO_6
+#undef REPEAT_1_TO_7
+#undef REPEAT_1_TO_8
+#undef REPEAT_1_TO_9
+#undef REPEAT_1_TO_10
+#undef REPEAT_1_TO_11
+#undef REPEAT_1_TO_12
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 9f0d463dc1..1d3abe74f0 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -31,6 +31,11 @@ class JSCollection;
class JSWeakCollection;
class JSWeakMap;
class JSWeakSet;
+class PromiseCapability;
+class PromiseFulfillReactionJobTask;
+class PromiseReaction;
+class PromiseReactionJobTask;
+class PromiseRejectReactionJobTask;
class Factory;
class Zone;
@@ -197,6 +202,7 @@ enum class ObjectType {
class AccessCheckNeeded;
class ClassBoilerplate;
+class BooleanWrapper;
class CompilationCacheTable;
class Constructor;
class Filler;
@@ -208,8 +214,11 @@ class JSSloppyArgumentsObject;
class MapCache;
class MutableHeapNumber;
class NativeContext;
+class NumberWrapper;
+class ScriptWrapper;
class SloppyArgumentsElements;
class StringWrapper;
+class SymbolWrapper;
class Undetectable;
class UniqueName;
class WasmMemoryObject;
@@ -404,6 +413,7 @@ class SloppyTNode : public TNode<T> {
V(IntPtrEqual, BoolT, WordT, WordT) \
V(Uint32LessThan, BoolT, Word32T, Word32T) \
V(Uint32LessThanOrEqual, BoolT, Word32T, Word32T) \
+ V(Uint32GreaterThan, BoolT, Word32T, Word32T) \
V(Uint32GreaterThanOrEqual, BoolT, Word32T, Word32T) \
V(UintPtrLessThan, BoolT, WordT, WordT) \
V(UintPtrLessThanOrEqual, BoolT, WordT, WordT) \
@@ -491,6 +501,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64RoundTruncate, Float64T, Float64T) \
V(Word32Clz, Int32T, Word32T) \
V(Word32Not, Word32T, Word32T) \
+ V(WordNot, WordT, WordT) \
V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T) \
V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T) \
V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
@@ -543,7 +554,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Base Assembler
// ===========================================================================
- template <class PreviousType>
+ template <class PreviousType, bool FromTyped>
class CheckedNode {
public:
#ifdef DEBUG
@@ -561,6 +572,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
static_assert(std::is_convertible<TNode<A>, TNode<Object>>::value,
"Coercion to untagged values cannot be "
"checked.");
+ static_assert(
+ !FromTyped ||
+ !std::is_convertible<TNode<PreviousType>, TNode<A>>::value,
+ "Unnecessary CAST: types are convertible.");
#ifdef DEBUG
if (FLAG_debug_code) {
Node* function = code_assembler_->ExternalConstant(
@@ -610,13 +625,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return TNode<T>::UncheckedCast(value);
}
- CheckedNode<Object> Cast(Node* value, const char* location) {
- return CheckedNode<Object>(value, this, location);
+ CheckedNode<Object, false> Cast(Node* value, const char* location) {
+ return {value, this, location};
}
template <class T>
- CheckedNode<T> Cast(TNode<T> value, const char* location) {
- return CheckedNode<T>(value, this, location);
+ CheckedNode<T, true> Cast(TNode<T> value, const char* location) {
+ return {value, this, location};
}
#ifdef DEBUG
@@ -628,6 +643,17 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#define CAST(x) Cast(x, "")
#endif
+#ifdef V8_EMBEDDED_BUILTINS
+ // Off-heap builtins cannot embed constants within the code object itself,
+ // and thus need to load them from the root list.
+ bool ShouldLoadConstantsFromRootList() const {
+ return (isolate()->serializer_enabled() &&
+ isolate()->builtins_constants_table_builder() != nullptr);
+ }
+
+ TNode<HeapObject> LookupConstant(Handle<HeapObject> object);
+#endif
+
// Constants.
TNode<Int32T> Int32Constant(int32_t value);
TNode<Int64T> Int64Constant(int64_t value);
@@ -651,6 +677,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<ExternalReference> ExternalConstant(ExternalReference address);
TNode<Float64T> Float64Constant(double value);
TNode<HeapNumber> NaNConstant();
+ TNode<BoolT> Int32TrueConstant() {
+ return ReinterpretCast<BoolT>(Int32Constant(1));
+ }
+ TNode<BoolT> Int32FalseConstant() {
+ return ReinterpretCast<BoolT>(Int32Constant(0));
+ }
bool ToInt32Constant(Node* node, int32_t& out_value);
bool ToInt64Constant(Node* node, int64_t& out_value);
@@ -702,6 +734,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Access to the stack pointer
Node* LoadStackPointer();
+ // Poison mask for speculation.
+ Node* SpeculationPoison();
+
// Load raw memory location.
Node* Load(MachineType rep, Node* base);
template <class Type>
@@ -1136,23 +1171,17 @@ class TypedCodeAssemblerVariable : public CodeAssemblerVariable {
initial_value) {}
#endif // DEBUG
- template <class U, class = typename std::enable_if<
- std::is_convertible<TNode<T>, TNode<U>>::value>::type>
- operator TNode<U>() const {
- return TNode<T>::UncheckedCast(value());
+ TNode<T> value() const {
+ return TNode<T>::UncheckedCast(CodeAssemblerVariable::value());
}
- template <class U, class = typename std::enable_if<
- std::is_convertible<TNode<T>, TNode<U>>::value>::type>
- operator SloppyTNode<U>() const {
- return value();
- }
- operator Node*() const { return value(); }
void operator=(TNode<T> value) { Bind(value); }
+ void operator=(const TypedCodeAssemblerVariable<T>& variable) {
+ Bind(variable.value());
+ }
private:
using CodeAssemblerVariable::Bind;
- using CodeAssemblerVariable::value;
};
class CodeAssemblerLabel {
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index c6d3174d8c..1298657774 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -215,8 +215,13 @@ class OutOfLineCode : public ZoneObject {
OutOfLineCode* const next_;
};
+inline bool HasCallDescriptorFlag(Instruction* instr,
+ CallDescriptor::Flag flag) {
+ return MiscField::decode(instr->opcode()) & flag;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_CODE_GENERATOR_IMPL_H
+#endif // V8_COMPILER_CODE_GENERATOR_IMPL_H_
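HasCallDescriptorFlag tests a bit of the call-descriptor flags that were packed into the instruction opcode's MiscField at selection time. A standalone model of that decode, with an assumed bit range (the real layout is whatever MiscField defines):

#include <cstdint>

constexpr int kMiscShiftModel = 22;  // assumed bit position
constexpr uint32_t kMiscMaskModel = 0x3FFu << kMiscShiftModel;

bool HasFlagModel(uint32_t opcode, uint32_t flag) {
  uint32_t misc = (opcode & kMiscMaskModel) >> kMiscShiftModel;
  return (misc & flag) != 0;
}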
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 0fb38e5933..71b0394eab 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -42,7 +42,8 @@ CodeGenerator::CodeGenerator(
InstructionSequence* code, CompilationInfo* info, Isolate* isolate,
base::Optional<OsrHelper> osr_helper, int start_source_position,
JumpOptimizationInfo* jump_opt,
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions)
+ std::vector<trap_handler::ProtectedInstructionData>* protected_instructions,
+ LoadPoisoning load_poisoning)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -63,6 +64,7 @@ CodeGenerator::CodeGenerator(
deoptimization_literals_(zone()),
inlined_function_count_(0),
translations_(zone()),
+ handler_table_offset_(0),
last_lazy_deopt_pc_(0),
caller_registers_saved_(false),
jump_tables_(nullptr),
@@ -72,7 +74,8 @@ CodeGenerator::CodeGenerator(
optimized_out_literal_id_(-1),
source_position_table_builder_(info->SourcePositionRecordingMode()),
protected_instructions_(protected_instructions),
- result_(kSuccess) {
+ result_(kSuccess),
+ load_poisoning_(load_poisoning) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -148,16 +151,36 @@ void CodeGenerator::AssembleCode() {
ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
}
- // TODO(jupvfranco): This should be the first thing in the code,
- // or otherwise MaybeCallEntryHookDelayed may happen twice (for
- // optimized and deoptimized code).
- // We want to bailout only from JS functions, which are the only ones
+ // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
+ if (FLAG_debug_code && (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
+ info->code_kind() == Code::BYTECODE_HANDLER)) {
+ tasm()->RecordComment("-- Prologue: check code start register --");
+ AssembleCodeStartRegisterCheck();
+ }
+
+ // TODO(jupvfranco): This should be the first thing in the code, otherwise
+ // MaybeCallEntryHookDelayed may happen twice (for optimized and deoptimized
+ // code). We want to bail out only from JS functions, which are the only ones
// that are optimized.
if (info->IsOptimizing()) {
DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
+ tasm()->RecordComment("-- Prologue: check for deoptimization --");
BailoutIfDeoptimized();
}
+ // Initialize {kSpeculationPoisonRegister} either by comparing the expected
+ // with the actual call target, or by unconditionally using {-1} initially.
+ // Masking register arguments with it only makes sense in the first case.
+ if (info->is_generating_speculation_poison_on_entry()) {
+ tasm()->RecordComment("-- Prologue: generate speculation poison --");
+ GenerateSpeculationPoison();
+ if (info->is_poisoning_register_arguments()) {
+ AssembleRegisterArgumentPoisoning();
+ }
+ } else {
+ InitializePoisonForLoadsIfNeeded();
+ }
+
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
for (CompilationInfo::InlinedFunctionHolder& inlined :
@@ -218,6 +241,9 @@ void CodeGenerator::AssembleCode() {
frame_access_state()->MarkHasFrame(block->needs_frame());
tasm()->bind(GetLabel(current_block_));
+
+ TryInsertBranchPoisoning(block);
+
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@@ -287,26 +313,54 @@ void CodeGenerator::AssembleCode() {
unwinding_info_writer_.Finish(tasm()->pc_offset());
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
- result_ = kSuccess;
-}
-Handle<ByteArray> CodeGenerator::GetSourcePositionTable() {
- return source_position_table_builder_.ToSourcePositionTable(isolate());
-}
-
-MaybeHandle<HandlerTable> CodeGenerator::GetHandlerTable() const {
+ // Emit the exception handler table.
if (!handlers_.empty()) {
- Handle<HandlerTable> table =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
- TENURED));
+ handler_table_offset_ = HandlerTable::EmitReturnTableStart(
+ tasm(), static_cast<int>(handlers_.size()));
for (size_t i = 0; i < handlers_.size(); ++i) {
- table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
- table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
+ HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
+ handlers_[i].handler->pos());
+ }
+ }
+
+ result_ = kSuccess;
+}
+
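With this change the handler table is no longer a separate FixedArray on the heap; its return-address entries are emitted inline into the code object, and handler_table_offset_ records where they start. A sketch of the resulting lookup structure (plain structs standing in for the encoded entries):

#include <vector>

struct ReturnEntry {
  int pc_offset;    // return-address offset of the call site
  int handler_pos;  // code offset of the exception handler
};

// Finds the handler for a call site's return pc offset, or -1 if none.
int LookupReturnHandler(const std::vector<ReturnEntry>& table, int pc_offset) {
  for (const ReturnEntry& e : table) {
    if (e.pc_offset == pc_offset) return e.handler_pos;
  }
  return -1;
}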
+void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
+ // See if our predecessor was a basic block terminated by a branch_and_poison
+ // instruction. If yes, then perform the masking based on the flags.
+ if (block->PredecessorCount() != 1) return;
+ RpoNumber pred_rpo = (block->predecessors())[0];
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_rpo);
+ if (pred->code_start() == pred->code_end()) return;
+ Instruction* instr = code()->InstructionAt(pred->code_end() - 1);
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ switch (mode) {
+ case kFlags_branch_and_poison: {
+ BranchInfo branch;
+ RpoNumber target = ComputeBranchInfo(&branch, instr);
+ if (!target.IsValid()) {
+ // Non-trivial branch, add the masking code.
+ FlagsCondition condition = branch.condition;
+ if (branch.false_label == GetLabel(block->rpo_number())) {
+ condition = NegateFlagsCondition(condition);
+ }
+ AssembleBranchPoisoning(condition, instr);
+ }
+ break;
}
- return table;
+ case kFlags_deoptimize_and_poison: {
+ UNREACHABLE();
+ break;
+ }
+ default:
+ break;
}
- return {};
+}
+
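The poison mask itself works branchlessly: if control reached a block even though the guarding condition did not hold (that is, speculatively), the mask collapses to zero and every masked load yields zero instead of a secret-dependent value. A standalone sketch of the mask update and its use:

#include <cstdint>

// All-ones if the condition actually held, zero otherwise.
uint64_t UpdatePoison(uint64_t poison, bool condition_held) {
  uint64_t keep = ~(static_cast<uint64_t>(condition_held) - 1);
  return poison & keep;
}

// Loads are masked with the current poison before further use.
uint64_t PoisonLoad(uint64_t loaded_value, uint64_t poison) {
  return loaded_value & poison;
}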
+Handle<ByteArray> CodeGenerator::GetSourcePositionTable() {
+ return source_position_table_builder_.ToSourcePositionTable(isolate());
}
Handle<Code> CodeGenerator::FinalizeCode() {
@@ -315,18 +369,6 @@ Handle<Code> CodeGenerator::FinalizeCode() {
return Handle<Code>();
}
- // Allocate exception handler table.
- Handle<HandlerTable> table = HandlerTable::Empty(isolate());
- if (!handlers_.empty()) {
- table = Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
- TENURED));
- for (size_t i = 0; i < handlers_.size(); ++i) {
- table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
- table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
- }
- }
-
// Allocate the source position table.
Handle<ByteArray> source_positions =
source_position_table_builder_.ToSourcePositionTable(isolate());
@@ -343,8 +385,9 @@ Handle<Code> CodeGenerator::FinalizeCode() {
Handle<Code> result = isolate()->factory()->NewCode(
desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
- table, source_positions, deopt_data, kMovable, info()->stub_key(), true,
- frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset());
+ source_positions, deopt_data, kMovable, info()->stub_key(), true,
+ frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
+ handler_table_offset_);
isolate()->counters()->total_compiled_code_size()->Increment(
result->instruction_size());
@@ -488,6 +531,77 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
pushes->resize(push_count);
}
+CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
+ InstructionOperand* source, InstructionOperand* destination) {
+ if (source->IsConstant()) {
+ if (destination->IsAnyRegister()) {
+ return MoveType::kConstantToRegister;
+ } else {
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kConstantToStack;
+ }
+ }
+ DCHECK(LocationOperand::cast(source)->IsCompatible(
+ LocationOperand::cast(destination)));
+ if (source->IsAnyRegister()) {
+ if (destination->IsAnyRegister()) {
+ return MoveType::kRegisterToRegister;
+ } else {
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kRegisterToStack;
+ }
+ } else {
+ DCHECK(source->IsAnyStackSlot());
+ if (destination->IsAnyRegister()) {
+ return MoveType::kStackToRegister;
+ } else {
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kStackToStack;
+ }
+ }
+}
+
+CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
+ InstructionOperand* source, InstructionOperand* destination) {
+ DCHECK(LocationOperand::cast(source)->IsCompatible(
+ LocationOperand::cast(destination)));
+ if (source->IsAnyRegister()) {
+ if (destination->IsAnyRegister()) {
+ return MoveType::kRegisterToRegister;
+ } else {
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kRegisterToStack;
+ }
+ } else {
+ DCHECK(source->IsAnyStackSlot());
+ DCHECK(destination->IsAnyStackSlot());
+ return MoveType::kStackToStack;
+ }
+}
+
+RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
+ Instruction* instr) {
+ // Assemble a branch after this instruction.
+ InstructionOperandConverter i(this, instr);
+ RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
+ RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
+
+ if (true_rpo == false_rpo) {
+ return true_rpo;
+ }
+ FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+ if (IsNextInAssemblyOrder(true_rpo)) {
+ // true block is next, can fall through if condition negated.
+ std::swap(true_rpo, false_rpo);
+ condition = NegateFlagsCondition(condition);
+ }
+ branch->condition = condition;
+ branch->true_label = GetLabel(true_rpo);
+ branch->false_label = GetLabel(false_rpo);
+ branch->fallthru = IsNextInAssemblyOrder(false_rpo);
+ return RpoNumber::Invalid();
+}
+
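ComputeBranchInfo folds two normalizations into one helper: a branch whose targets coincide is redundant (the valid rpo number it returns becomes at most a plain jump), and a branch whose true target is next in assembly order is flipped so the false path can fall through. A standalone sketch of the fallthrough flip (the redundant-branch early-out is omitted):

#include <utility>

struct BranchModel {
  bool negated;      // condition was flipped for fallthrough
  int taken_target;  // block to branch to when the condition holds
  bool fallthru;     // the other target is next in assembly order
};

BranchModel NormalizeBranch(int true_block, int false_block, int next_block) {
  bool negated = (true_block == next_block);
  if (negated) std::swap(true_block, false_block);
  return {negated, true_block, false_block == next_block};
}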
CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
Instruction* instr, const InstructionBlock* block) {
int first_unused_stack_slot;
@@ -513,34 +627,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
- case kFlags_branch: {
- // Assemble a branch after this instruction.
- InstructionOperandConverter i(this, instr);
- RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
- RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
-
- if (true_rpo == false_rpo) {
+ case kFlags_branch:
+ case kFlags_branch_and_poison: {
+ BranchInfo branch;
+ RpoNumber target = ComputeBranchInfo(&branch, instr);
+ if (target.IsValid()) {
// redundant branch.
- if (!IsNextInAssemblyOrder(true_rpo)) {
- AssembleArchJump(true_rpo);
+ if (!IsNextInAssemblyOrder(target)) {
+ AssembleArchJump(target);
}
return kSuccess;
}
- if (IsNextInAssemblyOrder(true_rpo)) {
- // true block is next, can fall through if condition negated.
- std::swap(true_rpo, false_rpo);
- condition = NegateFlagsCondition(condition);
- }
- BranchInfo branch;
- branch.condition = condition;
- branch.true_label = GetLabel(true_rpo);
- branch.false_label = GetLabel(false_rpo);
- branch.fallthru = IsNextInAssemblyOrder(false_rpo);
// Assemble architecture-specific branch.
AssembleArchBranch(instr, &branch);
break;
}
- case kFlags_deoptimize: {
+ case kFlags_deoptimize:
+ case kFlags_deoptimize_and_poison: {
// Assemble a conditional eager deoptimization after this instruction.
InstructionOperandConverter i(this, instr);
size_t frame_state_offset = MiscField::decode(instr->opcode());
@@ -555,6 +658,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
// Assemble architecture-specific branch.
AssembleArchDeoptBranch(instr, &branch);
tasm()->bind(&continue_label);
+ if (mode == kFlags_deoptimize_and_poison) {
+ AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
+ }
break;
}
case kFlags_set: {
@@ -570,6 +676,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
break;
}
}
+
+ // TODO(jarin) We should thread the flag through rather than set it.
+ if (instr->IsCall()) {
+ InitializePoisonForLoadsIfNeeded();
+ }
+
return kSuccess;
}
@@ -1078,6 +1190,12 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
return exit;
}
+void CodeGenerator::InitializePoisonForLoadsIfNeeded() {
+ if (load_poisoning_ == LoadPoisoning::kDoPoison) {
+ tasm()->ResetSpeculationPoisonRegister();
+ }
+}
+
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 425ea2ebf2..a91ae0212a 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -86,7 +86,8 @@ class CodeGenerator final : public GapResolver::Assembler {
int start_source_position,
JumpOptimizationInfo* jump_opt,
std::vector<trap_handler::ProtectedInstructionData>*
- protected_instructions);
+ protected_instructions,
+ LoadPoisoning load_poisoning);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -95,7 +96,6 @@ class CodeGenerator final : public GapResolver::Assembler {
Handle<Code> FinalizeCode();
Handle<ByteArray> GetSourcePositionTable();
- MaybeHandle<HandlerTable> GetHandlerTable() const;
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
@@ -122,6 +122,7 @@ class CodeGenerator final : public GapResolver::Assembler {
Zone* zone() const { return zone_; }
TurboAssembler* tasm() { return &tasm_; }
size_t GetSafepointTableOffset() const { return safepoints_.GetCodeOffset(); }
+ size_t GetHandlerTableOffset() const { return handler_table_offset_; }
private:
GapResolver* resolver() { return &resolver_; }
@@ -150,11 +151,25 @@ class CodeGenerator final : public GapResolver::Assembler {
// Assemble instructions for the specified block.
CodeGenResult AssembleBlock(const InstructionBlock* block);
+ // Inserts a mask update at the beginning of an instruction block if the
+ // predecessor block ends with a masking branch.
+ void TryInsertBranchPoisoning(const InstructionBlock* block);
+
+ // Initializes the masking register.
+ // Eventually, this should always be threaded through from the caller
+ // (in the prologue) or from a callee (after a call).
+ void InitializePoisonForLoadsIfNeeded();
+
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(Instruction* instr,
const InstructionBlock* block);
void AssembleGaps(Instruction* instr);
+ // Computes branch info for the given instruction. If the branch is
+ // redundant, returns a valid rpo number pointing to the target basic
+ // block; otherwise returns RpoNumber::Invalid().
+ RpoNumber ComputeBranchInfo(BranchInfo* branch, Instruction* instr);
+
// Returns true if an instruction is a tail call that needs to adjust the stack
// pointer before execution. The stack slot index of the empty slot above the
// adjusted stack pointer is returned in |slot|.
@@ -179,12 +194,26 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
+ // Generates code that checks whether the {kJavaScriptCallCodeStartRegister}
+ // contains the expected pointer to the start of the instruction stream.
+ void AssembleCodeStartRegisterCheck();
+
+ void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr);
+
// When entering code that is marked for deoptimization, rather than
// continuing with its execution, we jump to lazily compiled code. We need to
// do this because this code has already been deoptimized and needs to be
// unlinked from the JS functions referring to it.
void BailoutIfDeoptimized();
+ // Generates a mask which can be used to poison values when we detect
+ // that the code is executing speculatively.
+ void GenerateSpeculationPoison();
+
+ // Generates code to poison the stack pointer and implicit register arguments
+ // like the context register and the function register.
+ void AssembleRegisterArgumentPoisoning();
+
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();
@@ -224,6 +253,26 @@ class CodeGenerator final : public GapResolver::Assembler {
PushTypeFlags push_type,
ZoneVector<MoveOperands*>* pushes);
+ class MoveType {
+ public:
+ enum Type {
+ kRegisterToRegister,
+ kRegisterToStack,
+ kStackToRegister,
+ kStackToStack,
+ kConstantToRegister,
+ kConstantToStack
+ };
+
+ // Detects what type of move or swap needs to be performed. Note that these
+ // functions do not take into account the representation (Tagged, FP, etc.).
+
+ static Type InferMove(InstructionOperand* source,
+ InstructionOperand* destination);
+ static Type InferSwap(InstructionOperand* source,
+ InstructionOperand* destination);
+ };
// Called before a tail call |instr|'s gap moves are assembled and allows
// gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
// need it before gap moves or conversion of certain gap moves into pushes.
@@ -346,6 +395,7 @@ class CodeGenerator final : public GapResolver::Assembler {
ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
size_t inlined_function_count_;
TranslationBuffer translations_;
+ int handler_table_offset_;
int last_lazy_deopt_pc_;
// kArchCallCFunction could be reached either:
@@ -368,10 +418,11 @@ class CodeGenerator final : public GapResolver::Assembler {
SourcePositionTableBuilder source_position_table_builder_;
std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_;
CodeGenResult result_;
+ LoadPoisoning load_poisoning_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_CODE_GENERATOR_H
+#endif // V8_COMPILER_CODE_GENERATOR_H_
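
The poisoning hooks declared above (GenerateSpeculationPoison, AssembleBranchPoisoning, InitializePoisonForLoadsIfNeeded) belong to a Spectre-style mitigation: a dedicated register holds all-ones on the architecturally taken path and all-zeros under misspeculation, and speculatively loaded values are masked with it; since a call clobbers the register, it is re-initialized after every call site. The dataflow of the mask, reduced to plain C++ (only a sketch of the idea; real codegen derives the mask branchlessly from the same condition flags as the guarding branch):

#include <cstdint>
#include <cstdio>

// Conceptual only: a real CPU may execute the load before the bounds
// check resolves; the mask zeroes the loaded value on the wrong path.
uint64_t LoadPoisoned(const uint64_t* table, uint64_t index, uint64_t length) {
  uint64_t poison = ~uint64_t{0} * (index < length);  // all-ones or zero
  uint64_t value = table[index & (length - 1)];       // length is a power of 2
  return value & poison;  // a misspeculated load observes only zero
}

int main() {
  uint64_t t[4] = {10, 20, 30, 40};
  std::printf("%llu\n", static_cast<unsigned long long>(LoadPoisoned(t, 2, 4)));
}
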
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index d9bc5c8173..2f4888617c 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -38,12 +38,14 @@ Decision DecideCondition(Node* const cond) {
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine)
+ MachineOperatorBuilder* machine,
+ Zone* temp_zone)
: AdvancedReducer(editor),
graph_(graph),
common_(common),
machine_(machine),
- dead_(graph->NewNode(common->Dead())) {
+ dead_(graph->NewNode(common->Dead())),
+ zone_(temp_zone) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -64,6 +66,8 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return ReduceReturn(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
+ case IrOpcode::kSwitch:
+ return ReduceSwitch(node);
default:
break;
}
@@ -138,10 +142,10 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
NodeProperties::ChangeOp(
- node, condition_is_true ? common()->DeoptimizeIf(p.kind(), p.reason(),
- VectorSlotPair())
- : common()->DeoptimizeUnless(
- p.kind(), p.reason(), VectorSlotPair()));
+ node,
+ condition_is_true
+ ? common()->DeoptimizeIf(p.kind(), p.reason(), p.feedback())
+ : common()->DeoptimizeUnless(p.kind(), p.reason(), p.feedback()));
return Changed(node);
}
Decision const decision = DecideCondition(condition);
@@ -150,8 +154,8 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
ReplaceWithValue(node, dead(), effect, control);
} else {
control = graph()->NewNode(
- common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
- frame_state, effect, control);
+ common()->Deoptimize(p.kind(), p.reason(), p.feedback()), frame_state,
+ effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
@@ -414,6 +418,42 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
return NoChange();
}
+Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
+ DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
+ Node* const switched_value = node->InputAt(0);
+ Node* const control = node->InputAt(1);
+
+ // Attempt to constant-match the switched value against the IfValue cases. If
+ // no case matches, then use the IfDefault. We don't bother marking
+ // non-matching cases as dead code (same for an unused IfDefault), because the
+ // Switch itself will be marked as dead code.
+ Int32Matcher mswitched(switched_value);
+ if (mswitched.HasValue()) {
+ bool matched = false;
+
+ size_t const projection_count = node->op()->ControlOutputCount();
+ Node** projections = zone_->NewArray<Node*>(projection_count);
+ NodeProperties::CollectControlProjections(node, projections,
+ projection_count);
+ for (size_t i = 0; i < projection_count - 1; i++) {
+ Node* if_value = projections[i];
+ DCHECK_EQ(IrOpcode::kIfValue, if_value->opcode());
+ int32_t value_index = OpParameter<int32_t>(if_value->op());
+ if (value_index == mswitched.Value()) {
+ matched = true;
+ Replace(if_value, control);
+ break;
+ }
+ }
+ if (!matched) {
+ Node* if_default = projections[projection_count - 1];
+ DCHECK_EQ(IrOpcode::kIfDefault, if_default->opcode());
+ Replace(if_default, control);
+ }
+ return Replace(dead());
+ }
+ return NoChange();
+}
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
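
ReduceSwitch constant-folds a Switch whose input is a known Int32: the matching IfValue projection (or the IfDefault when nothing matches) is rewired to the Switch's control input, and the Switch itself is replaced by dead. The same decision over a flat case table, in ordinary C++ (types invented for the sketch):

#include <cassert>
#include <cstdint>
#include <vector>

struct Case { int32_t value; int target; };

// Returns the successor block for a switch whose operand is the constant
// `v`; `default_target` plays the role of IfDefault.
int FoldSwitch(int32_t v, const std::vector<Case>& cases, int default_target) {
  for (const Case& c : cases) {
    if (c.value == v) return c.target;  // this IfValue replaces the Switch
  }
  return default_target;  // no match: IfDefault replaces the Switch
}

int main() {
  std::vector<Case> cases = {{0, 10}, {1, 11}, {5, 12}};
  assert(FoldSwitch(5, cases, 99) == 12);
  assert(FoldSwitch(7, cases, 99) == 99);
}
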
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index ea3575aa55..022c4fbe8c 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -26,7 +26,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
public:
CommonOperatorReducer(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine);
+ MachineOperatorBuilder* machine, Zone* temp_zone);
~CommonOperatorReducer() final {}
const char* reducer_name() const override { return "CommonOperatorReducer"; }
@@ -41,6 +41,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction ReducePhi(Node* node);
Reduction ReduceReturn(Node* node);
Reduction ReduceSelect(Node* node);
+ Reduction ReduceSwitch(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
@@ -54,6 +55,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
CommonOperatorBuilder* const common_;
MachineOperatorBuilder* const machine_;
Node* const dead_;
+ Zone* zone_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 54af052d56..36b1caffa6 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -29,10 +29,27 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
UNREACHABLE();
}
+std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) {
+ switch (is_safety_check) {
+ case IsSafetyCheck::kSafetyCheck:
+ return os << "SafetyCheck";
+ case IsSafetyCheck::kNoSafetyCheck:
+ return os << "NoSafetyCheck";
+ }
+ UNREACHABLE();
+}
-BranchHint BranchHintOf(const Operator* const op) {
+std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) {
+ return os << info.hint << "|" << info.is_safety_check;
+}
+
+const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kBranch, op->opcode());
- return OpParameter<BranchHint>(op);
+ return OpParameter<BranchOperatorInfo>(op);
+}
+
+BranchHint BranchHintOf(const Operator* const op) {
+ return BranchOperatorInfoOf(op).hint;
}
int ValueInputCountOfReturn(Operator const* const op) {
@@ -44,7 +61,8 @@ int ValueInputCountOfReturn(Operator const* const op) {
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
- lhs.feedback() == rhs.feedback();
+ lhs.feedback() == rhs.feedback() &&
+ lhs.is_safety_check() == rhs.is_safety_check();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -52,11 +70,12 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
}
size_t hash_value(DeoptimizeParameters p) {
- return base::hash_combine(p.kind(), p.reason(), p.feedback());
+ return base::hash_combine(p.kind(), p.reason(), p.feedback(),
+ p.is_safety_check());
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- os << p.kind() << ":" << p.reason();
+ os << p.kind() << ":" << p.reason() << ":" << p.is_safety_check();
if (p.feedback().IsValid()) {
os << "; " << p.feedback();
}
@@ -70,6 +89,32 @@ DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
return OpParameter<DeoptimizeParameters>(op);
}
+IsSafetyCheck IsSafetyCheckOf(const Operator* op) {
+ if (op->opcode() == IrOpcode::kBranch) {
+ return BranchOperatorInfoOf(op).is_safety_check;
+ }
+ return DeoptimizeParametersOf(op).is_safety_check();
+}
+
+const Operator* CommonOperatorBuilder::MarkAsSafetyCheck(const Operator* op) {
+ if (op->opcode() == IrOpcode::kBranch) {
+ BranchOperatorInfo info = BranchOperatorInfoOf(op);
+ if (info.is_safety_check == IsSafetyCheck::kSafetyCheck) return op;
+ return Branch(info.hint, IsSafetyCheck::kSafetyCheck);
+ }
+ DeoptimizeParameters p = DeoptimizeParametersOf(op);
+ if (p.is_safety_check() == IsSafetyCheck::kSafetyCheck) return op;
+ switch (op->opcode()) {
+ case IrOpcode::kDeoptimizeIf:
+ return DeoptimizeIf(p.kind(), p.reason(), p.feedback(),
+ IsSafetyCheck::kSafetyCheck);
+ case IrOpcode::kDeoptimizeUnless:
+ return DeoptimizeUnless(p.kind(), p.reason(), p.feedback(),
+ IsSafetyCheck::kSafetyCheck);
+ default:
+ UNREACHABLE();
+ }
+}
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
return lhs.representation() == rhs.representation() &&
@@ -175,7 +220,7 @@ bool operator!=(RelocatablePtrConstantInfo const& lhs,
}
size_t hash_value(RelocatablePtrConstantInfo const& p) {
- return base::hash_combine(p.value(), p.rmode(), p.type());
+ return base::hash_combine(p.value(), int8_t{p.rmode()}, p.type());
}
std::ostream& operator<<(std::ostream& os,
@@ -365,6 +410,14 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V(FinishRegion, Operator::kKontrol, 1, 1, 0, 1, 1, 0) \
V(Retain, Operator::kKontrol, 1, 1, 0, 0, 1, 0)
+#define CACHED_BRANCH_LIST(V) \
+ V(None, SafetyCheck) \
+ V(True, SafetyCheck) \
+ V(False, SafetyCheck) \
+ V(None, NoSafetyCheck) \
+ V(True, NoSafetyCheck) \
+ V(False, NoSafetyCheck)
+
#define CACHED_RETURN_LIST(V) \
V(1) \
V(2) \
@@ -417,22 +470,28 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
-#define CACHED_DEOPTIMIZE_IF_LIST(V) \
- V(Eager, DivisionByZero) \
- V(Eager, Hole) \
- V(Eager, MinusZero) \
- V(Eager, Overflow) \
- V(Eager, Smi)
-
-#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
- V(Eager, LostPrecision) \
- V(Eager, LostPrecisionOrNaN) \
- V(Eager, NotAHeapNumber) \
- V(Eager, NotANumberOrOddball) \
- V(Eager, NotASmi) \
- V(Eager, OutOfBounds) \
- V(Eager, WrongInstanceType) \
- V(Eager, WrongMap)
+#define CACHED_DEOPTIMIZE_IF_LIST(V) \
+ V(Eager, DivisionByZero, NoSafetyCheck) \
+ V(Eager, DivisionByZero, SafetyCheck) \
+ V(Eager, Hole, NoSafetyCheck) \
+ V(Eager, Hole, SafetyCheck) \
+ V(Eager, MinusZero, NoSafetyCheck) \
+ V(Eager, MinusZero, SafetyCheck) \
+ V(Eager, Overflow, NoSafetyCheck) \
+ V(Eager, Overflow, SafetyCheck) \
+ V(Eager, Smi, SafetyCheck)
+
+#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
+ V(Eager, LostPrecision, NoSafetyCheck) \
+ V(Eager, LostPrecision, SafetyCheck) \
+ V(Eager, LostPrecisionOrNaN, NoSafetyCheck) \
+ V(Eager, LostPrecisionOrNaN, SafetyCheck) \
+ V(Eager, NotAHeapNumber, SafetyCheck) \
+ V(Eager, NotANumberOrOddball, SafetyCheck) \
+ V(Eager, NotASmi, SafetyCheck) \
+ V(Eager, OutOfBounds, SafetyCheck) \
+ V(Eager, WrongInstanceType, SafetyCheck) \
+ V(Eager, WrongMap, SafetyCheck)
#define CACHED_TRAP_IF_LIST(V) \
V(TrapDivUnrepresentable) \
@@ -534,18 +593,20 @@ struct CommonOperatorGlobalCache final {
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
- template <BranchHint kBranchHint>
- struct BranchOperator final : public Operator1<BranchHint> {
+ template <BranchHint hint, IsSafetyCheck is_safety_check>
+ struct BranchOperator final : public Operator1<BranchOperatorInfo> {
BranchOperator()
- : Operator1<BranchHint>( // --
- IrOpcode::kBranch, Operator::kKontrol, // opcode
- "Branch", // name
- 1, 0, 1, 0, 0, 2, // counts
- kBranchHint) {} // parameter
+ : Operator1<BranchOperatorInfo>( // --
+ IrOpcode::kBranch, Operator::kKontrol, // opcode
+ "Branch", // name
+ 1, 0, 1, 0, 0, 2, // counts
+ BranchOperatorInfo{hint, is_safety_check}) {} // parameter
};
- BranchOperator<BranchHint::kNone> kBranchNoneOperator;
- BranchOperator<BranchHint::kTrue> kBranchTrueOperator;
- BranchOperator<BranchHint::kFalse> kBranchFalseOperator;
+#define CACHED_BRANCH(Hint, IsCheck) \
+ BranchOperator<BranchHint::k##Hint, IsSafetyCheck::k##IsCheck> \
+ kBranch##Hint##IsCheck##Operator;
+ CACHED_BRANCH_LIST(CACHED_BRANCH)
+#undef CACHED_BRANCH
template <int kEffectInputCount>
struct EffectPhiOperator final : public Operator {
@@ -608,7 +669,8 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+ IsSafetyCheck::kNoSafetyCheck)) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -616,7 +678,8 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
- template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason,
+ IsSafetyCheck is_safety_check>
struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
: Operator1<DeoptimizeParameters>( // --
@@ -624,15 +687,18 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+ is_safety_check)) {}
};
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
- kDeoptimizeIf##Kind##Reason##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
+ DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason, \
+ IsSafetyCheck::k##IsCheck> \
+ kDeoptimizeIf##Kind##Reason##IsCheck##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
- template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason,
+ IsSafetyCheck is_safety_check>
struct DeoptimizeUnlessOperator final
: public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
@@ -641,12 +707,14 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair(),
+ is_safety_check)) {}
};
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
- DeoptimizeReason::k##Reason> \
- kDeoptimizeUnless##Kind##Reason##Operator;
+ DeoptimizeReason::k##Reason, \
+ IsSafetyCheck::k##IsCheck> \
+ kDeoptimizeUnless##Kind##Reason##IsCheck##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@@ -806,16 +874,15 @@ const Operator* CommonOperatorBuilder::Return(int value_input_count) {
value_input_count + 1, 1, 1, 0, 0, 1); // counts
}
-
-const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
- switch (hint) {
- case BranchHint::kNone:
- return &cache_.kBranchNoneOperator;
- case BranchHint::kTrue:
- return &cache_.kBranchTrueOperator;
- case BranchHint::kFalse:
- return &cache_.kBranchFalseOperator;
+const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
+ IsSafetyCheck is_safety_check) {
+#define CACHED_BRANCH(Hint, IsCheck) \
+ if (hint == BranchHint::k##Hint && \
+ is_safety_check == IsSafetyCheck::k##IsCheck) { \
+ return &cache_.kBranch##Hint##IsCheck##Operator; \
}
+ CACHED_BRANCH_LIST(CACHED_BRANCH)
+#undef CACHED_BRANCH
UNREACHABLE();
}
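
The CACHED_BRANCH_LIST expansion above is the X-macro idiom this file relies on throughout: a single list drives both the declaration of the statically cached operator instances and the lookup chain in Branch(). A stripped-down version of the pattern (hypothetical names):

#include <cstdio>

enum class Hint { kNone, kTrue, kFalse };
enum class Check { kSafetyCheck, kNoSafetyCheck };

struct Op { Hint hint; Check check; };

#define CACHED_LIST(V)    \
  V(None, SafetyCheck)    \
  V(True, SafetyCheck)    \
  V(False, NoSafetyCheck)

// First expansion: declare one cached instance per list entry.
#define DECLARE(H, C) const Op kBranch##H##C{Hint::k##H, Check::k##C};
CACHED_LIST(DECLARE)
#undef DECLARE

// Second expansion: generate the lookup chain over the same entries.
const Op* Branch(Hint h, Check c) {
#define LOOKUP(H, C) \
  if (h == Hint::k##H && c == Check::k##C) return &kBranch##H##C;
  CACHED_LIST(LOOKUP)
#undef LOOKUP
  return nullptr;  // combination not in the cached list
}

int main() {
  std::printf("%d\n", Branch(Hint::kTrue, Check::kSafetyCheck) != nullptr);
}
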
@@ -830,7 +897,8 @@ const Operator* CommonOperatorBuilder::Deoptimize(
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(kind, reason, feedback,
+ IsSafetyCheck::kNoSafetyCheck);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimize, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -841,16 +909,17 @@ const Operator* CommonOperatorBuilder::Deoptimize(
const Operator* CommonOperatorBuilder::DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
+ VectorSlotPair const& feedback, IsSafetyCheck is_safety_check) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && \
+ is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##IsCheck##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -861,16 +930,17 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
+ VectorSlotPair const& feedback, IsSafetyCheck is_safety_check) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && \
+ is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback);
+ DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1299,65 +1369,70 @@ const Operator* CommonOperatorBuilder::FrameState(
state_info); // parameter
}
-
-const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
+const Operator* CommonOperatorBuilder::Call(
+ const CallDescriptor* call_descriptor) {
class CallOperator final : public Operator1<const CallDescriptor*> {
public:
- explicit CallOperator(const CallDescriptor* descriptor)
+ explicit CallOperator(const CallDescriptor* call_descriptor)
: Operator1<const CallDescriptor*>(
- IrOpcode::kCall, descriptor->properties(), "Call",
- descriptor->InputCount() + descriptor->FrameStateCount(),
- Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfEliminatable(descriptor->properties()),
- descriptor->ReturnCount(),
- Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
+ IrOpcode::kCall, call_descriptor->properties(), "Call",
+ call_descriptor->InputCount() +
+ call_descriptor->FrameStateCount(),
+ Operator::ZeroIfPure(call_descriptor->properties()),
+ Operator::ZeroIfEliminatable(call_descriptor->properties()),
+ call_descriptor->ReturnCount(),
+ Operator::ZeroIfPure(call_descriptor->properties()),
+ Operator::ZeroIfNoThrow(call_descriptor->properties()),
+ call_descriptor) {}
void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
- return new (zone()) CallOperator(descriptor);
+ return new (zone()) CallOperator(call_descriptor);
}
const Operator* CommonOperatorBuilder::CallWithCallerSavedRegisters(
- const CallDescriptor* descriptor) {
+ const CallDescriptor* call_descriptor) {
class CallOperator final : public Operator1<const CallDescriptor*> {
public:
- explicit CallOperator(const CallDescriptor* descriptor)
+ explicit CallOperator(const CallDescriptor* call_descriptor)
: Operator1<const CallDescriptor*>(
- IrOpcode::kCallWithCallerSavedRegisters, descriptor->properties(),
- "CallWithCallerSavedRegisters",
- descriptor->InputCount() + descriptor->FrameStateCount(),
- Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfEliminatable(descriptor->properties()),
- descriptor->ReturnCount(),
- Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
+ IrOpcode::kCallWithCallerSavedRegisters,
+ call_descriptor->properties(), "CallWithCallerSavedRegisters",
+ call_descriptor->InputCount() +
+ call_descriptor->FrameStateCount(),
+ Operator::ZeroIfPure(call_descriptor->properties()),
+ Operator::ZeroIfEliminatable(call_descriptor->properties()),
+ call_descriptor->ReturnCount(),
+ Operator::ZeroIfPure(call_descriptor->properties()),
+ Operator::ZeroIfNoThrow(call_descriptor->properties()),
+ call_descriptor) {}
void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
- return new (zone()) CallOperator(descriptor);
+ return new (zone()) CallOperator(call_descriptor);
}
const Operator* CommonOperatorBuilder::TailCall(
- const CallDescriptor* descriptor) {
+ const CallDescriptor* call_descriptor) {
class TailCallOperator final : public Operator1<const CallDescriptor*> {
public:
- explicit TailCallOperator(const CallDescriptor* descriptor)
+ explicit TailCallOperator(const CallDescriptor* call_descriptor)
: Operator1<const CallDescriptor*>(
IrOpcode::kTailCall,
- descriptor->properties() | Operator::kNoThrow, "TailCall",
- descriptor->InputCount() + descriptor->FrameStateCount(), 1, 1, 0,
- 0, 1, descriptor) {}
+ call_descriptor->properties() | Operator::kNoThrow, "TailCall",
+ call_descriptor->InputCount() +
+ call_descriptor->FrameStateCount(),
+ 1, 1, 0, 0, 1, call_descriptor) {}
void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
- return new (zone()) TailCallOperator(descriptor);
+ return new (zone()) TailCallOperator(call_descriptor);
}
const Operator* CommonOperatorBuilder::Projection(size_t index) {
@@ -1412,6 +1487,7 @@ const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
}
#undef COMMON_CACHED_OP_LIST
+#undef CACHED_BRANCH_LIST
#undef CACHED_RETURN_LIST
#undef CACHED_END_LIST
#undef CACHED_EFFECT_PHI_LIST
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 0e0614dced..b753ed88dc 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -45,6 +45,31 @@ inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
+enum class IsSafetyCheck : uint8_t { kSafetyCheck, kNoSafetyCheck };
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IsSafetyCheck);
+inline size_t hash_value(IsSafetyCheck is_safety_check) {
+ return static_cast<size_t>(is_safety_check);
+}
+
+struct BranchOperatorInfo {
+ BranchHint hint;
+ IsSafetyCheck is_safety_check;
+};
+
+inline size_t hash_value(const BranchOperatorInfo& info) {
+ return base::hash_combine(info.hint, info.is_safety_check);
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchOperatorInfo);
+
+inline bool operator==(const BranchOperatorInfo& a,
+ const BranchOperatorInfo& b) {
+ return a.hint == b.hint && a.is_safety_check == b.is_safety_check;
+}
+
+V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf(
+ const Operator* const);
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
// Helper function for return nodes, because returns have a hidden value input.
@@ -54,17 +79,23 @@ int ValueInputCountOfReturn(Operator const* const op);
class DeoptimizeParameters final {
public:
DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback)
- : kind_(kind), reason_(reason), feedback_(feedback) {}
+ VectorSlotPair const& feedback,
+ IsSafetyCheck is_safety_check)
+ : kind_(kind),
+ reason_(reason),
+ feedback_(feedback),
+ is_safety_check_(is_safety_check) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
const VectorSlotPair& feedback() const { return feedback_; }
+ IsSafetyCheck is_safety_check() const { return is_safety_check_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
VectorSlotPair const feedback_;
+ IsSafetyCheck is_safety_check_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -76,6 +107,7 @@ std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const);
+IsSafetyCheck IsSafetyCheckOf(const Operator* op);
class SelectParameters final {
public:
@@ -355,7 +387,9 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* DeadValue(MachineRepresentation rep);
const Operator* Unreachable();
const Operator* End(size_t control_input_count);
- const Operator* Branch(BranchHint = BranchHint::kNone);
+ const Operator* Branch(
+ BranchHint = BranchHint::kNone,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
@@ -366,10 +400,14 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
VectorSlotPair const& feedback);
- const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback);
- const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback);
+ const Operator* DeoptimizeIf(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ const Operator* DeoptimizeUnless(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
const Operator* TrapIf(int32_t trap_id);
const Operator* TrapUnless(int32_t trap_id);
const Operator* Return(int value_input_count = 1);
@@ -421,10 +459,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* FrameState(BailoutId bailout_id,
OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info);
- const Operator* Call(const CallDescriptor* descriptor);
+ const Operator* Call(const CallDescriptor* call_descriptor);
const Operator* CallWithCallerSavedRegisters(
- const CallDescriptor* descriptor);
- const Operator* TailCall(const CallDescriptor* descriptor);
+ const CallDescriptor* call_descriptor);
+ const Operator* TailCall(const CallDescriptor* call_descriptor);
const Operator* Projection(size_t index);
const Operator* Retain();
const Operator* TypeGuard(Type* type);
@@ -438,6 +476,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
FrameStateType type, int parameter_count, int local_count,
Handle<SharedFunctionInfo> shared_info);
+ const Operator* MarkAsSafetyCheck(const Operator* op);
+
private:
Zone* zone() const { return zone_; }
@@ -447,6 +487,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
DISALLOW_COPY_AND_ASSIGN(CommonOperatorBuilder);
};
+// This should go into some common compiler header, but we do not have such a
+// thing at the moment.
+enum class LoadPoisoning { kDoPoison, kDontPoison };
+
} // namespace compiler
} // namespace internal
} // namespace v8
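
Note how the patch extends operator==, hash_value, and operator<< for DeoptimizeParameters in lockstep with the new is_safety_check field. Operators are deduplicated by parameter equality and hash, so omitting the field from either would silently merge a SafetyCheck deopt with a NoSafetyCheck one. The invariant in miniature (illustrative types, not the compiler's):

#include <cstddef>
#include <functional>
#include <iostream>

enum class IsSafetyCheck { kSafetyCheck, kNoSafetyCheck };

struct Params {
  int kind;
  int reason;
  IsSafetyCheck is_safety_check;
};

bool operator==(const Params& a, const Params& b) {
  return a.kind == b.kind && a.reason == b.reason &&
         a.is_safety_check == b.is_safety_check;  // new field included
}

size_t hash_value(const Params& p) {
  size_t h = std::hash<int>{}(p.kind);
  h = h * 31 + std::hash<int>{}(p.reason);
  h = h * 31 + static_cast<size_t>(p.is_safety_check);  // keep in sync with ==
  return h;
}

int main() {
  Params a{0, 1, IsSafetyCheck::kSafetyCheck};
  Params b{0, 1, IsSafetyCheck::kNoSafetyCheck};
  std::cout << (a == b) << " " << (hash_value(a) == hash_value(b)) << "\n";
}
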
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 523d37fe29..6c54f2b036 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -139,18 +139,26 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
if (live_input_count == 0) {
return Replace(dead());
} else if (live_input_count == 1) {
+ NodeVector loop_exits(zone_);
// Due to compaction above, the live input is at offset 0.
for (Node* const use : node->uses()) {
if (NodeProperties::IsPhi(use)) {
Replace(use, use->InputAt(0));
} else if (use->opcode() == IrOpcode::kLoopExit &&
use->InputAt(1) == node) {
- RemoveLoopExit(use);
+ // Remember the loop exits so that we can mark their loop input dead.
+ // This has to be done after the use list iteration so that we do
+ // not mutate the use list while it is being iterated.
+ loop_exits.push_back(use);
} else if (use->opcode() == IrOpcode::kTerminate) {
DCHECK_EQ(IrOpcode::kLoop, node->opcode());
Replace(use, dead());
}
}
+ for (Node* loop_exit : loop_exits) {
+ loop_exit->ReplaceInput(1, dead());
+ Revisit(loop_exit);
+ }
return Replace(node->InputAt(0));
}
DCHECK_LE(2, live_input_count);
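
The dead-code-elimination fix is an instance of a general rule: do not mutate a use list while iterating it. Loop exits are collected during the walk and rewired only afterwards. The same shape with ordinary containers (the Node type below is a stand-in, not the compiler's):

#include <cstdio>
#include <vector>

struct Node {
  bool is_loop_exit;
  std::vector<Node*> inputs;
  std::vector<Node*> uses;
};

void MarkLoopExitsDead(Node* merge, Node* dead) {
  std::vector<Node*> loop_exits;
  for (Node* use : merge->uses) {
    if (use->is_loop_exit) {
      // Defer: rewiring here could grow or shrink merge->uses and
      // invalidate this very iteration.
      loop_exits.push_back(use);
    }
  }
  for (Node* exit : loop_exits) {
    exit->inputs[1] = dead;  // stand-in for loop_exit->ReplaceInput(1, dead())
  }
}

int main() {
  Node dead{false, {}, {}};
  Node merge{false, {}, {}};
  Node exit{true, {&merge, &merge}, {}};
  merge.uses.push_back(&exit);
  MarkLoopExitsDead(&merge, &dead);
  std::printf("%d\n", exit.inputs[1] == &dead);
}
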
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index a47941e28d..290a3b5f34 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -726,9 +726,8 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
break;
case IrOpcode::kCheckedTaggedSignedToInt32:
if (frame_state == nullptr) {
- V8_Fatal(__FILE__, __LINE__, "No frame state (zapped by #%d: %s)",
- frame_state_zapper_->id(),
- frame_state_zapper_->op()->mnemonic());
+ FATAL("No frame state (zapped by #%d: %s)", frame_state_zapper_->id(),
+ frame_state_zapper_->op()->mnemonic());
}
result = LowerCheckedTaggedSignedToInt32(node, frame_state);
break;
@@ -807,9 +806,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTypeOf:
result = LowerTypeOf(node);
break;
- case IrOpcode::kClassOf:
- result = LowerClassOf(node);
- break;
case IrOpcode::kNewDoubleElements:
result = LowerNewDoubleElements(node);
break;
@@ -830,6 +826,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
break;
case IrOpcode::kDeadValue:
result = LowerDeadValue(node);
+ break;
case IrOpcode::kStringFromCharCode:
result = LowerStringFromCharCode(node);
break;
@@ -855,10 +852,10 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerSeqStringCharCodeAt(node);
break;
case IrOpcode::kStringCodePointAt:
- result = LowerStringCodePointAt(node);
+ result = LowerStringCodePointAt(node, UnicodeEncodingOf(node->op()));
break;
case IrOpcode::kSeqStringCodePointAt:
- result = LowerSeqStringCharCodeAt(node);
+ result = LowerSeqStringCodePointAt(node, UnicodeEncodingOf(node->op()));
break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
@@ -866,6 +863,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringToUpperCaseIntl:
result = LowerStringToUpperCaseIntl(node);
break;
+ case IrOpcode::kStringSubstring:
+ result = LowerStringSubstring(node);
+ break;
case IrOpcode::kStringEqual:
result = LowerStringEqual(node);
break;
@@ -972,10 +972,10 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
}
if ((result ? 1 : 0) != node->op()->ValueOutputCount()) {
- V8_Fatal(__FILE__, __LINE__,
- "Effect control linearizer lowering of '%s':"
- " value output count does not agree.",
- node->op()->mnemonic());
+ FATAL(
+ "Effect control linearizer lowering of '%s':"
+ " value output count does not agree.",
+ node->op()->mnemonic());
}
*effect = gasm()->ExtractCurrentEffect();
@@ -1365,10 +1365,10 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
Node* result =
- __ Call(desc, __ CEntryStubConstant(1), value,
+ __ Call(call_descriptor, __ CEntryStubConstant(1), value,
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
@@ -1495,8 +1495,8 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* check = __ Uint32LessThan(value_instance_type,
__ Uint32Constant(FIRST_NONSTRING_TYPE));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
- check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAString, params.feedback(), check,
+ frame_state);
return value;
}
@@ -1935,7 +1935,8 @@ Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
Node* frame_state) {
- CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
+ CheckTaggedInputParameters const& p =
+ CheckTaggedInputParametersOf(node->op());
Node* value = node->InputAt(0);
auto if_smi = __ MakeLabel();
@@ -1947,7 +1948,7 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
// In the Smi case, just convert to int32 and then float64.
// Otherwise, check heap numberness and load the number.
Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
- mode, VectorSlotPair(), value, frame_state);
+ p.mode(), p.feedback(), value, frame_state);
__ Goto(&done, number);
__ Bind(&if_smi);
@@ -2042,9 +2043,9 @@ Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kNumberToString);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), argument,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), argument,
__ NoContextConstant());
}
@@ -2379,21 +2380,9 @@ Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
Callable const callable = Builtins::CallableFor(isolate(), Builtins::kTypeof);
Operator::Properties const properties = Operator::kEliminatable;
CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), obj,
- __ NoContextConstant());
-}
-
-Node* EffectControlLinearizer::LowerClassOf(Node* node) {
- Node* obj = node->InputAt(0);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kClassOf);
- Operator::Properties const properties = Operator::kEliminatable;
- CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), obj,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
__ NoContextConstant());
}
@@ -2403,9 +2392,9 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kToBoolean);
Operator::Properties const properties = Operator::kEliminatable;
CallDescriptor::Flags const flags = CallDescriptor::kNoAllocate;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), obj,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
__ NoContextConstant());
}
@@ -2583,10 +2572,10 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kNewArgumentsElements);
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), frame, length,
- __ SmiConstant(mapped_count), __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), frame,
+ length, __ SmiConstant(mapped_count), __ NoContextConstant());
}
Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
@@ -2656,9 +2645,9 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kSameValue);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
__ NoContextConstant());
}
@@ -2678,9 +2667,9 @@ Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringToNumber);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), string,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
__ NoContextConstant());
}
@@ -2692,44 +2681,174 @@ Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringCharAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
+ position, __ NoContextConstant());
}
Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kStringCharCodeAt);
- Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
- MachineType::TaggedSigned());
- return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
- __ NoContextConstant());
+ // We need a loop here to properly deal with indirect strings
+ // (SlicedString, ConsString and ThinString).
+ auto loop = __ MakeLoopLabel(MachineRepresentation::kTagged,
+ MachineRepresentation::kWord32);
+ auto loop_next = __ MakeLabel(MachineRepresentation::kTagged,
+ MachineRepresentation::kWord32);
+ auto loop_done = __ MakeLabel(MachineRepresentation::kWord32);
+ __ Goto(&loop, receiver, position);
+ __ Bind(&loop);
+ {
+ Node* receiver = loop.PhiAt(0);
+ Node* position = loop.PhiAt(1);
+ Node* receiver_map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* receiver_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), receiver_map);
+ Node* receiver_representation = __ Word32And(
+ receiver_instance_type, __ Int32Constant(kStringRepresentationMask));
+
+ // Dispatch on the current {receiver}'s string representation.
+ auto if_seqstring = __ MakeLabel();
+ auto if_consstring = __ MakeLabel();
+ auto if_thinstring = __ MakeLabel();
+ auto if_externalstring = __ MakeLabel();
+ auto if_slicedstring = __ MakeLabel();
+ auto if_runtime = __ MakeDeferredLabel();
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kSeqStringTag)),
+ &if_seqstring);
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kConsStringTag)),
+ &if_consstring);
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kThinStringTag)),
+ &if_thinstring);
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kExternalStringTag)),
+ &if_externalstring);
+ __ Branch(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kSlicedStringTag)),
+ &if_slicedstring, &if_runtime);
+
+ __ Bind(&if_seqstring);
+ {
+ Node* receiver_is_onebyte = __ Word32Equal(
+ __ Word32Equal(__ Word32And(receiver_instance_type,
+ __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kTwoByteStringTag)),
+ __ Int32Constant(0));
+ Node* result = LoadFromSeqString(receiver, position, receiver_is_onebyte);
+ __ Goto(&loop_done, result);
+ }
+
+ __ Bind(&if_thinstring);
+ {
+ Node* receiver_actual =
+ __ LoadField(AccessBuilder::ForThinStringActual(), receiver);
+ __ Goto(&loop_next, receiver_actual, position);
+ }
+
+ __ Bind(&if_consstring);
+ {
+ Node* receiver_second =
+ __ LoadField(AccessBuilder::ForConsStringSecond(), receiver);
+ __ GotoIfNot(__ WordEqual(receiver_second, __ EmptyStringConstant()),
+ &if_runtime);
+ Node* receiver_first =
+ __ LoadField(AccessBuilder::ForConsStringFirst(), receiver);
+ __ Goto(&loop_next, receiver_first, position);
+ }
+
+ __ Bind(&if_externalstring);
+ {
+ // We need to bail out to the runtime for short external strings.
+ __ GotoIf(__ Word32Equal(
+ __ Word32And(receiver_instance_type,
+ __ Int32Constant(kShortExternalStringMask)),
+ __ Int32Constant(kShortExternalStringTag)),
+ &if_runtime);
+
+ Node* receiver_data = __ LoadField(
+ AccessBuilder::ForExternalStringResourceData(), receiver);
+
+ auto if_onebyte = __ MakeLabel();
+ auto if_twobyte = __ MakeLabel();
+ __ Branch(
+ __ Word32Equal(__ Word32And(receiver_instance_type,
+ __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kTwoByteStringTag)),
+ &if_twobyte, &if_onebyte);
+
+ __ Bind(&if_onebyte);
+ {
+ Node* result = __ Load(MachineType::Uint8(), receiver_data,
+ ChangeInt32ToIntPtr(position));
+ __ Goto(&loop_done, result);
+ }
+
+ __ Bind(&if_twobyte);
+ {
+ Node* result = __ Load(
+ MachineType::Uint16(), receiver_data,
+ __ Word32Shl(ChangeInt32ToIntPtr(position), __ Int32Constant(1)));
+ __ Goto(&loop_done, result);
+ }
+ }
+
+ __ Bind(&if_slicedstring);
+ {
+ Node* receiver_offset =
+ __ LoadField(AccessBuilder::ForSlicedStringOffset(), receiver);
+ Node* receiver_parent =
+ __ LoadField(AccessBuilder::ForSlicedStringParent(), receiver);
+ __ Goto(&loop_next, receiver_parent,
+ __ Int32Add(position, ChangeSmiToInt32(receiver_offset)));
+ }
+
+ __ Bind(&if_runtime);
+ {
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kStringCharCodeAt;
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+ Node* result =
+ __ Call(call_descriptor, __ CEntryStubConstant(1), receiver,
+ ChangeInt32ToSmi(position),
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(2), __ NoContextConstant());
+ __ Goto(&loop_done, ChangeSmiToInt32(result));
+ }
+
+ __ Bind(&loop_next);
+ __ Goto(&loop, loop_next.PhiAt(0), loop_next.PhiAt(1));
+ }
+ __ Bind(&loop_done);
+ return loop_done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
+Node* EffectControlLinearizer::LowerStringCodePointAt(
+ Node* node, UnicodeEncoding encoding) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt);
+ Builtins::Name builtin = encoding == UnicodeEncoding::UTF16
+ ? Builtins::kStringCodePointAtUTF16
+ : Builtins::kStringCodePointAtUTF32;
+
+ Callable const callable = Builtins::CallableFor(isolate(), builtin);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
MachineType::TaggedSigned());
- return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
+ position, __ NoContextConstant());
}
-Node* EffectControlLinearizer::LoadFromString(Node* receiver, Node* position,
- Node* is_one_byte) {
+Node* EffectControlLinearizer::LoadFromSeqString(Node* receiver, Node* position,
+ Node* is_one_byte) {
auto one_byte_load = __ MakeLabel();
auto done = __ MakeLabel(MachineRepresentation::kWord32);
__ GotoIf(is_one_byte, &one_byte_load);
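
The rewritten LowerStringCharCodeAt inlines a loop that peels indirect string wrappers until it reaches a flat payload: a ThinString forwards to its actual string, a ConsString whose second part is empty forwards to its first, and a SlicedString forwards to its parent with the offset added to the position; everything else falls back to the runtime. A plain-C++ analogue of the unwrapping loop (representations simplified, types invented):

#include <cassert>
#include <cstdint>
#include <string>

enum class Kind { kSeq, kThin, kCons, kSliced };

struct Str {
  Kind kind;
  std::string payload;        // only meaningful for kSeq
  const Str* first = nullptr; // kThin: actual; kCons: first; kSliced: parent
  int offset = 0;             // kSliced only
};

uint16_t CharCodeAt(const Str* s, int pos) {
  for (;;) {
    switch (s->kind) {
      case Kind::kSeq:
        return static_cast<uint16_t>(s->payload[pos]);
      case Kind::kThin:    // forward to the actual string
      case Kind::kCons:    // assume the second part is empty (else: runtime)
        s = s->first;
        break;
      case Kind::kSliced:  // shift the index into the parent
        pos += s->offset;
        s = s->first;
        break;
    }
  }
}

int main() {
  Str seq{Kind::kSeq, "hello"};
  Str sliced{Kind::kSliced, "", &seq, 1};
  Str thin{Kind::kThin, "", &sliced};
  assert(CharCodeAt(&thin, 0) == 'e');
}
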
@@ -2756,7 +2875,7 @@ Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
__ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
__ Int32Constant(kOneByteStringTag));
- return LoadFromString(receiver, position, is_one_byte);
+ return LoadFromSeqString(receiver, position, is_one_byte);
}
Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
@@ -2770,7 +2889,7 @@ Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
__ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
__ Int32Constant(kOneByteStringTag));
- Node* first_char_code = LoadFromString(receiver, position, is_one_byte);
+ Node* first_char_code = LoadFromSeqString(receiver, position, is_one_byte);
auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
@@ -2779,16 +2898,18 @@ Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
__ Word32Equal(__ Word32And(first_char_code, __ Int32Constant(0xFC00)),
__ Int32Constant(0xD800));
// Return first character code.
- __ GotoIf(first_out, &return_result, first_char_code);
+ __ GotoIfNot(first_out, &return_result, first_char_code);
// Check if position + 1 is still in range.
- Node* length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
+ Node* length = ChangeSmiToInt32(
+ __ LoadField(AccessBuilder::ForStringLength(), receiver));
Node* next_position = __ Int32Add(position, __ Int32Constant(1));
Node* next_position_in_range = __ Int32LessThan(next_position, length);
- __ GotoIf(next_position_in_range, &return_result, first_char_code);
+ __ GotoIfNot(next_position_in_range, &return_result, first_char_code);
// Load second character code.
- Node* second_char_code = LoadFromString(receiver, next_position, is_one_byte);
- // Check if first character code is outside of interval [0xD800, 0xDBFF].
+ Node* second_char_code =
+ LoadFromSeqString(receiver, next_position, is_one_byte);
+ // Check if second character code is outside of interval [0xDC00, 0xDFFF].
Node* second_out =
__ Word32Equal(__ Word32And(second_char_code, __ Int32Constant(0xFC00)),
__ Int32Constant(0xDC00));
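
The corrected GotoIfNot branches and the 0xFC00 masks above implement standard UTF-16 surrogate classification: a unit is a lead surrogate iff (c & 0xFC00) == 0xD800 and a trail surrogate iff (c & 0xFC00) == 0xDC00, and only a lead followed by an in-range trail combines into a supplementary code point. The same arithmetic as a self-contained function (mirrors the UTF16 behavior of returning a lone surrogate as-is):

#include <cassert>
#include <cstdint>

uint32_t CodePointAt(const uint16_t* units, int pos, int length) {
  uint32_t first = units[pos];
  if ((first & 0xFC00) != 0xD800) return first;   // not a lead surrogate
  if (pos + 1 >= length) return first;            // no trail unit available
  uint32_t second = units[pos + 1];
  if ((second & 0xFC00) != 0xDC00) return first;  // not a trail surrogate
  // UTF-16 decoding: 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00).
  return ((first - 0xD800) << 10) + (second - 0xDC00) + 0x10000;
}

int main() {
  const uint16_t s[] = {0xD83D, 0xDE00};   // U+1F600
  assert(CodePointAt(s, 0, 2) == 0x1F600);
  assert(CodePointAt(s, 1, 2) == 0xDE00);  // lone trail returned as-is
}
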
@@ -2862,12 +2983,12 @@ Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
{
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kStringCharFromCode;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- Node* vtrue1 =
- __ Call(desc, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
- __ ExternalConstant(ExternalReference(id, isolate())),
- __ Int32Constant(1), __ NoContextConstant());
+ Node* vtrue1 = __ Call(
+ call_descriptor, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
__ Goto(&done, vtrue1);
}
__ Bind(&done);
@@ -2883,9 +3004,9 @@ Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringToLowerCaseIntl);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), receiver,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
__ NoContextConstant());
}
@@ -2893,9 +3014,9 @@ Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
Node* receiver = node->InputAt(0);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kStringToUpperCaseIntl;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- return __ Call(desc, __ CEntryStubConstant(1), receiver,
+ return __ Call(call_descriptor, __ CEntryStubConstant(1), receiver,
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
}
@@ -3048,10 +3169,10 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringIndexOf);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), subject, search_string,
- position, __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), subject,
+ search_string, position, __ NoContextConstant());
}
Node* EffectControlLinearizer::LowerStringLength(Node* node) {
@@ -3067,12 +3188,27 @@ Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
__ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerStringSubstring(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* start = ChangeInt32ToIntPtr(node->InputAt(1));
+ Node* end = ChangeInt32ToIntPtr(node->InputAt(2));
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringSubstring);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
+ start, end, __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
return LowerStringComparison(
Builtins::CallableFor(isolate(), Builtins::kStringEqual), node);
@@ -3176,10 +3312,10 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
builder.AddParam(MachineType::AnyTagged());
Node* try_internalize_string_function = __ ExternalConstant(
ExternalReference::try_internalize_string_function(isolate()));
- CallDescriptor const* const desc =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
- Node* val_internalized =
- __ Call(common()->Call(desc), try_internalize_string_function, val);
+ Node* val_internalized = __ Call(common()->Call(call_descriptor),
+ try_internalize_string_function, val);
// Now see if the results match.
__ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
@@ -3218,10 +3354,14 @@ Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
}
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
+ return __ WordShl(ChangeInt32ToIntPtr(value), SmiShiftBitsConstant());
+}
+
+Node* EffectControlLinearizer::ChangeInt32ToIntPtr(Node* value) {
if (machine()->Is64()) {
value = __ ChangeInt32ToInt64(value);
}
- return __ WordShl(value, SmiShiftBitsConstant());
+ return value;
}
Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
@@ -3350,10 +3490,10 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCopyFastSmiOrObjectElements);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- Node* result = __ Call(desc, __ HeapConstant(callable.code()), object,
- __ NoContextConstant());
+ Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ object, __ NoContextConstant());
__ Goto(&done, result);
__ Bind(&done);
@@ -3386,11 +3526,12 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
: Builtins::CallableFor(isolate(),
Builtins::kGrowFastSmiOrObjectElements);
CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
properties);
- Node* new_elements = __ Call(desc, __ HeapConstant(callable.code()), object,
- ChangeInt32ToSmi(index), __ NoContextConstant());
+ Node* new_elements =
+ __ Call(call_descriptor, __ HeapConstant(callable.code()), object,
+ ChangeInt32ToSmi(index), __ NoContextConstant());
// Ensure that we were able to grow the {elements}.
__ DeoptimizeIf(DeoptimizeReason::kCouldNotGrowElements, params.feedback(),
@@ -3429,9 +3570,9 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
// Instance migration, call out to the runtime for {object}.
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTransitionElementsKind;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1), object, target_map,
+ __ Call(call_descriptor, __ CEntryStubConstant(1), object, target_map,
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(2), __ NoContextConstant());
break;
@@ -3557,8 +3698,9 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
// Compute the effective storage pointer, handling the case where the
// {external} pointer is the effective storage pointer (i.e. the {base}
// is Smi zero).
- Node* storage = NumberMatcher(base).Is(0) ? external : __ UnsafePointerAdd(
- base, external);
+ Node* storage = IntPtrMatcher(base).Is(0)
+ ? external
+ : __ UnsafePointerAdd(base, external);
// Perform the actual typed element access.
return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
@@ -3580,8 +3722,9 @@ void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
// Compute the effective storage pointer, handling the case where the
// {external} pointer is the effective storage pointer (i.e. the {base}
// is Smi zero).
- Node* storage = NumberMatcher(base).Is(0) ? external : __ UnsafePointerAdd(
- base, external);
+ Node* storage = IntPtrMatcher(base).Is(0)
+ ? external
+ : __ UnsafePointerAdd(base, external);
// Perform the actual typed element access.
__ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
@@ -3604,9 +3747,9 @@ void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array,
// Instance migration, call out to the runtime for {array}.
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTransitionElementsKind;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1), array, target_map,
+ __ Call(call_descriptor, __ CEntryStubConstant(1), array, target_map,
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(2), __ NoContextConstant());
}
@@ -3951,9 +4094,9 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
AbortReason reason = AbortReasonOf(node->op());
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kAbort;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1),
+ __ Call(call_descriptor, __ CEntryStubConstant(1),
jsgraph()->SmiConstant(static_cast<int>(reason)),
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
@@ -3988,13 +4131,13 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
properties);
Node* native_context = __ LoadField(
AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
- Node* result = __ Call(desc, __ HeapConstant(callable.code()), value,
- native_context);
+ Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ value, native_context);
__ Goto(&done_convert, result);
__ Bind(&done_convert);
@@ -4024,13 +4167,13 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
properties);
Node* native_context = __ LoadField(
AccessBuilder::ForJSGlobalProxyNativeContext(), global_proxy);
- Node* result = __ Call(desc, __ HeapConstant(callable.code()), value,
- native_context);
+ Node* result = __ Call(call_descriptor, __ HeapConstant(callable.code()),
+ value, native_context);
__ Goto(&done_convert, result);
// Replace the {value} with the {global_proxy}.
@@ -4381,11 +4524,11 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kFindOrderedHashMapEntry);
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
properties);
- return __ Call(desc, __ HeapConstant(callable.code()), table, key,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), table,
+ key, __ NoContextConstant());
}
}
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 47b1586d6d..21425d3ab0 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -119,13 +119,14 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
Node* LowerSeqStringCharCodeAt(Node* node);
- Node* LowerStringCodePointAt(Node* node);
+ Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
Node* LowerSeqStringCodePointAt(Node* node, UnicodeEncoding encoding);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromCharCode(Node* node);
Node* LowerStringFromCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
+ Node* LowerStringSubstring(Node* node);
Node* LowerStringLength(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
@@ -136,7 +137,6 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
Node* LowerTypeOf(Node* node);
- Node* LowerClassOf(Node* node);
Node* LowerToBoolean(Node* node);
Node* LowerPlainPrimitiveToNumber(Node* node);
Node* LowerPlainPrimitiveToWord32(Node* node);
@@ -176,13 +176,14 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
Node* ChangeInt32ToSmi(Node* value);
+ Node* ChangeInt32ToIntPtr(Node* value);
Node* ChangeIntPtrToInt32(Node* value);
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ObjectIsSmi(Node* value);
- Node* LoadFromString(Node* receiver, Node* position, Node* is_one_byte);
+ Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 16a9d78faf..66715b9a94 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -218,9 +218,8 @@ void EscapeAnalysisReducer::VerifyReplacement() const {
if (const VirtualObject* vobject =
analysis_result().GetVirtualObject(node)) {
if (!vobject->HasEscaped()) {
- V8_Fatal(__FILE__, __LINE__,
- "Escape analysis failed to remove node %s#%d\n",
- node->op()->mnemonic(), node->id());
+ FATAL("Escape analysis failed to remove node %s#%d\n",
+ node->op()->mnemonic(), node->id());
}
}
}
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 0a0e3ec868..7d55cc29d3 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -79,12 +79,15 @@ std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
}
namespace {
+
Node* CreateBuiltinContinuationFrameStateCommon(
- JSGraph* js_graph, Builtins::Name name, Node* context, Node** parameters,
- int parameter_count, Node* outer_frame_state, Handle<JSFunction> function) {
- Isolate* isolate = js_graph->isolate();
- Graph* graph = js_graph->graph();
- CommonOperatorBuilder* common = js_graph->common();
+ JSGraph* jsgraph, FrameStateType frame_type, Builtins::Name name,
+ Node* closure, Node* context, Node** parameters, int parameter_count,
+ Node* outer_frame_state,
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>()) {
+ Isolate* const isolate = jsgraph->isolate();
+ Graph* const graph = jsgraph->graph();
+ CommonOperatorBuilder* const common = jsgraph->common();
BailoutId bailout_id = Builtins::GetContinuationBailoutId(name);
Callable callable = Builtins::CallableFor(isolate, name);
@@ -93,35 +96,26 @@ Node* CreateBuiltinContinuationFrameStateCommon(
common->StateValues(parameter_count, SparseInputMask::Dense());
Node* params_node = graph->NewNode(op_param, parameter_count, parameters);
- FrameStateType frame_type =
- function.is_null() ? FrameStateType::kBuiltinContinuation
- : FrameStateType::kJavaScriptBuiltinContinuation;
const FrameStateFunctionInfo* state_info =
- common->CreateFrameStateFunctionInfo(
- frame_type, parameter_count, 0,
- function.is_null() ? Handle<SharedFunctionInfo>()
- : Handle<SharedFunctionInfo>(function->shared()));
+ common->CreateFrameStateFunctionInfo(frame_type, parameter_count, 0,
+ shared);
const Operator* op = common->FrameState(
bailout_id, OutputFrameStateCombine::Ignore(), state_info);
- Node* function_node = function.is_null() ? js_graph->UndefinedConstant()
- : js_graph->HeapConstant(function);
-
Node* frame_state = graph->NewNode(
- op, params_node, js_graph->EmptyStateValues(),
- js_graph->EmptyStateValues(), context, function_node, outer_frame_state);
+ op, params_node, jsgraph->EmptyStateValues(), jsgraph->EmptyStateValues(),
+ context, closure, outer_frame_state);
return frame_state;
}
+
} // namespace
-Node* CreateStubBuiltinContinuationFrameState(JSGraph* js_graph,
- Builtins::Name name,
- Node* context, Node** parameters,
- int parameter_count,
- Node* outer_frame_state,
- ContinuationFrameStateMode mode) {
- Isolate* isolate = js_graph->isolate();
+Node* CreateStubBuiltinContinuationFrameState(
+ JSGraph* jsgraph, Builtins::Name name, Node* context,
+ Node* const* parameters, int parameter_count, Node* outer_frame_state,
+ ContinuationFrameStateMode mode) {
+ Isolate* isolate = jsgraph->isolate();
Callable callable = Builtins::CallableFor(isolate, name);
CallInterfaceDescriptor descriptor = callable.descriptor();
@@ -142,18 +136,18 @@ Node* CreateStubBuiltinContinuationFrameState(JSGraph* js_graph,
}
return CreateBuiltinContinuationFrameStateCommon(
- js_graph, name, context, actual_parameters.data(),
- static_cast<int>(actual_parameters.size()), outer_frame_state,
- Handle<JSFunction>());
+ jsgraph, FrameStateType::kBuiltinContinuation, name,
+ jsgraph->UndefinedConstant(), context, actual_parameters.data(),
+ static_cast<int>(actual_parameters.size()), outer_frame_state);
}
Node* CreateJavaScriptBuiltinContinuationFrameState(
- JSGraph* js_graph, Handle<JSFunction> function, Builtins::Name name,
- Node* target, Node* context, Node** stack_parameters,
+ JSGraph* jsgraph, Handle<SharedFunctionInfo> shared, Builtins::Name name,
+ Node* target, Node* context, Node* const* stack_parameters,
int stack_parameter_count, Node* outer_frame_state,
ContinuationFrameStateMode mode) {
- Isolate* isolate = js_graph->isolate();
- Callable callable = Builtins::CallableFor(isolate, name);
+ Isolate* const isolate = jsgraph->isolate();
+ Callable const callable = Builtins::CallableFor(isolate, name);
// Lazy deopt points where the frame state is associated with a call get an
// additional parameter for the return result from the call that's added by
@@ -165,8 +159,8 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
(mode == ContinuationFrameStateMode::EAGER ? 0 : 1));
Node* argc =
- js_graph->Constant(stack_parameter_count -
- (mode == ContinuationFrameStateMode::EAGER ? 1 : 0));
+ jsgraph->Constant(stack_parameter_count -
+ (mode == ContinuationFrameStateMode::EAGER ? 1 : 0));
// Stack parameters first. They must be first because the receiver is expected
// to be the second value in the translation when creating stack crawls
@@ -179,12 +173,13 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
// Register parameters follow stack parameters. The context will be added by
// instruction selector during FrameState translation.
actual_parameters.push_back(target);
- actual_parameters.push_back(js_graph->UndefinedConstant());
+ actual_parameters.push_back(jsgraph->UndefinedConstant());
actual_parameters.push_back(argc);
return CreateBuiltinContinuationFrameStateCommon(
- js_graph, name, context, &actual_parameters[0],
- static_cast<int>(actual_parameters.size()), outer_frame_state, function);
+ jsgraph, FrameStateType::kJavaScriptBuiltinContinuation, name, target,
+ context, &actual_parameters[0],
+ static_cast<int>(actual_parameters.size()), outer_frame_state, shared);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index ac00f8c129..fb3d42ff41 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -145,16 +145,14 @@ static const int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
enum class ContinuationFrameStateMode { EAGER, LAZY };
-Node* CreateStubBuiltinContinuationFrameState(JSGraph* graph,
- Builtins::Name name,
- Node* context, Node** parameters,
- int parameter_count,
- Node* outer_frame_state,
- ContinuationFrameStateMode mode);
+Node* CreateStubBuiltinContinuationFrameState(
+ JSGraph* graph, Builtins::Name name, Node* context, Node* const* parameters,
+ int parameter_count, Node* outer_frame_state,
+ ContinuationFrameStateMode mode);
Node* CreateJavaScriptBuiltinContinuationFrameState(
- JSGraph* graph, Handle<JSFunction> function, Builtins::Name name,
- Node* target, Node* context, Node** stack_parameters,
+ JSGraph* graph, Handle<SharedFunctionInfo> shared, Builtins::Name name,
+ Node* target, Node* context, Node* const* stack_parameters,
int stack_parameter_count, Node* outer_frame_state,
ContinuationFrameStateMode mode);
diff --git a/deps/v8/src/compiler/functional-list.h b/deps/v8/src/compiler/functional-list.h
new file mode 100644
index 0000000000..2345f1d360
--- /dev/null
+++ b/deps/v8/src/compiler/functional-list.h
@@ -0,0 +1,122 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FUNCTIONAL_LIST_H_
+#define V8_COMPILER_FUNCTIONAL_LIST_H_
+
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A generic stack implemented as a purely functional singly-linked list, which
+// results in an O(1) copy operation. It is the equivalent of functional lists
+// in ML-like languages, with the only difference that it also caches the length
+// of the list in each node.
+// TODO(tebbi): Use this implementation also for RedundancyElimination.
+template <class A>
+class FunctionalList {
+ private:
+ struct Cons : ZoneObject {
+ Cons(A top, Cons* rest)
+ : top(std::move(top)), rest(rest), size(1 + (rest ? rest->size : 0)) {}
+ A const top;
+ Cons* const rest;
+ size_t const size;
+ };
+
+ public:
+ FunctionalList() : elements_(nullptr) {}
+
+ bool operator==(const FunctionalList<A>& other) const {
+ if (Size() != other.Size()) return false;
+ iterator it = begin();
+ iterator other_it = other.begin();
+ while (true) {
+ if (it == other_it) return true;
+ if (*it != *other_it) return false;
+ ++it;
+ ++other_it;
+ }
+ }
+ bool operator!=(const FunctionalList<A>& other) const {
+ return !(*this == other);
+ }
+
+ const A& Front() const {
+ DCHECK_GT(Size(), 0);
+ return elements_->top;
+ }
+
+ FunctionalList Rest() const {
+ FunctionalList result = *this;
+ result.DropFront();
+ return result;
+ }
+
+ void DropFront() {
+ CHECK_GT(Size(), 0);
+ elements_ = elements_->rest;
+ }
+
+ void PushFront(A a, Zone* zone) {
+ elements_ = new (zone) Cons(std::move(a), elements_);
+ }
+
+ // If {hint} happens to be exactly what we want to allocate, avoid allocation
+ // by reusing {hint}.
+ void PushFront(A a, Zone* zone, FunctionalList hint) {
+ if (hint.Size() == Size() + 1 && hint.Front() == a &&
+ hint.Rest() == *this) {
+ *this = hint;
+ } else {
+ PushFront(a, zone);
+ }
+ }
+
+ // Drop elements until the current stack is equal to the tail shared with
+ // {other}. The shared tail must not only be equal, but also refer to the
+ // same memory.
+ void ResetToCommonAncestor(FunctionalList other) {
+ while (other.Size() > Size()) other.DropFront();
+ while (other.Size() < Size()) DropFront();
+ while (elements_ != other.elements_) {
+ DropFront();
+ other.DropFront();
+ }
+ }
+
+ size_t Size() const { return elements_ ? elements_->size : 0; }
+
+ class iterator {
+ public:
+ explicit iterator(Cons* cur) : current_(cur) {}
+
+ const A& operator*() const { return current_->top; }
+ iterator& operator++() {
+ current_ = current_->rest;
+ return *this;
+ }
+ bool operator==(const iterator& other) const {
+ return this->current_ == other.current_;
+ }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
+
+ private:
+ Cons* current_;
+ };
+
+ iterator begin() const { return iterator(elements_); }
+ iterator end() const { return iterator(nullptr); }
+
+ private:
+ Cons* elements_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_FUNCTIONAL_LIST_H_
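
The class above leans on V8's Zone allocator, so it cannot be compiled in isolation. As a minimal standalone sketch of the same design (PersistentList and all other names here are illustrative, not part of V8; std::shared_ptr stands in for zone allocation), the O(1)-copy property looks like this:

#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>

template <class A>
class PersistentList {
  // Same shape as the Cons struct above: element, tail, cached size.
  struct Cons {
    Cons(A top, std::shared_ptr<const Cons> rest)
        : top(std::move(top)),
          rest(std::move(rest)),
          size(1 + (this->rest ? this->rest->size : 0)) {}
    const A top;
    const std::shared_ptr<const Cons> rest;
    const size_t size;
  };

 public:
  size_t Size() const { return elements_ ? elements_->size : 0; }
  const A& Front() const { return elements_->top; }  // requires Size() > 0
  void PushFront(A a) {
    elements_ = std::make_shared<const Cons>(std::move(a), elements_);
  }
  void DropFront() { elements_ = elements_->rest; }  // requires Size() > 0

 private:
  std::shared_ptr<const Cons> elements_;
};

int main() {
  PersistentList<int> stack;
  stack.PushFront(1);
  stack.PushFront(2);
  PersistentList<int> copy = stack;  // O(1): copies one pointer.
  copy.PushFront(3);                 // Leaves {stack} untouched.
  assert(stack.Size() == 2 && copy.Size() == 3);
  assert(stack.Front() == 2 && copy.Front() == 3);
  return 0;
}

Because every copy shares the list spine, DropFront on one copy never disturbs another; the zone-based version above gets the same sharing without reference counting, since zone memory is freed wholesale.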
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index a0b2e0ff0a..676860fdcd 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -244,10 +244,10 @@ Operator const* GraphAssembler::ToNumberOperator() {
Callable callable =
Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
Operator::kEliminatable);
- to_number_operator_.set(common()->Call(desc));
+ to_number_operator_.set(common()->Call(call_descriptor));
}
return to_number_operator_.get();
}
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 9ae74d0df5..f3dd4e70f9 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -220,7 +220,7 @@ class GraphAssembler {
Node* DeoptimizeIfNot(DeoptimizeReason reason, VectorSlotPair const& feedback,
Node* condition, Node* frame_state);
template <typename... Args>
- Node* Call(const CallDescriptor* desc, Args... args);
+ Node* Call(const CallDescriptor* call_descriptor, Args... args);
template <typename... Args>
Node* Call(const Operator* op, Args... args);
@@ -406,8 +406,9 @@ void GraphAssembler::GotoIfNot(Node* condition,
}
template <typename... Args>
-Node* GraphAssembler::Call(const CallDescriptor* desc, Args... args) {
- const Operator* op = common()->Call(desc);
+Node* GraphAssembler::Call(const CallDescriptor* call_descriptor,
+ Args... args) {
+ const Operator* op = common()->Call(call_descriptor);
return Call(op, args...);
}
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 517f71e955..adb97ddf4d 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -23,6 +23,8 @@ class Node;
// out-of-line data associated with each node.
typedef uint32_t NodeId;
+// Possible outcomes for decisions.
+enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
// Represents the result of trying to reduce a node in the graph.
class Reduction final {
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 47ded6a30c..91df483622 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -372,6 +372,17 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ j(not_equal, &binop); \
} while (false)
+#define ASSEMBLE_MOVX(mov_instr) \
+ do { \
+ if (instr->addressing_mode() != kMode_None) { \
+ __ mov_instr(i.OutputRegister(), i.MemoryOperand()); \
+ } else if (instr->InputAt(0)->IsRegister()) { \
+ __ mov_instr(i.OutputRegister(), i.InputRegister(0)); \
+ } else { \
+ __ mov_instr(i.OutputRegister(), i.InputOperand(0)); \
+ } \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -484,32 +495,52 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ push(eax); // Push eax so we can use it as a scratch register.
+ __ ComputeCodeStartAddress(eax);
+ __ cmp(eax, kJavaScriptCallCodeStartRegister);
+ __ Assert(equal, AbortReason::kWrongFunctionCodeStart);
+ __ pop(eax); // Restore eax.
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- __ call(&current);
- int pc = __ pc_offset();
- __ bind(&current);
- // In order to get the address of the current instruction, we first need
- // to use a call and then use a pop, thus pushing the return address to
- // the stack and then popping it into the register.
- __ pop(ecx);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ mov(ecx, Operand(ecx, offset));
- __ test(FieldOperand(ecx, CodeDataContainer::kKindSpecificFlagsOffset),
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ mov(ebx, Operand(kJavaScriptCallCodeStartRegister, offset));
+ __ test(FieldOperand(ebx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ __ push(eax); // Push eax so we can use it as a scratch register.
+
+ // Set a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ __ ComputeCodeStartAddress(eax);
+ __ mov(kSpeculationPoisonRegister, Immediate(0));
+ __ cmp(kJavaScriptCallCodeStartRegister, eax);
+ __ mov(eax, Immediate(-1));
+ __ cmov(equal, kSpeculationPoisonRegister, eax);
+
+ __ pop(eax); // Restore eax.
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ and_(kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ and_(kContextRegister, kSpeculationPoisonRegister);
+ __ and_(esp, kSpeculationPoisonRegister);
+}
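
GenerateSpeculationPoison above is the ia32 flavor of a branch-free Spectre mitigation: build a mask that is all-ones when kJavaScriptCallCodeStartRegister matches the real code start and all-zeros when the CPU is speculating down a wrong path, then AND untrusted registers with it. A scalar sketch of the masking idea follows (illustrative only; this is not the emitted machine code):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t code_start = 0x1000;

  // Negating a bool yields all-ones for true and all-zeros for false,
  // which is what the cmp/cmov pair above computes without branching.
  uintptr_t reg = 0x1000;
  uintptr_t poison = -static_cast<uintptr_t>(reg == code_start);

  uintptr_t pointer = 0xdeadbeef;
  assert((pointer & poison) == pointer);  // Correct PC: value passes through.

  reg = 0x2000;  // Simulate a mispredicted code start.
  poison = -static_cast<uintptr_t>(reg == code_start);
  assert((pointer & poison) == 0);  // Wrong PC: value is zeroed.
  return 0;
}

AssembleRegisterArgumentPoisoning then applies the mask to the function, context, and stack-pointer registers, so misspeculated code cannot dereference attacker-controlled values through them.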
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -524,7 +555,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -537,11 +572,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (info()->IsWasm()) {
__ wasm_call(wasm_code, RelocInfo::WASM_CALL);
} else {
- __ call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
}
} else {
Register reg = i.InputRegister(0);
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -559,7 +602,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -576,7 +623,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -585,7 +636,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -597,6 +652,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, AbortReason::kWrongFunctionContext);
}
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
@@ -1403,10 +1459,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kIA32Movsxbl:
- __ movsx_b(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movsx_b);
break;
case kIA32Movzxbl:
- __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movzx_b);
break;
case kIA32Movb: {
size_t index = 0;
@@ -1419,10 +1475,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Movsxwl:
- __ movsx_w(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movsx_w);
break;
case kIA32Movzxwl:
- __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movzx_w);
break;
case kIA32Movw: {
size_t index = 0;
@@ -2371,6 +2427,126 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+
+#define I8x16_SPLAT(reg, scratch, v) \
+ __ Move(reg, static_cast<uint32_t>(v)); \
+ __ Pxor(scratch, scratch); \
+ __ Pshufb(reg, scratch)
+
+ case kSSEI8x16Shl: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ // src = AAaa ... AAaa
+ // tmp = 0F0F ... 0F0F (shift=4)
+ I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU >> shift);
+
+ // src = src & tmp
+ // => 0A0a ... 0A0a
+ __ pand(src, tmp);
+
+ // src = src << shift
+ // => A0a0 ... A0a0 (shift=4)
+ __ pslld(src, shift);
+ break;
+ }
+ case kAVXI8x16Shl: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp =
+ dst != src ? dst : i.ToSimd128Register(instr->TempAt(0));
+
+ // src = AAaa ... AAaa
+ // tmp = 0F0F ... 0F0F (shift=4)
+ I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU >> shift);
+
+ // dst = src & tmp
+ // => 0A0a ... 0A0a
+ __ vpand(dst, src, tmp);
+
+ // dst = dst << shift
+ // => A0a0 ... A0a0 (shift=4)
+ __ vpslld(dst, dst, shift);
+ break;
+ }
+ case kSSEI8x16ShrS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ // I16x8 view of I8x16
+ // src = AAaa AAaa ... AAaa AAaa
+
+ // tmp = aa00 aa00 ... aa00 aa00
+ __ movaps(tmp, src);
+ __ Move(kScratchDoubleReg, static_cast<uint32_t>(0xff00));
+ __ psllw(tmp, 8);
+
+ // src = I16x8ShrS(src, shift)
+ // => SAAa SAAa ... SAAa SAAa (shift=4)
+ __ pshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ psraw(src, shift);
+
+ // tmp = I16x8ShrS(tmp, shift)
+ // => Saa0 Saa0 ... Saa0 Saa0 (shift=4)
+ __ pshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ psraw(tmp, shift);
+
+ // src = I16x8And(src, 0xff00)
+ // => SA00 SA00 ... SA00 SA00
+ __ pand(src, kScratchDoubleReg);
+
+ // tmp = I16x8ShrU(tmp, 8)
+ // => 00Sa 00Sa ... 00Sa 00Sa (shift=4)
+ __ psrlw(tmp, 8);
+
+ // src = I16x8Or(src, tmp)
+ // => SASa SASa ... SASa SASa (shift=4)
+ __ por(src, tmp);
+ break;
+ }
+ case kAVXI8x16ShrS: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ // I16x8 view of I8x16
+ // src = AAaa AAaa ... AAaa AAaa
+
+ // tmp = aa00 aa00 ... aa00 aa00
+ __ Move(kScratchDoubleReg, static_cast<uint32_t>(0xff00));
+ __ vpsllw(tmp, src, 8);
+
+ // dst = I16x8ShrS(src, shift)
+ // => SAAa SAAa ... SAAa SAAa (shift=4)
+ __ vpshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ vpsraw(dst, src, shift);
+
+ // tmp = I16x8ShrS(tmp, shift)
+ // => Saa0 Saa0 ... Saa0 Saa0 (shift=4)
+ __ vpshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ vpsraw(tmp, tmp, shift);
+
+ // dst = I16x8And(dst, 0xff00)
+ // => SA00 SA00 ... SA00 SA00
+ __ vpand(dst, dst, kScratchDoubleReg);
+
+ // tmp = I16x8ShrU(tmp, 8)
+ // => 00Sa 00Sa ... 00Sa 00Sa (shift=4)
+ __ vpsrlw(tmp, tmp, 8);
+
+ // dst = I16x8Or(dst, tmp)
+ // => SASa SASa ... SASa SASa (shift=4)
+ __ vpor(dst, dst, tmp);
+ break;
+ }
case kSSEI8x16Add: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddb(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2415,6 +2591,88 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSEI8x16Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister left = i.InputSimd128Register(0);
+ XMMRegister right = i.InputSimd128Register(1);
+ XMMRegister t0 = i.ToSimd128Register(instr->TempAt(0));
+ XMMRegister t1 = i.ToSimd128Register(instr->TempAt(1));
+
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+
+ // t0 = 00AA 00AA ... 00AA 00AA
+ // t1 = 00BB 00BB ... 00BB 00BB
+ __ movaps(t0, left);
+ __ movaps(t1, right);
+ __ Move(kScratchDoubleReg, static_cast<uint32_t>(0x00ff));
+ __ psrlw(t0, 8);
+ __ psrlw(t1, 8);
+
+ // left = I16x8Mul(left, right)
+ // => __pp __pp ... __pp __pp
+ // t0 = I16x8Mul(t0, t1)
+ // => __PP __PP ... __PP __PP
+ __ pshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ pmullw(t0, t1);
+ __ pmullw(left, right);
+ __ pshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+
+ // t0 = I16x8Shl(t0, 8)
+ // => PP00 PP00 ... PP00 PP00
+ __ psllw(t0, 8);
+
+ // left = I16x8And(left, 0x00ff)
+ // => 00pp 00pp ... 00pp 00pp
+ __ pand(left, kScratchDoubleReg);
+
+ // left = I16x8Or(left, t0)
+ // => PPpp PPpp ... PPpp PPpp
+ __ por(left, t0);
+ break;
+ }
+ case kAVXI8x16Mul: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister left = i.InputSimd128Register(0);
+ XMMRegister right = i.InputSimd128Register(1);
+ XMMRegister t0 = i.ToSimd128Register(instr->TempAt(0));
+ XMMRegister t1 = i.ToSimd128Register(instr->TempAt(1));
+
+ // I16x8 view of I8x16
+ // left = AAaa AAaa ... AAaa AAaa
+ // right= BBbb BBbb ... BBbb BBbb
+
+ // t0 = 00AA 00AA ... 00AA 00AA
+ // t1 = 00BB 00BB ... 00BB 00BB
+ __ Move(kScratchDoubleReg, static_cast<uint32_t>(0x00ff));
+ __ vpsrlw(t0, left, 8);
+ __ vpsrlw(t1, right, 8);
+
+ // dst = I16x8Mul(left, right)
+ // => __pp __pp ... __pp __pp
+ __ vpshuflw(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+ __ vpmullw(dst, left, right);
+
+ // t0 = I16x8Mul(t0, t1)
+ // => __PP __PP ... __PP __PP
+ __ vpmullw(t0, t0, t1);
+ __ vpshufd(kScratchDoubleReg, kScratchDoubleReg, 0x0);
+
+ // t0 = I16x8Shl(t0, 8)
+ // => PP00 PP00 ... PP00 PP00
+ __ vpsllw(t0, t0, 8);
+
+ // dst = I16x8And(dst, 0x00ff)
+ // => 00pp 00pp ... 00pp 00pp
+ __ vpand(dst, dst, kScratchDoubleReg);
+
+ // dst = I16x8Or(dst, t0)
+ // => PPpp PPpp ... PPpp PPpp
+ __ vpor(dst, dst, t0);
+ break;
+ }
case kSSEI8x16MinS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
@@ -2516,6 +2774,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSEI8x16ShrU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ // src = AAaa ... AAaa
+ // tmp = F0F0 ... F0F0 (shift=4)
+
+      I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU << shift);  // no byte cast needed
+
+ // src = src & tmp
+ // => A0a0 ... A0a0
+ __ pand(src, tmp);
+
+ // src = src >> shift
+ // => 0A0a ... 0A0a (shift=4)
+ __ psrld(src, shift);
+ break;
+ }
+ case kAVXI8x16ShrU: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t shift = i.InputInt8(1) & 0x7;
+ XMMRegister tmp =
+ dst != src ? dst : i.ToSimd128Register(instr->TempAt(0));
+
+ // src = AAaa ... AAaa
+ // tmp = F0F0 ... F0F0 (shift=4)
+ I8x16_SPLAT(tmp, kScratchDoubleReg, 0xFFU << shift);
+
+ // src = src & tmp
+ // => A0a0 ... A0a0
+ __ vpand(dst, src, tmp);
+
+ // dst = dst >> shift
+ // => 0A0a ... 0A0a (shift=4)
+ __ vpsrld(dst, dst, shift);
+ break;
+ }
+#undef I8x16_SPLAT
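
The shift cases above all work around the absence of per-byte shifts in SSE/AVX: mask each byte so the bits that would cross a lane boundary are cleared, then perform a wider 16/32-bit shift. A scalar model of that identity for the left-shift case, with the multiply by 0x01010101 standing in for I8x16_SPLAT (names here are illustrative, not V8 API):

#include <cassert>
#include <cstdint>

static uint32_t I8x4ShlEmulated(uint32_t packed, int shift) {
  uint32_t byte_mask = 0xFFu >> shift;           // e.g. 0x0F for shift == 4
  uint32_t lane_mask = byte_mask * 0x01010101u;  // splat the mask to each byte
  // With the top {shift} bits of every byte cleared, a 32-bit shift can no
  // longer carry bits across byte boundaries.
  return (packed & lane_mask) << shift;
}

int main() {
  const uint32_t packed = 0x80402211u;
  const int shift = 4;
  uint32_t expected = 0;
  for (int b = 0; b < 4; ++b) {  // Reference: shift each byte independently.
    uint8_t lane = static_cast<uint8_t>(packed >> (8 * b));
    expected |= static_cast<uint32_t>(static_cast<uint8_t>(lane << shift))
                << (8 * b);
  }
  assert(I8x4ShlEmulated(packed, shift) == expected);
  return 0;
}

The unsigned right shift uses the mirrored mask (0xFF << shift) before psrld, while the arithmetic right shift needs the longer psraw-based sequence because the sign bit must be replicated within each byte.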
case kSSEI8x16MinU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pminub(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2631,87 +2931,106 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSES128Select: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ // Mask used here is stored in dst.
+ XMMRegister dst = i.OutputSimd128Register();
+ __ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
+ __ xorps(kScratchDoubleReg, i.InputSimd128Register(2));
+ __ andps(dst, kScratchDoubleReg);
+ __ xorps(dst, i.InputSimd128Register(2));
+ break;
+ }
+ case kAVXS128Select: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ vxorps(kScratchDoubleReg, i.InputSimd128Register(2),
+ i.InputOperand(1));
+ __ vandps(dst, kScratchDoubleReg, i.InputOperand(0));
+ __ vxorps(dst, dst, i.InputSimd128Register(2));
+ break;
+ }
case kIA32StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
- case kAtomicExchangeInt8: {
+ case kWord32AtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeUint8: {
+ case kWord32AtomicExchangeUint8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeInt16: {
+ case kWord32AtomicExchangeInt16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeUint16: {
+ case kWord32AtomicExchangeUint16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeWord32: {
+ case kWord32AtomicExchangeWord32: {
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kAtomicCompareExchangeInt8: {
+ case kWord32AtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_b(eax, eax);
break;
}
- case kAtomicCompareExchangeUint8: {
+ case kWord32AtomicCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(eax, eax);
break;
}
- case kAtomicCompareExchangeInt16: {
+ case kWord32AtomicCompareExchangeInt16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_w(eax, eax);
break;
}
- case kAtomicCompareExchangeUint16: {
+ case kWord32AtomicCompareExchangeUint16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_w(eax, eax);
break;
}
- case kAtomicCompareExchangeWord32: {
+ case kWord32AtomicCompareExchangeWord32: {
__ lock();
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: { \
+ case kWord32Atomic##op##Int8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movsx_b(eax, eax); \
break; \
} \
- case kAtomic##op##Uint8: { \
+ case kWord32Atomic##op##Uint8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movzx_b(eax, eax); \
break; \
} \
- case kAtomic##op##Int16: { \
+ case kWord32Atomic##op##Int16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movsx_w(eax, eax); \
break; \
} \
- case kAtomic##op##Uint16: { \
+ case kWord32Atomic##op##Uint16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movzx_w(eax, eax); \
break; \
} \
- case kAtomic##op##Word32: { \
+ case kWord32Atomic##op##Word32: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
break; \
}
@@ -2721,14 +3040,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, or_)
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
- case kAtomicLoadInt8:
- case kAtomicLoadUint8:
- case kAtomicLoadInt16:
- case kAtomicLoadUint16:
- case kAtomicLoadWord32:
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
@@ -2798,6 +3117,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel);
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ UNREACHABLE();
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -2843,8 +3167,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
// Use ecx as a scratch register; we return immediately anyway.
__ Ret(static_cast<int>(pop_size), ecx);
} else {
@@ -3070,8 +3394,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
// ^ esp ^ ebp
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves = descriptor->CalleeSavedRegisters();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
DCHECK(!info()->is_osr());
int pushed = 0;
@@ -3084,14 +3408,14 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ push(ebp);
__ mov(ebp, esp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -3099,8 +3423,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3115,7 +3439,7 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (shrink_slots > 0) {
if (info()->IsWasm() && shrink_slots > 128) {
// For WebAssembly functions with big frames we have to do the stack
@@ -3174,9 +3498,9 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
// Restore registers.
if (saves != 0) {
const int returns = frame()->GetReturnSlotCount();
@@ -3191,10 +3515,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Might need ecx for scratch if pop_size is too big or if there is a variable
// pop count.
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
IA32OperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now if they always have the same
@@ -3211,8 +3535,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
AssembleDeconstructFrame();
}
}
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & edx.bit());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
if (pop->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
@@ -3231,119 +3555,129 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
IA32OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- Operand dst = g.ToOperand(destination);
- __ mov(dst, src);
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Operand src = g.ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ mov(dst, src);
- } else {
- Operand dst = g.ToOperand(destination);
- __ push(src);
- __ pop(dst);
- }
- } else if (source->IsConstant()) {
- Constant src_constant = g.ToConstant(source);
- if (src_constant.type() == Constant::kHeapObject) {
- Handle<HeapObject> src = src_constant.ToHeapObject();
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(dst, src);
+ // Dispatch on the source and destination operand kinds.
+ switch (MoveType::InferMove(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ mov(g.ToRegister(destination), g.ToRegister(source));
} else {
- DCHECK(destination->IsStackSlot());
- Operand dst = g.ToOperand(destination);
- __ mov(dst, src);
+ DCHECK(source->IsFPRegister());
+ __ movaps(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
}
- } else if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(dst, g.ToImmediate(source));
- } else if (destination->IsStackSlot()) {
+ return;
+ case MoveType::kRegisterToStack: {
Operand dst = g.ToOperand(destination);
- __ Move(dst, g.ToImmediate(source));
- } else if (src_constant.type() == Constant::kFloat32) {
- // TODO(turbofan): Can we do better here?
- uint32_t src = src_constant.ToFloat32AsInt();
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ if (source->IsRegister()) {
+ __ mov(dst, g.ToRegister(source));
} else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst = g.ToOperand(destination);
- __ Move(dst, Immediate(src));
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(dst, src);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(dst, src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(dst, src);
+ }
}
- } else {
- DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = src_constant.ToFloat64().AsUint64();
- uint32_t lower = static_cast<uint32_t>(src);
- uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ return;
+ }
+ case MoveType::kStackToRegister: {
+ Operand src = g.ToOperand(source);
+ if (source->IsStackSlot()) {
+ __ mov(g.ToRegister(destination), src);
} else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst0 = g.ToOperand(destination);
- Operand dst1 = g.ToOperand(destination, kPointerSize);
- __ Move(dst0, Immediate(lower));
- __ Move(dst1, Immediate(upper));
+ DCHECK(source->IsFPStackSlot());
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(dst, src);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(dst, src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(dst, src);
+ }
}
+ return;
}
- } else if (source->IsFPRegister()) {
- XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(dst, src);
- } else {
- DCHECK(destination->IsFPStackSlot());
+ case MoveType::kStackToStack: {
+ Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
- MachineRepresentation rep =
- LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(dst, src);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(dst, src);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(dst, src);
- }
- }
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- Operand src = g.ToOperand(source);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(dst, src);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(dst, src);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(dst, src);
+ if (source->IsStackSlot()) {
+ __ push(src);
+ __ pop(dst);
+ } else {
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, src);
+ __ movss(dst, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(kScratchDoubleReg, src);
+ __ movsd(dst, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(kScratchDoubleReg, src);
+ __ movups(dst, kScratchDoubleReg);
+ }
}
- } else {
+ return;
+ }
+ case MoveType::kConstantToRegister: {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ if (src.type() == Constant::kHeapObject) {
+ __ Move(dst, src.ToHeapObject());
+ } else {
+ __ Move(dst, g.ToImmediate(source));
+ }
+ } else {
+ DCHECK(destination->IsFPRegister());
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ if (src.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ __ Move(dst, src.ToFloat32AsInt());
+ } else {
+ DCHECK_EQ(src.type(), Constant::kFloat64);
+ __ Move(dst, src.ToFloat64().AsUint64());
+ }
+ }
+ return;
+ }
+ case MoveType::kConstantToStack: {
+ Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(kScratchDoubleReg, src);
- __ movsd(dst, kScratchDoubleReg);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(kScratchDoubleReg, src);
- __ movss(dst, kScratchDoubleReg);
+ if (destination->IsStackSlot()) {
+ if (src.type() == Constant::kHeapObject) {
+ __ mov(dst, src.ToHeapObject());
+ } else {
+ __ Move(dst, g.ToImmediate(source));
+ }
} else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(kScratchDoubleReg, src);
- __ movups(dst, kScratchDoubleReg);
+ DCHECK(destination->IsFPStackSlot());
+ if (src.type() == Constant::kFloat32) {
+ __ Move(dst, Immediate(src.ToFloat32AsInt()));
+ } else {
+ DCHECK_EQ(src.type(), Constant::kFloat64);
+ uint64_t constant_value = src.ToFloat64().AsUint64();
+ uint32_t lower = static_cast<uint32_t>(constant_value);
+ uint32_t upper = static_cast<uint32_t>(constant_value >> 32);
+ Operand dst0 = dst;
+ Operand dst1 = g.ToOperand(destination, kPointerSize);
+ __ Move(dst0, Immediate(lower));
+ __ Move(dst1, Immediate(upper));
+ }
}
+ return;
}
- } else {
- UNREACHABLE();
}
+ UNREACHABLE();
}
@@ -3352,94 +3686,106 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = g.ToRegister(source);
- Register dst = g.ToRegister(destination);
- __ push(src);
- __ mov(src, dst);
- __ pop(dst);
- } else if (source->IsRegister() && destination->IsStackSlot()) {
- // Register-memory.
- Register src = g.ToRegister(source);
- __ push(src);
- frame_access_state()->IncreaseSPDelta(1);
- Operand dst = g.ToOperand(destination);
- __ mov(src, dst);
- frame_access_state()->IncreaseSPDelta(-1);
- dst = g.ToOperand(destination);
- __ pop(dst);
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory.
- Operand dst1 = g.ToOperand(destination);
- __ push(dst1);
- frame_access_state()->IncreaseSPDelta(1);
- Operand src1 = g.ToOperand(source);
- __ push(src1);
- Operand dst2 = g.ToOperand(destination);
- __ pop(dst2);
- frame_access_state()->IncreaseSPDelta(-1);
- Operand src2 = g.ToOperand(source);
- __ pop(src2);
- } else if (source->IsFPRegister() && destination->IsFPRegister()) {
- // XMM register-register swap.
- XMMRegister src = g.ToDoubleRegister(source);
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(kScratchDoubleReg, src);
- __ movaps(src, dst);
- __ movaps(dst, kScratchDoubleReg);
- } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
- // XMM register-memory swap.
- XMMRegister reg = g.ToDoubleRegister(source);
- Operand other = g.ToOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(kScratchDoubleReg, other);
- __ movsd(other, reg);
- __ movaps(reg, kScratchDoubleReg);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(kScratchDoubleReg, other);
- __ movss(other, reg);
- __ movaps(reg, kScratchDoubleReg);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(kScratchDoubleReg, other);
- __ movups(other, reg);
- __ movups(reg, kScratchDoubleReg);
- }
- } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
- // Double-width memory-to-memory.
- Operand src0 = g.ToOperand(source);
- Operand dst0 = g.ToOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- __ movsd(kScratchDoubleReg, dst0); // Save dst in scratch register.
- __ push(src0); // Then use stack to copy src to destination.
- __ pop(dst0);
- __ push(g.ToOperand(source, kPointerSize));
- __ pop(g.ToOperand(destination, kPointerSize));
- __ movsd(src0, kScratchDoubleReg);
- } else if (rep == MachineRepresentation::kFloat32) {
- __ movss(kScratchDoubleReg, dst0); // Save dst in scratch register.
- __ push(src0); // Then use stack to copy src to destination.
- __ pop(dst0);
- __ movss(src0, kScratchDoubleReg);
- } else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ movups(kScratchDoubleReg, dst0); // Save dst in scratch register.
- __ push(src0); // Then use stack to copy src to destination.
- __ pop(dst0);
- __ push(g.ToOperand(source, kPointerSize));
- __ pop(g.ToOperand(destination, kPointerSize));
- __ push(g.ToOperand(source, 2 * kPointerSize));
- __ pop(g.ToOperand(destination, 2 * kPointerSize));
- __ push(g.ToOperand(source, 3 * kPointerSize));
- __ pop(g.ToOperand(destination, 3 * kPointerSize));
- __ movups(src0, kScratchDoubleReg);
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister: {
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ push(src);
+ __ mov(src, dst);
+ __ pop(dst);
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movaps(kScratchDoubleReg, src);
+ __ movaps(src, dst);
+ __ movaps(dst, kScratchDoubleReg);
+ }
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ case MoveType::kRegisterToStack: {
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ __ push(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand dst = g.ToOperand(destination);
+ __ mov(src, dst);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ pop(dst);
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ Operand dst = g.ToOperand(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, dst);
+ __ movss(dst, src);
+ __ movaps(src, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(kScratchDoubleReg, dst);
+ __ movsd(dst, src);
+ __ movaps(src, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(kScratchDoubleReg, dst);
+ __ movups(dst, src);
+ __ movups(src, kScratchDoubleReg);
+ }
+ }
+ return;
+ }
+ case MoveType::kStackToStack: {
+ if (source->IsStackSlot()) {
+ Operand dst1 = g.ToOperand(destination);
+ __ push(dst1);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand src1 = g.ToOperand(source);
+ __ push(src1);
+ Operand dst2 = g.ToOperand(destination);
+ __ pop(dst2);
+ frame_access_state()->IncreaseSPDelta(-1);
+ Operand src2 = g.ToOperand(source);
+ __ pop(src2);
+ } else {
+ DCHECK(source->IsFPStackSlot());
+ Operand src0 = g.ToOperand(source);
+ Operand dst0 = g.ToOperand(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, dst0); // Save dst in scratch register.
+ __ push(src0); // Then use stack to copy src to destination.
+ __ pop(dst0);
+ __ movss(src0, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(kScratchDoubleReg, dst0); // Save dst in scratch register.
+ __ push(src0); // Then use stack to copy src to destination.
+ __ pop(dst0);
+ __ push(g.ToOperand(source, kPointerSize));
+ __ pop(g.ToOperand(destination, kPointerSize));
+ __ movsd(src0, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(kScratchDoubleReg, dst0); // Save dst in scratch register.
+ __ push(src0); // Then use stack to copy src to destination.
+ __ pop(dst0);
+ __ push(g.ToOperand(source, kPointerSize));
+ __ pop(g.ToOperand(destination, kPointerSize));
+ __ push(g.ToOperand(source, 2 * kPointerSize));
+ __ pop(g.ToOperand(destination, 2 * kPointerSize));
+ __ push(g.ToOperand(source, 3 * kPointerSize));
+ __ pop(g.ToOperand(destination, 3 * kPointerSize));
+ __ movups(src0, kScratchDoubleReg);
+ }
+ }
+ return;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
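All three swap shapes above avoid reserving a scratch general-purpose register by routing data through the machine stack (push/pop) or the dedicated kScratchDoubleReg. A scalar sketch of the stack-slot exchange, as a hypothetical stand-in rather than V8 code:

#include <cstdint>

// The push/push/pop/pop sequence in kStackToStack amounts to exchanging
// two memory words through the stack instead of through a register.
void SwapSlots(uint32_t* a, uint32_t* b) {
  uint32_t saved = *a;  // push a's word
  *a = *b;              // push b's word, pop it into a's slot
  *b = saved;           // pop the saved word into b's slot
}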
@@ -3451,6 +3797,13 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
#undef __
+#undef kScratchDoubleReg
+#undef ASSEMBLE_COMPARE
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+#undef ASSEMBLE_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_MOVX
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index a17d9f06ce..55833df4d4 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -229,6 +229,10 @@ namespace compiler {
V(IA32I8x16ExtractLane) \
V(SSEI8x16ReplaceLane) \
V(AVXI8x16ReplaceLane) \
+ V(SSEI8x16Shl) \
+ V(AVXI8x16Shl) \
+ V(SSEI8x16ShrS) \
+ V(AVXI8x16ShrS) \
V(IA32I8x16Neg) \
V(SSEI8x16Add) \
V(AVXI8x16Add) \
@@ -238,6 +242,8 @@ namespace compiler {
V(AVXI8x16Sub) \
V(SSEI8x16SubSaturateS) \
V(AVXI8x16SubSaturateS) \
+ V(SSEI8x16Mul) \
+ V(AVXI8x16Mul) \
V(SSEI8x16MinS) \
V(AVXI8x16MinS) \
V(SSEI8x16MaxS) \
@@ -254,6 +260,8 @@ namespace compiler {
V(AVXI8x16AddSaturateU) \
V(SSEI8x16SubSaturateU) \
V(AVXI8x16SubSaturateU) \
+ V(SSEI8x16ShrU) \
+ V(AVXI8x16ShrU) \
V(SSEI8x16MinU) \
V(AVXI8x16MinU) \
V(SSEI8x16MaxU) \
@@ -270,7 +278,9 @@ namespace compiler {
V(SSES128Or) \
V(AVXS128Or) \
V(SSES128Xor) \
- V(AVXS128Xor)
+ V(AVXS128Xor) \
+ V(SSES128Select) \
+ V(AVXS128Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
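The V(...) entries above follow the X-macro idiom used throughout this header: the opcode list is written once and expanded at each consumer (the opcode enum, the mnemonic table, the scheduler switch below). A compilable sketch of the pattern with invented names:

// One list, many expansions.
#define DEMO_OPCODE_LIST(V) \
  V(SSEI8x16Shl)            \
  V(AVXI8x16Shl)            \
  V(SSES128Select)

enum DemoOpcode {
#define DECLARE_OPCODE(Name) kDemo##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

inline const char* DemoOpcodeName(DemoOpcode op) {
#define OPCODE_NAME_CASE(Name) \
  if (op == kDemo##Name) return #Name;
  DEMO_OPCODE_LIST(OPCODE_NAME_CASE)
#undef OPCODE_NAME_CASE
  return "unknown";
}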
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index db43c1ed1c..3c2207eee2 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -212,6 +212,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEI8x16ReplaceLane:
case kAVXI8x16ReplaceLane:
case kIA32I8x16Neg:
+ case kSSEI8x16Shl:
+ case kAVXI8x16Shl:
+ case kSSEI8x16ShrS:
+ case kAVXI8x16ShrS:
case kSSEI8x16Add:
case kAVXI8x16Add:
case kSSEI8x16AddSaturateS:
@@ -220,6 +224,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16Sub:
case kSSEI8x16SubSaturateS:
case kAVXI8x16SubSaturateS:
+ case kSSEI8x16Mul:
+ case kAVXI8x16Mul:
case kSSEI8x16MinS:
case kAVXI8x16MinS:
case kSSEI8x16MaxS:
@@ -236,6 +242,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16AddSaturateU:
case kSSEI8x16SubSaturateU:
case kAVXI8x16SubSaturateU:
+ case kSSEI8x16ShrU:
+ case kAVXI8x16ShrU:
case kSSEI8x16MinU:
case kAVXI8x16MinU:
case kSSEI8x16MaxU:
@@ -253,6 +261,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXS128Or:
case kSSES128Xor:
case kAVXS128Xor:
+ case kSSES128Select:
+ case kAVXS128Select:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index d8bf250ec6..aa6e9fd607 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -77,6 +77,8 @@ class IA32OperandGenerator final : public OperandGenerator {
Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
Isolate* isolate = value->GetIsolate();
return !isolate->heap()->InNewSpace(*value);
+#else
+ return false;
#endif
}
default:
@@ -208,6 +210,20 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
}
}
+void VisitRRISimd(InstructionSelector* selector, Node* node,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1,
+ arraysize(temps), temps);
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1,
+ arraysize(temps), temps);
+ }
+}
} // namespace
@@ -271,9 +287,15 @@ void InstructionSelector::VisitLoad(Node* node) {
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ code |= MiscField::encode(kMemoryAccessPoisoned);
+ }
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -710,27 +732,29 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
}
-#define RO_OP_LIST(V) \
- V(Word32Clz, kIA32Lzcnt) \
- V(Word32Ctz, kIA32Tzcnt) \
- V(Word32Popcnt, kIA32Popcnt) \
- V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
- V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
- V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
- V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
- V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
- V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
- V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
- V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
- V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
- V(BitcastFloat32ToInt32, kIA32BitcastFI) \
- V(BitcastInt32ToFloat32, kIA32BitcastIF) \
- V(Float32Sqrt, kSSEFloat32Sqrt) \
- V(Float64Sqrt, kSSEFloat64Sqrt) \
- V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
- V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
+#define RO_OP_LIST(V) \
+ V(Word32Clz, kIA32Lzcnt) \
+ V(Word32Ctz, kIA32Tzcnt) \
+ V(Word32Popcnt, kIA32Popcnt) \
+ V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
+ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
+ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
+ V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
+ V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
+ V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
+ V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
+ V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \
+ V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
+ V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
+ V(BitcastFloat32ToInt32, kIA32BitcastFI) \
+ V(BitcastInt32ToFloat32, kIA32BitcastIF) \
+ V(Float32Sqrt, kSSEFloat32Sqrt) \
+ V(Float64Sqrt, kSSEFloat64Sqrt) \
+ V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
+ V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32, kIA32Movsxbl) \
+ V(SignExtendWord16ToInt32, kIA32Movsxwl)
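The two entries appended to RO_OP_LIST map the new sign-extension operators onto single ia32 instructions. A scalar sketch of their semantics:

#include <cstdint>

// Sign extension widens by replicating the sign bit; movsxbl / movsxwl
// do this in one instruction.
int32_t SignExtendWord8ToInt32(int8_t v) { return v; }    // movsxbl
int32_t SignExtendWord16ToInt32(int16_t v) { return v; }  // movsxwl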
#define RR_OP_LIST(V) \
V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
@@ -766,6 +790,7 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
}
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
+#undef RO_OP_LIST
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -773,6 +798,7 @@ RO_OP_LIST(RO_VISITOR)
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
#define RRO_FLOAT_VISITOR(Name, avx, sse) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -780,6 +806,7 @@ RR_OP_LIST(RR_VISITOR)
}
RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
#undef RRO_FLOAT_VISITOR
+#undef RRO_FLOAT_OP_LIST
#define FLOAT_UNOP_VISITOR(Name, avx, sse) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -787,6 +814,7 @@ RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
}
FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
#undef FLOAT_UNOP_VISITOR
+#undef FLOAT_UNOP_LIST
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
@@ -955,16 +983,16 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
IA32OperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
InstructionOperand temps[] = {g.TempRegister()};
size_t const temp_count = arraysize(temps);
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr, temp_count, temps);
// Poke any stack arguments.
@@ -1015,9 +1043,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
IA32OperandGenerator g(this);
int reverse_slot = 0;
@@ -1025,7 +1053,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
if (output.node != nullptr) {
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
@@ -1270,30 +1298,22 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Compare(Load(js_stack_limit), LoadStackPointer)
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode = cont->Encode(kIA32StackCheck);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
- selector->Emit(opcode, g.DefineAsRegister(cont->result()));
- }
+ CHECK(cont->IsBranch());
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
return;
}
}
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
+} // namespace
// Shared routine for word comparison with zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1302,41 +1322,41 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1348,17 +1368,17 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kIA32Add, cont);
+ return VisitBinop(this, node, kIA32Add, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kIA32Sub, cont);
+ return VisitBinop(this, node, kIA32Sub, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kIA32Imul, cont);
+ return VisitBinop(this, node, kIA32Imul, cont);
default:
break;
}
@@ -1366,52 +1386,17 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Sub:
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kIA32Test, cont);
+ return VisitWordCompare(this, value, kIA32Test, cont);
default:
break;
}
}
// Continuation could not be combined with a compare; emit a compare against 0.
- IA32OperandGenerator g(selector);
- VisitCompare(selector, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
-}
-
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+ IA32OperandGenerator g(this);
+ VisitCompare(this, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
}
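The loop at the top of this method peels Word32Equal(v, 0) wrappers by negating the continuation once per layer, so a chain of equality-with-zero tests still costs a single compare. A scalar sketch of the equivalence being exploited (hypothetical helper):

#include <cstdint>

// Word32Equal(v, 0) is the logical negation of v as a truth value, so
// each peeled wrapper flips the branch sense instead of emitting a cmp.
bool PeeledBranchSense(uint32_t v, int equal_zero_wrappers) {
  bool sense = (v != 0);
  for (int i = 0; i < equal_zero_wrappers; ++i) sense = !sense;
  return sense;
}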
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -1419,24 +1404,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 4 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
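The cost model now guarded by enable_switch_jump_table_ weighs a jump table against a chain of conditional jumps. A worked sketch of the comparison (the min-value and value-range guards are omitted here): with case_count = 10 and value_range = 16, the table costs (4 + 16) + 3*3 = 29 against (3 + 2*10) + 3*10 = 53 for the lookup chain, so the table wins.

#include <cstddef>

// The space/time trade-off from VisitSwitch, with the same weights as
// the source above.
bool PreferTableSwitch(size_t case_count, size_t value_range) {
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 4 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost;
}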
@@ -1448,7 +1435,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, &cont);
}
@@ -1577,7 +1564,7 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
@@ -1586,7 +1573,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
VisitLoad(node);
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1596,13 +1583,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
break;
default:
UNREACHABLE();
@@ -1628,7 +1615,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1637,15 +1624,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -1677,7 +1664,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
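The opcode chosen here only selects the access width; the operation itself is a plain atomic swap. A sketch of the semantics in portable C++:

#include <atomic>
#include <cstdint>

// Word32AtomicExchange: an unconditional atomic swap, which on ia32 maps
// onto the implicitly locked xchg instruction.
uint32_t AtomicExchange32(std::atomic<uint32_t>* cell, uint32_t desired) {
  return cell->exchange(desired, std::memory_order_seq_cst);
}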
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1687,15 +1674,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -1771,11 +1758,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -1888,11 +1876,52 @@ void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
}
}
+#define SIMD_I8X16_SHIFT_OPCODES(V) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define VISIT_SIMD_I8X16_SHIFT(Op) \
+ void InstructionSelector::Visit##Op(Node* node) { \
+ VisitRRISimd(this, node, kAVX##Op, kSSE##Op); \
+ }
+
+SIMD_I8X16_SHIFT_OPCODES(VISIT_SIMD_I8X16_SHIFT)
+#undef SIMD_I8X16_SHIFT_OPCODES
+#undef VISIT_SIMD_I8X16_SHIFT
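These shift visitors reserve a Simd128 temp because SSE has no 8-bit shift instruction. One conventional emulation, an assumption about the general technique rather than the exact sequence V8 emits, shifts 16-bit lanes and then masks off the bits that crossed byte boundaries:

#include <emmintrin.h>  // SSE2

// Left-shift each of 16 bytes: shift 16-bit lanes, then clear the low
// `shift` bits of every byte, which in the high byte of each lane are
// contamination from the neighboring byte.
__m128i I8x16ShlEmulated(__m128i v, int shift) {
  __m128i wide = _mm_slli_epi16(v, shift);
  char keep = static_cast<char>(0xFF << shift);
  return _mm_and_si128(wide, _mm_set1_epi8(keep));
}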
+
+void InstructionSelector::VisitI8x16Mul(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
+ InstructionOperand temps[] = {g.TempSimd128Register(),
+ g.TempSimd128Register()};
+ if (IsSupported(AVX)) {
+ Emit(kAVXI8x16Mul, g.DefineAsRegister(node), operand0, operand1,
+ arraysize(temps), temps);
+ } else {
+ Emit(kSSEI8x16Mul, g.DefineSameAsFirst(node), operand0, operand1,
+ arraysize(temps), temps);
+ }
+}
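Two temps here, for the same reason: SSE has no byte multiply. One standard emulation (again an assumption about the technique, not V8's exact code) widens to 16-bit lanes, multiplies, keeps the low byte of each product, and packs back:

#include <emmintrin.h>  // SSE2

// i8x16 multiply via 16-bit lanes. Products are masked to 0x00FF before
// the saturating pack, so packus is exact here.
__m128i I8x16MulEmulated(__m128i a, __m128i b) {
  __m128i zero = _mm_setzero_si128();
  __m128i lo = _mm_mullo_epi16(_mm_unpacklo_epi8(a, zero),
                               _mm_unpacklo_epi8(b, zero));
  __m128i hi = _mm_mullo_epi16(_mm_unpackhi_epi8(a, zero),
                               _mm_unpackhi_epi8(b, zero));
  __m128i mask = _mm_set1_epi16(0x00FF);
  return _mm_packus_epi16(_mm_and_si128(lo, mask), _mm_and_si128(hi, mask));
}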
+
void InstructionSelector::VisitS128Zero(Node* node) {
IA32OperandGenerator g(this);
Emit(kIA32S128Zero, g.DefineAsRegister(node));
}
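kIA32S128Zero needs no inputs because an x86 register is zeroed by xor-ing it with itself, a dependency-breaking idiom. The portable equivalent:

#include <emmintrin.h>

// _mm_setzero_si128 typically compiles to pxor reg, reg.
__m128i S128Zero() { return _mm_setzero_si128(); }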
+void InstructionSelector::VisitS128Select(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand2 = g.UseRegister(node->InputAt(2));
+ if (IsSupported(AVX)) {
+ Emit(kAVXS128Select, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ g.Use(node->InputAt(1)), operand2);
+ } else {
+ Emit(kSSES128Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ operand2);
+ }
+}
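S128Select is a bitwise blend: every result bit comes from the first value input where the corresponding mask bit is set and from the second elsewhere. A scalar sketch of one 32-bit lane:

#include <cstdint>

// result = (mask & on_true) | (~mask & on_false), bit by bit.
uint32_t S128SelectScalar(uint32_t mask, uint32_t on_true,
                          uint32_t on_false) {
  return (mask & on_true) | (~mask & on_false);
}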
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
@@ -1928,6 +1957,7 @@ SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
+#undef SIMD_INT_TYPES
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -1942,6 +1972,7 @@ VISIT_SIMD_REPLACE_LANE(F32x4)
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
+#undef SIMD_SHIFT_OPCODES
#define VISIT_SIMD_INT_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -1950,6 +1981,7 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
}
SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
#undef VISIT_SIMD_INT_UNOP
+#undef SIMD_INT_UNOP_LIST
#define VISIT_SIMD_OTHER_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -1959,6 +1991,7 @@ SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
}
SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
#undef VISIT_SIMD_OTHER_UNOP
+#undef SIMD_OTHER_UNOP_LIST
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -1966,6 +1999,7 @@ SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
+#undef SIMD_BINOP_LIST
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
@@ -2005,6 +2039,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index df3078d739..035833af0f 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -69,49 +69,49 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
V(ArchStackSlot) \
- V(AtomicLoadInt8) \
- V(AtomicLoadUint8) \
- V(AtomicLoadInt16) \
- V(AtomicLoadUint16) \
- V(AtomicLoadWord32) \
- V(AtomicStoreWord8) \
- V(AtomicStoreWord16) \
- V(AtomicStoreWord32) \
- V(AtomicExchangeInt8) \
- V(AtomicExchangeUint8) \
- V(AtomicExchangeInt16) \
- V(AtomicExchangeUint16) \
- V(AtomicExchangeWord32) \
- V(AtomicCompareExchangeInt8) \
- V(AtomicCompareExchangeUint8) \
- V(AtomicCompareExchangeInt16) \
- V(AtomicCompareExchangeUint16) \
- V(AtomicCompareExchangeWord32) \
- V(AtomicAddInt8) \
- V(AtomicAddUint8) \
- V(AtomicAddInt16) \
- V(AtomicAddUint16) \
- V(AtomicAddWord32) \
- V(AtomicSubInt8) \
- V(AtomicSubUint8) \
- V(AtomicSubInt16) \
- V(AtomicSubUint16) \
- V(AtomicSubWord32) \
- V(AtomicAndInt8) \
- V(AtomicAndUint8) \
- V(AtomicAndInt16) \
- V(AtomicAndUint16) \
- V(AtomicAndWord32) \
- V(AtomicOrInt8) \
- V(AtomicOrUint8) \
- V(AtomicOrInt16) \
- V(AtomicOrUint16) \
- V(AtomicOrWord32) \
- V(AtomicXorInt8) \
- V(AtomicXorUint8) \
- V(AtomicXorInt16) \
- V(AtomicXorUint16) \
- V(AtomicXorWord32) \
+ V(Word32AtomicLoadInt8) \
+ V(Word32AtomicLoadUint8) \
+ V(Word32AtomicLoadInt16) \
+ V(Word32AtomicLoadUint16) \
+ V(Word32AtomicLoadWord32) \
+ V(Word32AtomicStoreWord8) \
+ V(Word32AtomicStoreWord16) \
+ V(Word32AtomicStoreWord32) \
+ V(Word32AtomicExchangeInt8) \
+ V(Word32AtomicExchangeUint8) \
+ V(Word32AtomicExchangeInt16) \
+ V(Word32AtomicExchangeUint16) \
+ V(Word32AtomicExchangeWord32) \
+ V(Word32AtomicCompareExchangeInt8) \
+ V(Word32AtomicCompareExchangeUint8) \
+ V(Word32AtomicCompareExchangeInt16) \
+ V(Word32AtomicCompareExchangeUint16) \
+ V(Word32AtomicCompareExchangeWord32) \
+ V(Word32AtomicAddInt8) \
+ V(Word32AtomicAddUint8) \
+ V(Word32AtomicAddInt16) \
+ V(Word32AtomicAddUint16) \
+ V(Word32AtomicAddWord32) \
+ V(Word32AtomicSubInt8) \
+ V(Word32AtomicSubUint8) \
+ V(Word32AtomicSubInt16) \
+ V(Word32AtomicSubUint16) \
+ V(Word32AtomicSubWord32) \
+ V(Word32AtomicAndInt8) \
+ V(Word32AtomicAndUint8) \
+ V(Word32AtomicAndInt16) \
+ V(Word32AtomicAndUint16) \
+ V(Word32AtomicAndWord32) \
+ V(Word32AtomicOrInt8) \
+ V(Word32AtomicOrUint8) \
+ V(Word32AtomicOrInt16) \
+ V(Word32AtomicOrUint16) \
+ V(Word32AtomicOrWord32) \
+ V(Word32AtomicXorInt8) \
+ V(Word32AtomicXorUint8) \
+ V(Word32AtomicXorInt16) \
+ V(Word32AtomicXorUint16) \
+ V(Word32AtomicXorWord32) \
V(Ieee754Float64Acos) \
V(Ieee754Float64Acosh) \
V(Ieee754Float64Asin) \
@@ -174,9 +174,11 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
- kFlags_deoptimize = 2,
- kFlags_set = 3,
- kFlags_trap = 4
+ kFlags_branch_and_poison = 2,
+ kFlags_deoptimize = 3,
+ kFlags_deoptimize_and_poison = 4,
+ kFlags_set = 5,
+ kFlags_trap = 6
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -219,6 +221,12 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const FlagsCondition& fc);
+enum MemoryAccessMode {
+ kMemoryAccessDirect = 0,
+ kMemoryAccessProtected = 1,
+ kMemoryAccessPoisoned = 2
+};
+
// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
// interesting to the register allocator, as the inputs and flags on the
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index f7afaab697..905ae4e6f0 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -304,53 +304,53 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
- case kAtomicLoadInt8:
- case kAtomicLoadUint8:
- case kAtomicLoadInt16:
- case kAtomicLoadUint16:
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
return kIsLoadOperation;
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
return kHasSideEffect;
- case kAtomicExchangeInt8:
- case kAtomicExchangeUint8:
- case kAtomicExchangeInt16:
- case kAtomicExchangeUint16:
- case kAtomicExchangeWord32:
- case kAtomicCompareExchangeInt8:
- case kAtomicCompareExchangeUint8:
- case kAtomicCompareExchangeInt16:
- case kAtomicCompareExchangeUint16:
- case kAtomicCompareExchangeWord32:
- case kAtomicAddInt8:
- case kAtomicAddUint8:
- case kAtomicAddInt16:
- case kAtomicAddUint16:
- case kAtomicAddWord32:
- case kAtomicSubInt8:
- case kAtomicSubUint8:
- case kAtomicSubInt16:
- case kAtomicSubUint16:
- case kAtomicSubWord32:
- case kAtomicAndInt8:
- case kAtomicAndUint8:
- case kAtomicAndInt16:
- case kAtomicAndUint16:
- case kAtomicAndWord32:
- case kAtomicOrInt8:
- case kAtomicOrUint8:
- case kAtomicOrInt16:
- case kAtomicOrUint16:
- case kAtomicOrWord32:
- case kAtomicXorInt8:
- case kAtomicXorUint8:
- case kAtomicXorInt16:
- case kAtomicXorUint16:
- case kAtomicXorWord32:
+ case kWord32AtomicExchangeInt8:
+ case kWord32AtomicExchangeUint8:
+ case kWord32AtomicExchangeInt16:
+ case kWord32AtomicExchangeUint16:
+ case kWord32AtomicExchangeWord32:
+ case kWord32AtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeWord32:
+ case kWord32AtomicAddInt8:
+ case kWord32AtomicAddUint8:
+ case kWord32AtomicAddInt16:
+ case kWord32AtomicAddUint16:
+ case kWord32AtomicAddWord32:
+ case kWord32AtomicSubInt8:
+ case kWord32AtomicSubUint8:
+ case kWord32AtomicSubInt16:
+ case kWord32AtomicSubUint16:
+ case kWord32AtomicSubWord32:
+ case kWord32AtomicAndInt8:
+ case kWord32AtomicAndUint8:
+ case kWord32AtomicAndInt16:
+ case kWord32AtomicAndUint16:
+ case kWord32AtomicAndWord32:
+ case kWord32AtomicOrInt8:
+ case kWord32AtomicOrUint8:
+ case kWord32AtomicOrInt16:
+ case kWord32AtomicOrUint16:
+ case kWord32AtomicOrWord32:
+ case kWord32AtomicXorInt8:
+ case kWord32AtomicXorUint8:
+ case kWord32AtomicXorInt16:
+ case kWord32AtomicXorUint16:
+ case kWord32AtomicXorWord32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
@@ -365,7 +365,8 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
- (instr->flags_mode() == kFlags_branch));
+ (instr->flags_mode() == kFlags_branch) ||
+ (instr->flags_mode() == kFlags_branch_and_poison));
}
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 7c7a2708c5..56ccd9fc64 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -206,6 +206,15 @@ class OperandGenerator {
return op;
}
+ InstructionOperand TempSimd128Register() {
+ UnallocatedOperand op = UnallocatedOperand(
+ UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
+ sequence()->MarkAsRepresentation(MachineRepresentation::kSimd128,
+ op.virtual_register());
+ return op;
+ }
+
InstructionOperand TempRegister(Register reg) {
return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, reg.code(),
InstructionOperand::kInvalidVirtualRegister);
@@ -353,14 +362,21 @@ class FlagsContinuation final {
// Creates a new flags continuation from the given condition and true/false
// blocks.
- FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
- BasicBlock* false_block)
- : mode_(kFlags_branch),
- condition_(condition),
- true_block_(true_block),
- false_block_(false_block) {
- DCHECK_NOT_NULL(true_block);
- DCHECK_NOT_NULL(false_block);
+ static FlagsContinuation ForBranch(FlagsCondition condition,
+ BasicBlock* true_block,
+ BasicBlock* false_block,
+ LoadPoisoning masking) {
+ FlagsMode mode = masking == LoadPoisoning::kDoPoison
+ ? kFlags_branch_and_poison
+ : kFlags_branch;
+ return FlagsContinuation(mode, condition, true_block, false_block);
+ }
+
+ static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
+ BasicBlock* true_block,
+ BasicBlock* false_block) {
+ return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
+ false_block);
}
// Creates a new flags continuation for an eager deoptimization exit.
@@ -368,8 +384,13 @@ class FlagsContinuation final {
DeoptimizeKind kind,
DeoptimizeReason reason,
VectorSlotPair const& feedback,
- Node* frame_state) {
- return FlagsContinuation(condition, kind, reason, feedback, frame_state);
+ Node* frame_state,
+ LoadPoisoning masking) {
+ FlagsMode mode = masking == LoadPoisoning::kDoPoison
+ ? kFlags_deoptimize_and_poison
+ : kFlags_deoptimize;
+ return FlagsContinuation(mode, condition, kind, reason, feedback,
+ frame_state);
}
// Creates a new flags continuation for a boolean value.
@@ -384,8 +405,16 @@ class FlagsContinuation final {
}
bool IsNone() const { return mode_ == kFlags_none; }
- bool IsBranch() const { return mode_ == kFlags_branch; }
- bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
+ bool IsBranch() const {
+ return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
+ }
+ bool IsDeoptimize() const {
+ return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
+ }
+ bool IsPoisoned() const {
+ return mode_ == kFlags_branch_and_poison ||
+ mode_ == kFlags_deoptimize_and_poison;
+ }
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
FlagsCondition condition() const {
@@ -473,17 +502,30 @@ class FlagsContinuation final {
}
private:
- FlagsContinuation(FlagsCondition condition, DeoptimizeKind kind,
- DeoptimizeReason reason, VectorSlotPair const& feedback,
- Node* frame_state)
- : mode_(kFlags_deoptimize),
+ FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+ BasicBlock* true_block, BasicBlock* false_block)
+ : mode_(mode),
+ condition_(condition),
+ true_block_(true_block),
+ false_block_(false_block) {
+ DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
+ DCHECK_NOT_NULL(true_block);
+ DCHECK_NOT_NULL(false_block);
+ }
+
+ FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback, Node* frame_state)
+ : mode_(mode),
condition_(condition),
kind_(kind),
reason_(reason),
feedback_(feedback),
frame_state_or_result_(frame_state) {
+ DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
DCHECK_NOT_NULL(frame_state);
}
+
FlagsContinuation(FlagsCondition condition, Node* result)
: mode_(kFlags_set),
condition_(condition),
@@ -502,13 +544,13 @@ class FlagsContinuation final {
FlagsMode const mode_;
FlagsCondition condition_;
- DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize
- DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize
- VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize
- Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
+ DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize*
+ DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize*
+ VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize*
+ Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize*
// or mode_ == kFlags_set.
- BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
- BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
+ BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*.
+ BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*.
Runtime::FunctionId trap_id_; // Only valid if mode_ == kFlags_trap.
};
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index c94b42b458..954a1fc272 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -24,9 +24,11 @@ InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
+ EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSpeculationPoison enable_speculation_poison,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
- EnableSerialization enable_serialization)
+ EnableSerialization enable_serialization, LoadPoisoning load_poisoning)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -45,6 +47,9 @@ InstructionSelector::InstructionSelector(
scheduler_(nullptr),
enable_scheduling_(enable_scheduling),
enable_serialization_(enable_serialization),
+ enable_switch_jump_table_(enable_switch_jump_table),
+ enable_speculation_poison_(enable_speculation_poison),
+ load_poisoning_(load_poisoning),
frame_(frame),
instruction_selection_failed_(false) {
instructions_.reserve(node_count);
@@ -651,16 +656,16 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
struct CallBuffer {
- CallBuffer(Zone* zone, const CallDescriptor* descriptor,
+ CallBuffer(Zone* zone, const CallDescriptor* call_descriptor,
FrameStateDescriptor* frame_state)
- : descriptor(descriptor),
+ : descriptor(call_descriptor),
frame_state_descriptor(frame_state),
output_nodes(zone),
outputs(zone),
instruction_args(zone),
pushed_nodes(zone) {
- output_nodes.reserve(descriptor->ReturnCount());
- outputs.reserve(descriptor->ReturnCount());
+ output_nodes.reserve(call_descriptor->ReturnCount());
+ outputs.reserve(call_descriptor->ReturnCount());
pushed_nodes.reserve(input_count());
instruction_args.reserve(input_count() + frame_state_value_count());
}
@@ -758,19 +763,34 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
Node* callee = call->InputAt(0);
bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
+ bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
+ // TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
+ // JS-linkage callers with a register code target. The problem is that the
+ // code target register may be clobbered before the final jmp by
+ // AssemblePopArgumentsAdaptorFrame. As a more permanent fix we could
+ // entirely remove support for tail-calls from JS-linkage callers.
buffer->instruction_args.push_back(
(call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
? g.UseImmediate(callee)
- : g.UseRegister(callee));
+ : call_use_fixed_target_reg
+ ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
+#ifdef V8_EMBEDDED_BUILTINS
+ : is_tail_call ? g.UseUniqueRegister(callee)
+ : g.UseRegister(callee));
+#else
+ : g.UseRegister(callee));
+#endif
break;
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
(call_address_immediate &&
callee->opcode() == IrOpcode::kExternalConstant)
? g.UseImmediate(callee)
- : g.UseRegister(callee));
+ : call_use_fixed_target_reg
+ ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallWasmFunction:
buffer->instruction_args.push_back(
@@ -778,7 +798,9 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
(callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
callee->opcode() == IrOpcode::kRelocatableInt32Constant))
? g.UseImmediate(callee)
- : g.UseRegister(callee));
+ : call_use_fixed_target_reg
+ ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
+ : g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
@@ -1161,6 +1183,11 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
}
+ case IrOpcode::kPoisonedLoad: {
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
+ return VisitPoisonedLoad(node);
+ }
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
@@ -1470,6 +1497,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
+ case IrOpcode::kSpeculationPoison:
+ return VisitSpeculationPoison(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kLoadStackPointer:
@@ -1510,18 +1539,18 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairSar(node);
- case IrOpcode::kAtomicLoad: {
+ case IrOpcode::kWord32AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
- return VisitAtomicLoad(node);
+ return VisitWord32AtomicLoad(node);
}
- case IrOpcode::kAtomicStore:
- return VisitAtomicStore(node);
+ case IrOpcode::kWord32AtomicStore:
+ return VisitWord32AtomicStore(node);
#define ATOMIC_CASE(name) \
- case IrOpcode::kAtomic##name: { \
+ case IrOpcode::kWord32Atomic##name: { \
MachineType type = AtomicOpRepresentationOf(node->op()); \
MarkAsRepresentation(type.representation(), node); \
- return VisitAtomic##name(node); \
+ return VisitWord32Atomic##name(node); \
}
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
@@ -1538,6 +1567,16 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node);
return VisitProtectedLoad(node);
}
+ case IrOpcode::kSignExtendWord8ToInt32:
+ return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node);
+ case IrOpcode::kSignExtendWord16ToInt32:
+ return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node);
+ case IrOpcode::kSignExtendWord8ToInt64:
+ return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node);
+ case IrOpcode::kSignExtendWord16ToInt64:
+ return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node);
+ case IrOpcode::kSignExtendWord32ToInt64:
+ return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node);
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
@@ -1774,12 +1813,20 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kS1x16AllTrue:
return MarkAsWord32(node), VisitS1x16AllTrue(node);
default:
- V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
- node->opcode(), node->op()->mnemonic(), node->id());
+ FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
+ node->op()->mnemonic(), node->id());
break;
}
}
+void InstructionSelector::VisitSpeculationPoison(Node* node) {
+ CHECK(enable_speculation_poison_ == kEnableSpeculationPoison);
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineAsLocation(node, LinkageLocation::ForRegister(
+ kSpeculationPoisonRegister.code(),
+ MachineType::UintPtr())));
+}
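The poison value pinned to kSpeculationPoisonRegister is meant to be all ones on the architecturally correct path and all zeros under misspeculation, so masking a loaded value with it neutralizes Spectre-style leaks. A scalar sketch of the masking step only (the mechanism, not V8's exact codegen):

#include <cstdint>

// A poisoned load: the mask is ~0 when the preceding branches resolved
// as predicted and 0 otherwise, so a misspeculated load yields 0.
uint32_t PoisonedLoad(const uint32_t* p, uintptr_t poison_mask) {
  return *p & static_cast<uint32_t>(poison_mask);
}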
+
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
@@ -2078,6 +2125,18 @@ void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
UNIMPLEMENTED();
}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
#endif // V8_TARGET_ARCH_32_BIT
// 64 bit targets do not implement the following instructions.
@@ -2108,36 +2167,14 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
+
void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2154,6 +2191,12 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
@@ -2204,55 +2247,33 @@ void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
+
+void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
+ UNIMPLEMENTED();
+}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
+void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
@@ -2365,15 +2386,15 @@ void InstructionSelector::VisitConstant(Node* node) {
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
OperandGenerator g(this);
- const CallDescriptor* descriptor = CallDescriptorOf(node->op());
+ auto call_descriptor = CallDescriptorOf(node->op());
FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
+ if (call_descriptor->NeedsFrameState()) {
frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
+ node->InputAt(static_cast<int>(call_descriptor->InputCount())));
}
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+ CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
// TODO(turbofan): on some architectures it's probably better to use
@@ -2383,10 +2404,10 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
InitializeCallBuffer(node, &buffer, call_buffer_flags, false);
- EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+ EmitPrepareArguments(&(buffer.pushed_nodes), call_descriptor, node);
// Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
+ CallDescriptor::Flags flags = call_descriptor->flags();
if (handler) {
DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
flags |= CallDescriptor::kHasExceptionHandler;
@@ -2395,11 +2416,10 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
- switch (descriptor->kind()) {
+ switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount()));
+ opcode = kArchCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount()));
break;
case CallDescriptor::kCallCodeObject:
opcode = kArchCallCodeObject | MiscField::encode(flags);
@@ -2421,7 +2441,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
if (instruction_selection_failed()) return;
call_instr->MarkAsCall();
- EmitPrepareResults(&(buffer.output_nodes), descriptor, node);
+ EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node);
}
void InstructionSelector::VisitCallWithCallerSavedRegisters(
@@ -2438,26 +2458,29 @@ void InstructionSelector::VisitCallWithCallerSavedRegisters(
void InstructionSelector::VisitTailCall(Node* node) {
OperandGenerator g(this);
- CallDescriptor const* descriptor = CallDescriptorOf(node->op());
+ auto call_descriptor = CallDescriptorOf(node->op());
CallDescriptor* caller = linkage()->GetIncomingDescriptor();
DCHECK(caller->CanTailCall(node));
const CallDescriptor* callee = CallDescriptorOf(node->op());
int stack_param_delta = callee->GetStackParameterDelta(caller);
- CallBuffer buffer(zone(), descriptor, nullptr);
+ CallBuffer buffer(zone(), call_descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
CallBufferFlags flags(kCallCodeImmediate | kCallTail);
if (IsTailCallAddressImmediate()) {
flags |= kCallAddressImmediate;
}
+ if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
+ flags |= kCallFixedTargetRegister;
+ }
InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
InstructionOperandVector temps(zone());
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- switch (descriptor->kind()) {
+ switch (call_descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
opcode = kArchTailCallCodeObjectFromJSFunction;
break;
@@ -2470,7 +2493,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
temps.push_back(g.TempRegister());
}
} else {
- switch (descriptor->kind()) {
+ switch (call_descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
opcode = kArchTailCallCodeObject;
break;
@@ -2485,7 +2508,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
return;
}
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(call_descriptor->flags());
Emit(kArchPrepareTailCall, g.NoOutput());
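
For readers following the tail-call hunk above, a minimal standalone sketch (plain enum in place of V8's base::Flags, not part of the patch) of how the new kCallFixedTargetRegister bit composes with the existing CallBufferFlags:

#include <cassert>
#include <cstdint>

// Each CallBufferFlag occupies one bit, so the new flag can simply be
// OR-ed into an existing flag set, exactly as the hunk above does when
// the callee descriptor demands a fixed target register.
enum CallBufferFlag : uint32_t {
  kCallCodeImmediate = 1u << 0,
  kCallAddressImmediate = 1u << 1,
  kCallTail = 1u << 2,
  kCallFixedTargetRegister = 1u << 3,
};

int main() {
  uint32_t flags = kCallCodeImmediate | kCallTail;
  bool fixed_target = true;  // stands in for the kFixedTargetRegister check
  if (fixed_target) flags |= kCallFixedTargetRegister;
  assert(flags & kCallFixedTargetRegister);
  assert(flags & kCallTail);  // earlier bits are preserved
}
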
@@ -2534,6 +2557,51 @@ void InstructionSelector::VisitReturn(Node* ret) {
Emit(kArchRet, 0, nullptr, input_count, value_locations);
}
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ LoadPoisoning poisoning =
+ IsSafetyCheckOf(branch->op()) == IsSafetyCheck::kSafetyCheck
+ ? load_poisoning_
+ : LoadPoisoning::kDontPoison;
+ FlagsContinuation cont =
+ FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch, poisoning);
+ VisitWordCompareZero(branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
+ LoadPoisoning poisoning = p.is_safety_check() == IsSafetyCheck::kSafetyCheck
+ ? load_poisoning_
+ : LoadPoisoning::kDontPoison;
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1),
+ poisoning);
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+ DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
+ LoadPoisoning poisoning = p.is_safety_check() == IsSafetyCheck::kSafetyCheck
+ ? load_poisoning_
+ : LoadPoisoning::kDontPoison;
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1), poisoning);
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
+}
+
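
The new branch, deoptimize, and trap visitors above repeat one decision. A standalone restatement of that decision, with local enum stand-ins for V8's types (the helper function is hypothetical, not part of the patch): only nodes marked as safety checks inherit the selector's poisoning mode; everything else compares without poisoning.

#include <cassert>

enum class IsSafetyCheck { kSafetyCheck, kNoSafetyCheck };
enum class LoadPoisoning { kDoPoison, kDontPoison };

// Mirrors the ternary repeated in VisitBranch, VisitDeoptimizeIf and
// VisitDeoptimizeUnless: the selector-wide mode applies only to safety checks.
LoadPoisoning PoisoningFor(IsSafetyCheck check, LoadPoisoning selector_mode) {
  return check == IsSafetyCheck::kSafetyCheck ? selector_mode
                                              : LoadPoisoning::kDontPoison;
}

int main() {
  assert(PoisoningFor(IsSafetyCheck::kNoSafetyCheck,
                      LoadPoisoning::kDoPoison) == LoadPoisoning::kDontPoison);
  assert(PoisoningFor(IsSafetyCheck::kSafetyCheck,
                      LoadPoisoning::kDoPoison) == LoadPoisoning::kDoPoison);
}
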
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
DeoptimizeKind kind, DeoptimizeReason reason,
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 75c41c165f..e30dba0aa0 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -51,17 +51,28 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
enum EnableScheduling { kDisableScheduling, kEnableScheduling };
enum EnableSerialization { kDisableSerialization, kEnableSerialization };
+ enum EnableSwitchJumpTable {
+ kDisableSwitchJumpTable,
+ kEnableSwitchJumpTable
+ };
+ enum EnableSpeculationPoison {
+ kDisableSpeculationPoison,
+ kEnableSpeculationPoison
+ };
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
+ EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSpeculationPoison enable_speculation_poison,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
? kEnableScheduling
: kDisableScheduling,
- EnableSerialization enable_serialization = kDisableSerialization);
+ EnableSerialization enable_serialization = kDisableSerialization,
+ LoadPoisoning poisoning = LoadPoisoning::kDontPoison);
// Visit code for the entire graph with the included schedule.
bool SelectInstructions();
@@ -158,6 +169,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
+ // TODO(jarin) This is temporary until the poisoning is universally supported.
+ static bool SupportsSpeculationPoisoning();
+
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -277,7 +291,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
enum CallBufferFlag {
kCallCodeImmediate = 1u << 0,
kCallAddressImmediate = 1u << 1,
- kCallTail = 1u << 2
+ kCallTail = 1u << 2,
+ kCallFixedTargetRegister = 1u << 3,
};
typedef base::Flags<CallBufferFlag> CallBufferFlags;
@@ -354,10 +369,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitUnreachable(Node* node);
void VisitDeadValue(Node* node);
+ void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
+
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
- const CallDescriptor* descriptor, Node* node);
+ const CallDescriptor* call_descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
- const CallDescriptor* descriptor, Node* node);
+ const CallDescriptor* call_descriptor, Node* node);
void EmitIdentity(Node* node);
bool CanProduceSignalingNaN(Node* node);
@@ -445,6 +462,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
InstructionScheduler* scheduler_;
EnableScheduling enable_scheduling_;
EnableSerialization enable_serialization_;
+ EnableSwitchJumpTable enable_switch_jump_table_;
+ EnableSpeculationPoison enable_speculation_poison_;
+ LoadPoisoning load_poisoning_;
Frame* frame_;
bool instruction_selection_failed_;
};
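
A call-site sketch may clarify the constructor change: the two new enum parameters are mandatory and precede the defaulted ones, so every existing caller has to be updated. The variable names before `frame` are assumptions rather than taken from the patch, and this is a fragment, not standalone code:

// Hypothetical call site in the pipeline; surrounding names are assumed.
InstructionSelector selector(
    zone, graph->NodeCount(), linkage, sequence, schedule, source_positions,
    frame, InstructionSelector::kEnableSwitchJumpTable,
    InstructionSelector::kDisableSpeculationPoison);
// The trailing parameters keep their defaults, including
// LoadPoisoning::kDontPoison.
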
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index f335177b95..85d4533d25 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -96,6 +96,26 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
return false;
}
+bool LocationOperand::IsCompatible(LocationOperand* op) {
+ if (IsRegister() || IsStackSlot()) {
+ return op->IsRegister() || op->IsStackSlot();
+ } else if (kSimpleFPAliasing) {
+    // A backend may choose to generate the same instruction sequence
+    // regardless of the FP representation. As a result, we can relax the
+    // compatibility and allow, for example, a Double to be moved into a
+    // Float. However, this is only allowed if the registers do not overlap.
+ return (IsFPRegister() || IsFPStackSlot()) &&
+ (op->IsFPRegister() || op->IsFPStackSlot());
+ } else if (IsFloatRegister() || IsFloatStackSlot()) {
+ return op->IsFloatRegister() || op->IsFloatStackSlot();
+ } else if (IsDoubleRegister() || IsDoubleStackSlot()) {
+ return op->IsDoubleRegister() || op->IsDoubleStackSlot();
+ } else {
+ return (IsSimd128Register() || IsSimd128StackSlot()) &&
+ (op->IsSimd128Register() || op->IsSimd128StackSlot());
+ }
+}
+
void InstructionOperand::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
PrintableInstructionOperand wrapper;
@@ -426,8 +446,12 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os;
case kFlags_branch:
return os << "branch";
+ case kFlags_branch_and_poison:
+ return os << "branch_and_poison";
case kFlags_deoptimize:
return os << "deoptimize";
+ case kFlags_deoptimize_and_poison:
+ return os << "deoptimize_and_poison";
case kFlags_set:
return os << "set";
case kFlags_trap:
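
Before the matching header change below, a standalone restatement (not V8 code) of the relation the new LocationOperand::IsCompatible encodes, reduced to a small enum: general-purpose locations pair only with each other; under simple FP aliasing any FP representation can be moved as raw bits; otherwise FP representations must match exactly.

#include <cassert>

enum class Rep { kGeneral, kFloat32, kFloat64, kSimd128 };

constexpr bool kSimpleFPAliasing = true;  // assumption: an x64-like target

bool Compatible(Rep a, Rep b) {
  if (a == Rep::kGeneral || b == Rep::kGeneral) return a == b;
  if (kSimpleFPAliasing) return true;  // relaxed: Double may move as Float
  return a == b;                       // strict: representations must match
}

int main() {
  assert(Compatible(Rep::kFloat64, Rep::kFloat32));   // relaxed under aliasing
  assert(!Compatible(Rep::kGeneral, Rep::kFloat64));  // never mixed
  assert(Compatible(Rep::kGeneral, Rep::kGeneral));
}
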
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 7772f18ad9..11da39aacb 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -491,6 +491,9 @@ class LocationOperand : public InstructionOperand {
UNREACHABLE();
}
+  // Returns true if a move between the two locations is allowed.
+ bool IsCompatible(LocationOperand* op);
+
static LocationOperand* cast(InstructionOperand* op) {
DCHECK(op->IsAnyLocationOperand());
return static_cast<LocationOperand*>(op);
@@ -889,7 +892,8 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsDeoptimizeCall() const {
return arch_opcode() == ArchOpcode::kArchDeoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize;
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize ||
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize_and_poison;
}
bool IsTrap() const {
@@ -1093,11 +1097,7 @@ class V8_EXPORT_PRIVATE Constant final {
private:
Type type_;
-#if V8_TARGET_ARCH_32_BIT
- RelocInfo::Mode rmode_ = RelocInfo::NONE32;
-#else
- RelocInfo::Mode rmode_ = RelocInfo::NONE64;
-#endif
+ RelocInfo::Mode rmode_ = RelocInfo::NONE;
int64_t value_;
};
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 940f0904b3..ca1bf399b0 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -75,11 +75,11 @@ void Int64Lowering::LowerGraph() {
namespace {
-int GetReturnIndexAfterLowering(
- CallDescriptor* descriptor, int old_index) {
+int GetReturnIndexAfterLowering(CallDescriptor* call_descriptor,
+ int old_index) {
int result = old_index;
for (int i = 0; i < old_index; i++) {
- if (descriptor->GetReturnType(i).representation() ==
+ if (call_descriptor->GetReturnType(i).representation() ==
MachineRepresentation::kWord64) {
result++;
}
@@ -87,9 +87,9 @@ int GetReturnIndexAfterLowering(
return result;
}
-int GetReturnCountAfterLowering(CallDescriptor* descriptor) {
+int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) {
return GetReturnIndexAfterLowering(
- descriptor, static_cast<int>(descriptor->ReturnCount()));
+ call_descriptor, static_cast<int>(call_descriptor->ReturnCount()));
}
int GetParameterIndexAfterLowering(
@@ -314,32 +314,32 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kTailCall: {
- CallDescriptor* descriptor =
+ auto call_descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
bool returns_require_lowering =
- GetReturnCountAfterLowering(descriptor) !=
- static_cast<int>(descriptor->ReturnCount());
+ GetReturnCountAfterLowering(call_descriptor) !=
+ static_cast<int>(call_descriptor->ReturnCount());
if (DefaultLowering(node) || returns_require_lowering) {
// Tail calls do not have return values, so adjusting the call
// descriptor is enough.
- auto new_descriptor = GetI32WasmCallDescriptor(zone(), descriptor);
+ auto new_descriptor = GetI32WasmCallDescriptor(zone(), call_descriptor);
NodeProperties::ChangeOp(node, common()->TailCall(new_descriptor));
}
break;
}
case IrOpcode::kCall: {
- CallDescriptor* descriptor =
+ auto call_descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
bool returns_require_lowering =
- GetReturnCountAfterLowering(descriptor) !=
- static_cast<int>(descriptor->ReturnCount());
+ GetReturnCountAfterLowering(call_descriptor) !=
+ static_cast<int>(call_descriptor->ReturnCount());
if (DefaultLowering(node) || returns_require_lowering) {
// We have to adjust the call descriptor.
- NodeProperties::ChangeOp(
- node, common()->Call(GetI32WasmCallDescriptor(zone(), descriptor)));
+ NodeProperties::ChangeOp(node, common()->Call(GetI32WasmCallDescriptor(
+ zone(), call_descriptor)));
}
if (returns_require_lowering) {
- size_t return_arity = descriptor->ReturnCount();
+ size_t return_arity = call_descriptor->ReturnCount();
if (return_arity == 1) {
// We access the additional return values through projections.
Node* low_node =
@@ -355,14 +355,14 @@ void Int64Lowering::LowerNode(Node* node) {
++old_index, ++new_index) {
Node* use_node = projections[old_index];
DCHECK_EQ(ProjectionIndexOf(use_node->op()), old_index);
- DCHECK_EQ(GetReturnIndexAfterLowering(descriptor,
+ DCHECK_EQ(GetReturnIndexAfterLowering(call_descriptor,
static_cast<int>(old_index)),
static_cast<int>(new_index));
if (new_index != old_index) {
NodeProperties::ChangeOp(
use_node, common()->Projection(new_index));
}
- if (descriptor->GetReturnType(old_index).representation() ==
+ if (call_descriptor->GetReturnType(old_index).representation() ==
MachineRepresentation::kWord64) {
Node* high_node = graph()->NewNode(
common()->Projection(new_index + 1), node,
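
A small worked example (standalone, not V8 code) of the index arithmetic these hunks rename: on 32-bit targets every i64 return value is split into a (low, high) pair of i32s, so each i64 preceding a given return shifts its projection index up by one.

#include <cassert>
#include <vector>

enum class Rep { kWord32, kWord64 };

// Same shape as GetReturnIndexAfterLowering: count the extra i32 slot
// introduced by every i64 return before old_index.
int ReturnIndexAfterLowering(const std::vector<Rep>& returns, int old_index) {
  int result = old_index;
  for (int i = 0; i < old_index; i++) {
    if (returns[i] == Rep::kWord64) result++;
  }
  return result;
}

int main() {
  // A call returning (i32, i64, i32): the i64 at index 1 becomes two i32s,
  // so the return at old index 2 lands at new index 3.
  std::vector<Rep> returns = {Rep::kWord32, Rep::kWord64, Rep::kWord32};
  assert(ReturnIndexAfterLowering(returns, 2) == 3);
  // And the total return count after lowering grows from 3 to 4.
  assert(ReturnIndexAfterLowering(returns, 3) == 4);
}
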
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 7ff2bf6d5e..a6d98586ad 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -39,6 +39,14 @@ class JSCallReduction {
return function->shared()->HasBuiltinFunctionId();
}
+ bool BuiltinCanBeInlined() {
+ DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ // Do not inline if the builtin may have break points.
+ return !function->shared()->HasBreakInfo();
+ }
+
// Retrieves the BuiltinFunctionId as described above.
BuiltinFunctionId GetBuiltinFunctionId() {
DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
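
The new predicate is consumed later in this same patch, in JSBuiltinReducer::Reduce(); quoting that call site here for context:

// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
if (!r.BuiltinCanBeInlined()) return NoChange();

Skipping the reduction whenever the shared function info has break info keeps debugger breakpoints set on builtins working.
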
@@ -245,7 +253,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
map_index = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
} else {
DCHECK_GE(receiver_map->elements_kind(), UINT8_ELEMENTS);
- DCHECK_LE(receiver_map->elements_kind(), UINT8_CLAMPED_ELEMENTS);
+ DCHECK_LE(receiver_map->elements_kind(), BIGINT64_ELEMENTS);
map_index = (kind == IterationKind::kValues
? Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX
: Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX) +
@@ -864,30 +872,6 @@ Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
return Replace(value);
}
-namespace {
-
-bool HasInstanceTypeWitness(Node* receiver, Node* effect,
- InstanceType instance_type) {
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- switch (result) {
- case NodeProperties::kUnreliableReceiverMaps:
- case NodeProperties::kReliableReceiverMaps:
- DCHECK_NE(0, receiver_maps.size());
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- if (receiver_maps[i]->instance_type() != instance_type) return false;
- }
- return true;
-
- case NodeProperties::kNoReceiverMaps:
- return false;
- }
- UNREACHABLE();
-}
-
-} // namespace
-
Reduction JSBuiltinReducer::ReduceCollectionIterator(
Node* node, InstanceType collection_instance_type,
int collection_iterator_map_index) {
@@ -895,7 +879,8 @@ Reduction JSBuiltinReducer::ReduceCollectionIterator(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (HasInstanceTypeWitness(receiver, effect, collection_instance_type)) {
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
+ collection_instance_type)) {
// Figure out the proper collection iterator map.
Handle<Map> collection_iterator_map(
Map::cast(native_context()->get(collection_iterator_map_index)),
@@ -930,7 +915,8 @@ Reduction JSBuiltinReducer::ReduceCollectionSize(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (HasInstanceTypeWitness(receiver, effect, collection_instance_type)) {
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
+ collection_instance_type)) {
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
receiver, effect, control);
@@ -1021,12 +1007,13 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
receiver, effect, control);
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kOrderedHashTableHealIndex);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kEliminatable);
- index = effect = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()), table,
- index, jsgraph()->NoContextConstant(), effect);
+ index = effect =
+ graph()->NewNode(common()->Call(call_descriptor),
+ jsgraph()->HeapConstant(callable.code()), table, index,
+ jsgraph()->NoContextConstant(), effect);
NodeProperties::SetType(index, type_cache_.kFixedArrayLengthType);
// Update the {index} and {table} on the {receiver}.
@@ -1235,7 +1222,7 @@ Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (HasInstanceTypeWitness(receiver, effect, JS_DATE_TYPE)) {
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_DATE_TYPE)) {
Node* value = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSDateValue()), receiver,
effect, control);
@@ -1281,7 +1268,8 @@ Reduction JSBuiltinReducer::ReduceMapGet(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
+ if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
+ return NoChange();
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
@@ -1324,7 +1312,8 @@ Reduction JSBuiltinReducer::ReduceMapHas(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
+ if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
+ return NoChange();
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
@@ -1341,420 +1330,6 @@ Reduction JSBuiltinReducer::ReduceMapHas(Node* node) {
return Replace(value);
}
-// ES6 section 20.2.2.1 Math.abs ( x )
-Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.abs(a:plain-primitive) -> NumberAbs(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAbs(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.2 Math.acos ( x )
-Reduction JSBuiltinReducer::ReduceMathAcos(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.acos(a:plain-primitive) -> NumberAcos(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAcos(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.3 Math.acosh ( x )
-Reduction JSBuiltinReducer::ReduceMathAcosh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.acosh(a:plain-primitive) -> NumberAcosh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAcosh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.4 Math.asin ( x )
-Reduction JSBuiltinReducer::ReduceMathAsin(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.asin(a:plain-primitive) -> NumberAsin(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAsin(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.5 Math.asinh ( x )
-Reduction JSBuiltinReducer::ReduceMathAsinh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.asinh(a:plain-primitive) -> NumberAsinh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAsinh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.6 Math.atan ( x )
-Reduction JSBuiltinReducer::ReduceMathAtan(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.atan(a:plain-primitive) -> NumberAtan(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAtan(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.7 Math.atanh ( x )
-Reduction JSBuiltinReducer::ReduceMathAtanh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.atanh(a:plain-primitive) -> NumberAtanh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberAtanh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.8 Math.atan2 ( y, x )
-Reduction JSBuiltinReducer::ReduceMathAtan2(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
- // Math.atan2(a:plain-primitive,
- // b:plain-primitive) -> NumberAtan2(ToNumber(a),
- // ToNumber(b))
- Node* left = ToNumber(r.left());
- Node* right = ToNumber(r.right());
- Node* value = graph()->NewNode(simplified()->NumberAtan2(), left, right);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.10 Math.ceil ( x )
-Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.ceil(a:plain-primitive) -> NumberCeil(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberCeil(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.11 Math.clz32 ( x )
-Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.clz32(a:plain-primitive) -> NumberClz32(ToUint32(a))
- Node* input = ToUint32(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberClz32(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.12 Math.cos ( x )
-Reduction JSBuiltinReducer::ReduceMathCos(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.cos(a:plain-primitive) -> NumberCos(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberCos(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.13 Math.cosh ( x )
-Reduction JSBuiltinReducer::ReduceMathCosh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.cosh(a:plain-primitive) -> NumberCosh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberCosh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.14 Math.exp ( x )
-Reduction JSBuiltinReducer::ReduceMathExp(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.exp(a:plain-primitive) -> NumberExp(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberExp(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.15 Math.expm1 ( x )
-Reduction JSBuiltinReducer::ReduceMathExpm1(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.expm1(a:number) -> NumberExpm1(a)
- Node* value = graph()->NewNode(simplified()->NumberExpm1(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.16 Math.floor ( x )
-Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.floor(a:plain-primitive) -> NumberFloor(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberFloor(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.17 Math.fround ( x )
-Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.fround(a:plain-primitive) -> NumberFround(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberFround(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.19 Math.imul ( x, y )
-Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
- // Math.imul(a:plain-primitive,
- // b:plain-primitive) -> NumberImul(ToUint32(a),
- // ToUint32(b))
- Node* left = ToUint32(r.left());
- Node* right = ToUint32(r.right());
- Node* value = graph()->NewNode(simplified()->NumberImul(), left, right);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.20 Math.log ( x )
-Reduction JSBuiltinReducer::ReduceMathLog(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.log(a:plain-primitive) -> NumberLog(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberLog(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.21 Math.log1p ( x )
-Reduction JSBuiltinReducer::ReduceMathLog1p(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.log1p(a:plain-primitive) -> NumberLog1p(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberLog1p(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.22 Math.log10 ( x )
-Reduction JSBuiltinReducer::ReduceMathLog10(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.log10(a:number) -> NumberLog10(a)
- Node* value = graph()->NewNode(simplified()->NumberLog10(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.23 Math.log2 ( x )
-Reduction JSBuiltinReducer::ReduceMathLog2(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.log2(a:number) -> NumberLog(a)
- Node* value = graph()->NewNode(simplified()->NumberLog2(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.24 Math.max ( value1, value2, ...values )
-Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchZero()) {
- // Math.max() -> -Infinity
- return Replace(jsgraph()->Constant(-V8_INFINITY));
- }
- if (r.InputsMatchAll(Type::PlainPrimitive())) {
- // Math.max(a:plain-primitive, b:plain-primitive, ...)
- Node* value = ToNumber(r.GetJSCallInput(0));
- for (int i = 1; i < r.GetJSCallArity(); i++) {
- Node* input = ToNumber(r.GetJSCallInput(i));
- value = graph()->NewNode(simplified()->NumberMax(), value, input);
- }
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
-Reduction JSBuiltinReducer::ReduceMathMin(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchZero()) {
- // Math.min() -> Infinity
- return Replace(jsgraph()->Constant(V8_INFINITY));
- }
- if (r.InputsMatchAll(Type::PlainPrimitive())) {
- // Math.min(a:plain-primitive, b:plain-primitive, ...)
- Node* value = ToNumber(r.GetJSCallInput(0));
- for (int i = 1; i < r.GetJSCallArity(); i++) {
- Node* input = ToNumber(r.GetJSCallInput(i));
- value = graph()->NewNode(simplified()->NumberMin(), value, input);
- }
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.26 Math.pow ( x, y )
-Reduction JSBuiltinReducer::ReduceMathPow(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
- // Math.pow(a:plain-primitive,
- // b:plain-primitive) -> NumberPow(ToNumber(a), ToNumber(b))
- Node* left = ToNumber(r.left());
- Node* right = ToNumber(r.right());
- Node* value = graph()->NewNode(simplified()->NumberPow(), left, right);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.28 Math.round ( x )
-Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.round(a:plain-primitive) -> NumberRound(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberRound(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.9 Math.cbrt ( x )
-Reduction JSBuiltinReducer::ReduceMathCbrt(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.cbrt(a:number) -> NumberCbrt(a)
- Node* value = graph()->NewNode(simplified()->NumberCbrt(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.29 Math.sign ( x )
-Reduction JSBuiltinReducer::ReduceMathSign(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.sign(a:plain-primitive) -> NumberSign(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberSign(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.30 Math.sin ( x )
-Reduction JSBuiltinReducer::ReduceMathSin(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.sin(a:plain-primitive) -> NumberSin(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberSin(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.31 Math.sinh ( x )
-Reduction JSBuiltinReducer::ReduceMathSinh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.sinh(a:plain-primitive) -> NumberSinh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberSinh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.32 Math.sqrt ( x )
-Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.sqrt(a:plain-primitive) -> NumberSqrt(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberSqrt(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.33 Math.tan ( x )
-Reduction JSBuiltinReducer::ReduceMathTan(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.tan(a:plain-primitive) -> NumberTan(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberTan(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.34 Math.tanh ( x )
-Reduction JSBuiltinReducer::ReduceMathTanh(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.tanh(a:plain-primitive) -> NumberTanh(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberTanh(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.35 Math.trunc ( x )
-Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // Math.trunc(a:plain-primitive) -> NumberTrunc(ToNumber(a))
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->NumberTrunc(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
// ES6 section 20.1.2.2 Number.isFinite ( number )
Reduction JSBuiltinReducer::ReduceNumberIsFinite(Node* node) {
JSCallReduction r(node);
@@ -1908,18 +1483,6 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
return Replace(value);
}
-// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
-Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // String.fromCharCode(a:plain-primitive) -> StringFromCharCode(a)
- Node* input = ToNumber(r.GetJSCallInput(0));
- Node* value = graph()->NewNode(simplified()->StringFromCharCode(), input);
- return Replace(value);
- }
- return NoChange();
-}
-
namespace {
Node* GetStringWitness(Node* node) {
@@ -1964,13 +1527,13 @@ Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
// builtin instead of the calling function.
Callable const callable =
CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState,
Operator::kNoDeopt | Operator::kNoWrite);
node->ReplaceInput(0, jsgraph()->HeapConstant(callable.code()));
node->ReplaceInput(1, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
}
@@ -1978,171 +1541,6 @@ Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
return NoChange();
}
-Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
- if (Node* receiver = GetStringWitness(node)) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- Node* map = jsgraph()->HeapConstant(
- handle(native_context()->string_iterator_map(), isolate()));
-
- // Allocate new iterator and attach the iterator to this string.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(JSStringIterator::kSize, NOT_TENURED, Type::OtherObject());
- a.Store(AccessBuilder::ForMap(), map);
- a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSStringIteratorString(), receiver);
- a.Store(AccessBuilder::ForJSStringIteratorIndex(),
- jsgraph()->SmiConstant(0));
- Node* value = effect = a.Finish();
-
- // Replace it.
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- if (HasInstanceTypeWitness(receiver, effect, JS_STRING_ITERATOR_TYPE)) {
- Node* string = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
- receiver, effect, control);
- Node* index = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
- receiver, effect, control);
- Node* length = graph()->NewNode(simplified()->StringLength(), string);
-
- // branch0: if (index < length)
- Node* check0 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* etrue0 = effect;
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* done_true;
- Node* vtrue0;
- {
- done_true = jsgraph()->FalseConstant();
- Node* lead = graph()->NewNode(simplified()->StringCharCodeAt(), string,
- index, if_true0);
-
- // branch1: if ((lead & 0xFC00) === 0xD800)
- Node* check1 =
- graph()->NewNode(simplified()->NumberEqual(),
- graph()->NewNode(simplified()->NumberBitwiseAnd(),
- lead, jsgraph()->Constant(0xFC00)),
- jsgraph()->Constant(0xD800));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_true0);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1;
- {
- Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant());
- // branch2: if ((index + 1) < length)
- Node* check2 = graph()->NewNode(simplified()->NumberLessThan(),
- next_index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check2, if_true1);
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2;
- {
- Node* trail = graph()->NewNode(simplified()->StringCharCodeAt(),
- string, next_index, if_true2);
- // branch3: if ((trail & 0xFC00) === 0xDC00)
- Node* check3 = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(simplified()->NumberBitwiseAnd(), trail,
- jsgraph()->Constant(0xFC00)),
- jsgraph()->Constant(0xDC00));
- Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check3, if_true2);
- Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
- Node* vtrue3;
- {
- vtrue3 = graph()->NewNode(
- simplified()->NumberBitwiseOr(),
-// Need to swap the order for big-endian platforms
-#if V8_TARGET_BIG_ENDIAN
- graph()->NewNode(simplified()->NumberShiftLeft(), lead,
- jsgraph()->Constant(16)),
- trail);
-#else
- graph()->NewNode(simplified()->NumberShiftLeft(), trail,
- jsgraph()->Constant(16)),
- lead);
-#endif
- }
-
- Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
- Node* vfalse3 = lead;
- if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
- vtrue2 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue3, vfalse3, if_true2);
- }
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 = lead;
- if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vtrue1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue2, vfalse2, if_true1);
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1 = lead;
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue1, vfalse1, if_true0);
- vtrue0 = graph()->NewNode(
- simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), vtrue0);
-
- // Update iterator.[[NextIndex]]
- Node* char_length =
- graph()->NewNode(simplified()->StringLength(), vtrue0);
- index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
- etrue0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
- receiver, index, etrue0, if_true0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* done_false;
- Node* vfalse0;
- {
- vfalse0 = jsgraph()->UndefinedConstant();
- done_false = jsgraph()->TrueConstant();
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, effect, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
- Node* done =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- done_true, done_false, control);
-
- value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
- value, done, context, effect);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
// ES section #sec-string.prototype.slice
Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
if (Node* receiver = GetStringWitness(node)) {
@@ -2173,6 +1571,7 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* vfalse;
+ Node* efalse;
{
// We need to convince TurboFan that {receiver_length}-1 is a valid
// Unsigned32 value, so we just apply NumberToUint32 to the result
@@ -2181,14 +1580,16 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
graph()->NewNode(simplified()->NumberSubtract(), receiver_length,
jsgraph()->OneConstant());
index = graph()->NewNode(simplified()->NumberToUint32(), index);
- vfalse = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
- if_false);
+ vfalse = efalse = graph()->NewNode(simplified()->StringCharAt(),
+ receiver, index, effect, if_false);
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
Node* value =
graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vtrue, vfalse, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, efalse, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -2196,30 +1597,6 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
return NoChange();
}
-Reduction JSBuiltinReducer::ReduceStringToLowerCaseIntl(Node* node) {
- if (Node* receiver = GetStringWitness(node)) {
- RelaxEffectsAndControls(node);
- node->ReplaceInput(0, receiver);
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->StringToLowerCaseIntl());
- NodeProperties::SetType(node, Type::String());
- return Changed(node);
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceStringToUpperCaseIntl(Node* node) {
- if (Node* receiver = GetStringWitness(node)) {
- RelaxEffectsAndControls(node);
- node->ReplaceInput(0, receiver);
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->StringToUpperCaseIntl());
- NodeProperties::SetType(node, Type::String());
- return Changed(node);
- }
- return NoChange();
-}
-
Reduction JSBuiltinReducer::ReduceArrayBufferIsView(Node* node) {
Node* value = node->op()->ValueInputCount() >= 3
? NodeProperties::GetValueInput(node, 2)
@@ -2236,7 +1613,7 @@ Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect, instance_type)) {
// Load the {receiver}s field.
Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
receiver, effect, control);
@@ -2274,6 +1651,7 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
+ if (!r.BuiltinCanBeInlined()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
case kArrayEntries:
return ReduceArrayIterator(node, IterationKind::kEntries);
@@ -2316,105 +1694,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceCollectionIteratorNext(
node, OrderedHashMap::kEntrySize, factory()->empty_ordered_hash_map(),
FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE);
- case kMathAbs:
- reduction = ReduceMathAbs(node);
- break;
- case kMathAcos:
- reduction = ReduceMathAcos(node);
- break;
- case kMathAcosh:
- reduction = ReduceMathAcosh(node);
- break;
- case kMathAsin:
- reduction = ReduceMathAsin(node);
- break;
- case kMathAsinh:
- reduction = ReduceMathAsinh(node);
- break;
- case kMathAtan:
- reduction = ReduceMathAtan(node);
- break;
- case kMathAtanh:
- reduction = ReduceMathAtanh(node);
- break;
- case kMathAtan2:
- reduction = ReduceMathAtan2(node);
- break;
- case kMathCbrt:
- reduction = ReduceMathCbrt(node);
- break;
- case kMathCeil:
- reduction = ReduceMathCeil(node);
- break;
- case kMathClz32:
- reduction = ReduceMathClz32(node);
- break;
- case kMathCos:
- reduction = ReduceMathCos(node);
- break;
- case kMathCosh:
- reduction = ReduceMathCosh(node);
- break;
- case kMathExp:
- reduction = ReduceMathExp(node);
- break;
- case kMathExpm1:
- reduction = ReduceMathExpm1(node);
- break;
- case kMathFloor:
- reduction = ReduceMathFloor(node);
- break;
- case kMathFround:
- reduction = ReduceMathFround(node);
- break;
- case kMathImul:
- reduction = ReduceMathImul(node);
- break;
- case kMathLog:
- reduction = ReduceMathLog(node);
- break;
- case kMathLog1p:
- reduction = ReduceMathLog1p(node);
- break;
- case kMathLog10:
- reduction = ReduceMathLog10(node);
- break;
- case kMathLog2:
- reduction = ReduceMathLog2(node);
- break;
- case kMathMax:
- reduction = ReduceMathMax(node);
- break;
- case kMathMin:
- reduction = ReduceMathMin(node);
- break;
- case kMathPow:
- reduction = ReduceMathPow(node);
- break;
- case kMathRound:
- reduction = ReduceMathRound(node);
- break;
- case kMathSign:
- reduction = ReduceMathSign(node);
- break;
- case kMathSin:
- reduction = ReduceMathSin(node);
- break;
- case kMathSinh:
- reduction = ReduceMathSinh(node);
- break;
- case kMathSqrt:
- reduction = ReduceMathSqrt(node);
- break;
- case kMathTan:
- reduction = ReduceMathTan(node);
- break;
- case kMathTanh:
- reduction = ReduceMathTanh(node);
- break;
- case kMathTrunc:
- reduction = ReduceMathTrunc(node);
- break;
case kNumberIsFinite:
reduction = ReduceNumberIsFinite(node);
break;
@@ -2445,21 +1724,10 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceCollectionIteratorNext(
node, OrderedHashSet::kEntrySize, factory()->empty_ordered_hash_set(),
FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE);
- case kStringFromCharCode:
- reduction = ReduceStringFromCharCode(node);
- break;
case kStringConcat:
return ReduceStringConcat(node);
- case kStringIterator:
- return ReduceStringIterator(node);
- case kStringIteratorNext:
- return ReduceStringIteratorNext(node);
case kStringSlice:
return ReduceStringSlice(node);
- case kStringToLowerCaseIntl:
- return ReduceStringToLowerCaseIntl(node);
- case kStringToUpperCaseIntl:
- return ReduceStringToUpperCaseIntl(node);
case kArrayBufferIsView:
return ReduceArrayBufferIsView(node);
case kDataViewByteLength:
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index b3c44c7a0f..d24bcc9746 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -69,52 +69,14 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceGlobalIsNaN(Node* node);
Reduction ReduceMapHas(Node* node);
Reduction ReduceMapGet(Node* node);
- Reduction ReduceMathAbs(Node* node);
- Reduction ReduceMathAcos(Node* node);
- Reduction ReduceMathAcosh(Node* node);
- Reduction ReduceMathAsin(Node* node);
- Reduction ReduceMathAsinh(Node* node);
- Reduction ReduceMathAtan(Node* node);
- Reduction ReduceMathAtanh(Node* node);
- Reduction ReduceMathAtan2(Node* node);
- Reduction ReduceMathCbrt(Node* node);
- Reduction ReduceMathCeil(Node* node);
- Reduction ReduceMathClz32(Node* node);
- Reduction ReduceMathCos(Node* node);
- Reduction ReduceMathCosh(Node* node);
- Reduction ReduceMathExp(Node* node);
- Reduction ReduceMathExpm1(Node* node);
- Reduction ReduceMathFloor(Node* node);
- Reduction ReduceMathFround(Node* node);
- Reduction ReduceMathImul(Node* node);
- Reduction ReduceMathLog(Node* node);
- Reduction ReduceMathLog1p(Node* node);
- Reduction ReduceMathLog10(Node* node);
- Reduction ReduceMathLog2(Node* node);
- Reduction ReduceMathMax(Node* node);
- Reduction ReduceMathMin(Node* node);
- Reduction ReduceMathPow(Node* node);
- Reduction ReduceMathRound(Node* node);
- Reduction ReduceMathSign(Node* node);
- Reduction ReduceMathSin(Node* node);
- Reduction ReduceMathSinh(Node* node);
- Reduction ReduceMathSqrt(Node* node);
- Reduction ReduceMathTan(Node* node);
- Reduction ReduceMathTanh(Node* node);
- Reduction ReduceMathTrunc(Node* node);
Reduction ReduceNumberIsFinite(Node* node);
Reduction ReduceNumberIsInteger(Node* node);
Reduction ReduceNumberIsNaN(Node* node);
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceObjectCreate(Node* node);
- Reduction ReduceStringConcat(Node* node);
- Reduction ReduceStringFromCharCode(Node* node);
- Reduction ReduceStringIterator(Node* node);
- Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceStringSlice(Node* node);
- Reduction ReduceStringToLowerCaseIntl(Node* node);
- Reduction ReduceStringToUpperCaseIntl(Node* node);
+ Reduction ReduceStringConcat(Node* node);
Reduction ReduceArrayBufferIsView(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
InstanceType instance_type,
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 1f8e7a2cef..12fb14c6fc 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/js-call-reducer.h"
#include "src/api.h"
+#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -24,6 +25,145 @@ namespace v8 {
namespace internal {
namespace compiler {
+Reduction JSCallReducer::ReduceMathUnary(Node* node, const Operator* op) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->NaNConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* input = NodeProperties::GetValueInput(node, 2);
+
+ input = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ input, effect, control);
+ Node* value = graph()->NewNode(op, input);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
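
A standalone sketch (not V8 code) of ReduceMathUnary's two outcomes, using Math.sin as the example op. In a JSCall node, value input 0 is the target and input 1 the receiver, so fewer than three value inputs means the builtin was called with no argument, which folds straight to NaN; otherwise the single argument is coerced (SpeculativeToNumber in the graph) and the pure simplified op is applied.

#include <cassert>
#include <cmath>
#include <limits>
#include <vector>

double MathUnary(const std::vector<double>& args, double (*op)(double)) {
  if (args.empty()) return std::numeric_limits<double>::quiet_NaN();
  return op(args[0]);  // coercion elided: args are already doubles here
}

int main() {
  assert(std::isnan(MathUnary({}, std::sin)));    // Math.sin() -> NaN
  assert(MathUnary({0.0}, std::sin) == 0.0);      // Math.sin(0) -> 0
}
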
+Reduction JSCallReducer::ReduceMathBinary(Node* node, const Operator* op) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 4) {
+ Node* value = jsgraph()->NaNConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* left = NodeProperties::GetValueInput(node, 2);
+ Node* right = NodeProperties::GetValueInput(node, 3);
+ left = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ left, effect, control);
+ right = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ right, effect, control);
+ Node* value = graph()->NewNode(op, left, right);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
+// ES6 section 20.2.2.19 Math.imul ( x, y )
+Reduction JSCallReducer::ReduceMathImul(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 4) {
+ Node* value = jsgraph()->ZeroConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* left = NodeProperties::GetValueInput(node, 2);
+ Node* right = NodeProperties::GetValueInput(node, 3);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ left = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ left, effect, control);
+ right = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ right, effect, control);
+ left = graph()->NewNode(simplified()->NumberToUint32(), left);
+ right = graph()->NewNode(simplified()->NumberToUint32(), right);
+ Node* value = graph()->NewNode(simplified()->NumberImul(), left, right);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+Reduction JSCallReducer::ReduceMathClz32(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->Constant(32);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* input = NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ input = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ input, effect, control);
+ input = graph()->NewNode(simplified()->NumberToUint32(), input);
+ Node* value = graph()->NewNode(simplified()->NumberClz32(), input);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
+// ES6 section 20.2.2.24 Math.max ( value1, value2, ...values )
+// ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
+Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op,
+ Node* empty_value) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() <= 2) {
+ ReplaceWithValue(node, empty_value);
+ return Replace(empty_value);
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* value = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ NodeProperties::GetValueInput(node, 2), effect, control);
+ for (int i = 3; i < node->op()->ValueInputCount(); i++) {
+ Node* input = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ NodeProperties::GetValueInput(node, i), effect, control);
+ value = graph()->NewNode(op, value, input);
+ }
+
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
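With no arguments the call folds to the identity element passed in as {empty_value}: -Infinity for Math.max and +Infinity for Math.min. With arguments it left-folds the pure NumberMax/NumberMin operator over the speculatively converted inputs, exactly like the loop above. A sketch of the Math.max case, assuming spec NaN and signed-zero handling (note that std::max and std::fmax do not match JavaScript here):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // One Math.max step: NaN is contagious, and +0 beats -0.
    double JsMax2(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return NAN;
      if (a == 0 && b == 0) return std::signbit(a) ? b : a;
      return a > b ? a : b;
    }

    double JsMax(const std::vector<double>& args) {
      double acc = -INFINITY;  // the empty_value passed for Math.max
      for (double v : args) acc = JsMax2(acc, v);  // same left fold as above
      return acc;
    }

    int main() {
      std::printf("%f\n", JsMax({}));          // -inf, like Math.max()
      std::printf("%f\n", JsMax({3.0, NAN}));  // nan
    }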
+
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
@@ -112,7 +252,7 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
}
} else {
ReplaceWithValue(node, value);
- return Replace(node);
+ return Replace(value);
}
return NoChange();
}
@@ -338,12 +478,25 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- Handle<JSFunction> call = Handle<JSFunction>::cast(
- HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
// Change context of {node} to the Function.prototype.call context,
// to ensure any exception is thrown in the correct context.
- NodeProperties::ReplaceContextInput(
- node, jsgraph()->HeapConstant(handle(call->context(), isolate())));
+ Node* context;
+ HeapObjectMatcher m(target);
+ if (m.HasValue()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ context = jsgraph()->HeapConstant(handle(function->context(), isolate()));
+ } else {
+ context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+ }
+ NodeProperties::ReplaceContextInput(node, context);
+ NodeProperties::ReplaceEffectInput(node, effect);
+
// Remove the target from {node} and use the receiver as target instead, and
 // the thisArg becomes the new receiver. If thisArg was not provided, insert
// undefined instead.
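The hunk above generalizes the reduction so the callee (the Function.prototype.call closure itself, value input 0) no longer has to be a compile-time constant: when it is known, its context is embedded as a HeapConstant; otherwise the context is loaded from the closure at run time and the load is threaded into the effect chain. The trailing comment describes the input rewiring; a toy illustration with a plain std::vector standing in for the node's value inputs (not V8 API):

    #include <cstdio>
    #include <string>
    #include <vector>

    // For f.call(thisArg, a, b) the value inputs go from
    //   [Function.prototype.call, f, thisArg, a, b]
    // to
    //   [f, thisArg, a, b]
    // i.e. the old receiver becomes the target and thisArg the new receiver.
    std::vector<std::string> RewireCall(std::vector<std::string> inputs) {
      inputs.erase(inputs.begin());  // drop the Function.prototype.call target
      if (inputs.size() < 2)         // no thisArg given: use undefined
        inputs.push_back("undefined");
      return inputs;
    }

    int main() {
      for (const auto& s : RewireCall({"F.p.call", "f", "thisArg", "a", "b"}))
        std::printf("%s ", s.c_str());
    }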
@@ -672,14 +825,14 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) {
Node* vtrue;
{
Callable callable = CodeFactory::GetProperty(isolate());
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
MachineType::AnyTagged(), 1);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
vtrue = etrue = if_true =
- graph()->NewNode(common()->Call(desc), stub_code, target, key, context,
- frame_state, etrue, if_true);
+ graph()->NewNode(common()->Call(call_descriptor), stub_code, target,
+ key, context, frame_state, etrue, if_true);
}
// Rewire potential exception edges.
@@ -810,8 +963,8 @@ void JSCallReducer::WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
eloop->ReplaceInput(1, effect);
}
-Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayForEach(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -884,7 +1037,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayForEachLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_fail = nullptr;
@@ -907,7 +1060,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = if_true;
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayForEachLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -955,7 +1108,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
}
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayForEachLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -998,254 +1151,21 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
return Replace(jsgraph()->UndefinedConstant());
}
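Throughout these array reductions the callable check is wired in before the loop on purpose: Array.prototype.forEach and friends must throw a TypeError for a non-callable callback even when the array is empty and the loop body never executes. A small sketch of that ordering (illustrative; std::function stands in for the JS callback):

    #include <cstdio>
    #include <functional>
    #include <stdexcept>
    #include <vector>

    void ForEach(const std::vector<double>& xs,
                 std::function<void(double, size_t)> f) {
      if (!f) throw std::runtime_error("callback is not callable");  // pre-loop
      for (size_t k = 0; k < xs.size(); ++k) f(xs[k], k);
    }

    int main() {
      try {
        ForEach({}, nullptr);  // still throws, although the loop never runs
      } catch (const std::exception& e) {
        std::printf("%s\n", e.what());
      }
    }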
-Reduction JSCallReducer::ReduceArrayReduce(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayReduce(Node* node,
+ ArrayReduceDirection direction,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
-
- Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
-
- // Try to determine the {receiver} map.
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* fncallback = node->op()->ValueInputCount() > 2
- ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->UndefinedConstant();
-
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
-
- ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
- ? PACKED_DOUBLE_ELEMENTS
- : PACKED_ELEMENTS;
- for (Handle<Map> receiver_map : receiver_maps) {
- ElementsKind next_kind = receiver_map->elements_kind();
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
- return NoChange();
- }
- if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
- return NoChange();
- }
- if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
- return NoChange();
- }
- if (IsHoleyElementsKind(next_kind)) {
- kind = HOLEY_ELEMENTS;
- }
- }
-
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
-
- // If we have unreliable maps, we need a map check.
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- effect =
- graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
- receiver_maps, p.feedback()),
- receiver, effect, control);
- }
-
- Node* original_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
- receiver, effect, control);
-
- Node* k = jsgraph()->ZeroConstant();
-
- std::vector<Node*> checkpoint_params({receiver, fncallback, k,
- original_length,
- jsgraph()->UndefinedConstant()});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
-
- // Check whether the given callback function is callable. Note that this has
- // to happen outside the loop to make sure we also throw on empty arrays.
- Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
- Node* check_fail = nullptr;
- Node* check_throw = nullptr;
- WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
- &control, &check_fail, &check_throw);
-
- // Set initial accumulator value
- Node* cur = jsgraph()->TheHoleConstant();
-
- Node* initial_element_check_fail = nullptr;
- Node* initial_element_check_throw = nullptr;
- if (node->op()->ValueInputCount() > 3) {
- cur = NodeProperties::GetValueInput(node, 3);
- } else {
- Node* check =
- graph()->NewNode(simplified()->NumberEqual(), original_length, k);
- Node* check_branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- initial_element_check_fail =
- graph()->NewNode(common()->IfTrue(), check_branch);
- initial_element_check_throw = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
- context, check_frame_state, effect, initial_element_check_fail);
- control = graph()->NewNode(common()->IfFalse(), check_branch);
-
- cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
- k = graph()->NewNode(simplified()->NumberAdd(), k,
- jsgraph()->OneConstant());
- }
-
- // Start the loop.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* kloop = k = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
- Node* curloop = cur = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
- checkpoint_params[2] = k;
- checkpoint_params[4] = curloop;
-
- control = loop;
- effect = eloop;
-
- Node* continue_test =
- graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
- Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- continue_test, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
- control = if_true;
-
- Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceLoopEagerDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
- outer_frame_state, ContinuationFrameStateMode::EAGER);
-
- effect =
- graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
-
- // Make sure the map hasn't changed during the iteration
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
-
- Node* element =
- SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
- Node* next_k =
- graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
- checkpoint_params[2] = next_k;
-
- Node* hole_true = nullptr;
- Node* hole_false = nullptr;
- Node* effect_true = effect;
-
- if (IsHoleyElementsKind(kind)) {
- // Holey elements kind require a hole check and skipping of the element in
- // the case of a hole.
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- hole_true = graph()->NewNode(common()->IfTrue(), branch);
- hole_false = graph()->NewNode(common()->IfFalse(), branch);
- control = hole_false;
-
- // The contract is that we don't leak "the hole" into "user JavaScript",
- // so we must rename the {element} here to explicitly exclude "the hole"
- // from the type of {element}.
- element = effect = graph()->NewNode(
- common()->TypeGuard(Type::NonInternal()), element, effect, control);
- }
-
- frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
-
- Node* next_cur = control = effect =
- graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
- jsgraph()->UndefinedConstant(), cur, element, k,
- receiver, context, frame_state, effect, control);
-
- // Rewire potential exception edges.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
- &check_fail, &control);
- }
-
- if (IsHoleyElementsKind(kind)) {
- Node* after_call_control = control;
- Node* after_call_effect = effect;
- control = hole_true;
- effect = effect_true;
-
- control = graph()->NewNode(common()->Merge(2), control, after_call_control);
- effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
- control);
- next_cur =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), cur,
- next_cur, control);
- }
-
- k = next_k;
- cur = next_cur;
-
- loop->ReplaceInput(1, control);
- kloop->ReplaceInput(1, k);
- curloop->ReplaceInput(1, cur);
- eloop->ReplaceInput(1, effect);
-
- control = if_false;
- effect = eloop;
-
- // Wire up the branch for the case when IsCallable fails for the callback.
- // Since {check_throw} is an unconditional throw, it's impossible to
- // return a successful completion. Therefore, we simply connect the successful
- // completion to the graph end.
- Node* throw_node =
- graph()->NewNode(common()->Throw(), check_throw, check_fail);
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
-
- if (node->op()->ValueInputCount() <= 3) {
- // Wire up the branch for the case when an array is empty.
- // Since {check_throw} is an unconditional throw, it's impossible to
- // return a successful completion. Therefore, we simply connect the
- // successful completion to the graph end.
- Node* throw_node =
- graph()->NewNode(common()->Throw(), initial_element_check_throw,
- initial_element_check_fail);
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- }
-
- ReplaceWithValue(node, curloop, effect, control);
- return Replace(curloop);
-} // namespace compiler
-
-Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
- Node* node) {
- if (!FLAG_turbo_inline_array_builtins) return NoChange();
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ bool left = direction == ArrayReduceDirection::kLeft;
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
- if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
- return NoChange();
- }
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -1258,25 +1178,22 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
- ? PACKED_DOUBLE_ELEMENTS
- : PACKED_ELEMENTS;
+ ElementsKind kind = receiver_maps[0]->elements_kind();
for (Handle<Map> receiver_map : receiver_maps) {
- ElementsKind next_kind = receiver_map->elements_kind();
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
- return NoChange();
- }
- if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
- return NoChange();
- }
- if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+ if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
return NoChange();
- }
- if (IsHoleyElementsKind(next_kind)) {
- kind = HOLEY_ELEMENTS;
- }
}
+ std::function<Node*(Node*)> hole_check = [this, kind](Node* element) {
+ if (IsDoubleElementsKind(kind)) {
+ return graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ return graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ };
+
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->no_elements_protector());
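The hole_check lambda above is what lets holey double arrays be handled: in unboxed double storage the hole is a dedicated NaN bit pattern, so it must be detected with NumberIsFloat64Hole (an exact bit comparison) rather than ReferenceEqual against the-hole, and an ordinary NaN must not be mistaken for it. A sketch; the exact pattern is an assumption from this V8 vintage (kHoleNanUpper32 0x7FF7FFFF, kHoleNanLower32 0xFFF7FFFF) and should be treated as illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    bool IsFloat64Hole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits == 0x7FF7FFFFFFF7FFFFull;  // exact bit match, not isnan()
    }

    int main() {
      // An ordinary NaN must not be mistaken for the hole.
      std::printf("%d\n",
                  IsFloat64Hole(std::numeric_limits<double>::quiet_NaN()));
    }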
@@ -1293,48 +1210,84 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
receiver, effect, control);
- Node* k = graph()->NewNode(simplified()->NumberSubtract(), original_length,
- jsgraph()->OneConstant());
-
- std::vector<Node*> checkpoint_params({receiver, fncallback, k,
- original_length,
- jsgraph()->UndefinedConstant()});
- const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* initial_index =
+ left ? jsgraph()->ZeroConstant()
+ : graph()->NewNode(simplified()->NumberSubtract(), original_length,
+ jsgraph()->OneConstant());
+ const Operator* next_op =
+ left ? simplified()->NumberAdd() : simplified()->NumberSubtract();
+ Node* k = initial_index;
- // Check whether the given callback function is callable. Note that this has
- // to happen outside the loop to make sure we also throw on empty arrays.
- Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* check_frame_state;
+ {
+ Builtins::Name builtin_lazy =
+ left ? Builtins::kArrayReduceLoopLazyDeoptContinuation
+ : Builtins::kArrayReduceRightLoopLazyDeoptContinuation;
+ const std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, k, original_length,
+ jsgraph()->UndefinedConstant()});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, builtin_lazy, node->InputAt(0), context,
+ checkpoint_params.data(), stack_parameters - 1, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+ }
Node* check_fail = nullptr;
Node* check_throw = nullptr;
+ // Check whether the given callback function is callable. Note that
+ // this has to happen outside the loop to make sure we also throw on
+ // empty arrays.
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
&control, &check_fail, &check_throw);
// Set initial accumulator value
- Node* cur = nullptr;
+ Node* cur = jsgraph()->TheHoleConstant();
- Node* initial_element_check_fail = nullptr;
- Node* initial_element_check_throw = nullptr;
if (node->op()->ValueInputCount() > 3) {
cur = NodeProperties::GetValueInput(node, 3);
} else {
- Node* check = graph()->NewNode(simplified()->NumberEqual(), original_length,
- jsgraph()->SmiConstant(0));
- Node* check_branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- initial_element_check_fail =
- graph()->NewNode(common()->IfTrue(), check_branch);
- initial_element_check_throw = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
- context, check_frame_state, effect, initial_element_check_fail);
- control = graph()->NewNode(common()->IfFalse(), check_branch);
+    // Find the first/last non-hole element. In case the search fails, we
+    // need a deopt continuation.
+ Builtins::Name builtin_eager =
+ left ? Builtins::kArrayReducePreLoopEagerDeoptContinuation
+ : Builtins::kArrayReduceRightPreLoopEagerDeoptContinuation;
+ const std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* find_first_element_frame_state =
+ CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, builtin_eager, node->InputAt(0), context,
+ checkpoint_params.data(), stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node* loop = control;
+ Node* eloop = effect;
+ effect = graph()->NewNode(common()->Checkpoint(),
+ find_first_element_frame_state, effect, control);
+ Node* continue_test =
+ left ? graph()->NewNode(simplified()->NumberLessThan(), k,
+ original_length)
+ : graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), k);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNoInitialElement),
+ continue_test, effect, control);
cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
- k = graph()->NewNode(simplified()->NumberSubtract(), k,
- jsgraph()->OneConstant());
+ Node* next_k = graph()->NewNode(next_op, k, jsgraph()->OneConstant());
+
+ Node* hole_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ hole_check(cur), control);
+ Node* found_el = graph()->NewNode(common()->IfFalse(), hole_branch);
+ control = found_el;
+ Node* is_hole = graph()->NewNode(common()->IfTrue(), hole_branch);
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, is_hole, effect);
+ // We did the hole-check, so exclude hole from the type.
+ cur = effect = graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
+ cur, effect, control);
+ k = next_k;
}
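When no initial accumulator is supplied, the block above searches from the appropriate end of the array (index 0 for reduce, length - 1 for reduceRight) for the first non-hole element; if the search walks off the array, the CheckIf(kNoInitialElement) deopt replaces the explicit throw wiring of the old implementation and the baseline code raises the TypeError. A direction-parametric sketch (a bool vector marks holes; illustrative only):

    #include <stdexcept>
    #include <vector>

    // Index of the first non-hole element walking left-to-right (reduce) or
    // right-to-left (reduceRight); throwing stands in for the deopt when
    // every slot is a hole.
    int FindInitialIndex(const std::vector<bool>& is_hole, bool left) {
      const int n = static_cast<int>(is_hole.size());
      int k = left ? 0 : n - 1;
      const int step = left ? 1 : -1;
      while (left ? k < n : k >= 0) {
        if (!is_hole[k]) return k;
        k += step;
      }
      throw std::runtime_error("Reduce of empty array with no initial value");
    }

    int main() {
      const std::vector<bool> holes = {true, false, true};
      return FindInitialIndex(holes, /*left=*/true);  // returns 1
    }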
// Start the loop.
@@ -1347,14 +1300,16 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
Node* curloop = cur = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
- checkpoint_params[2] = k;
- checkpoint_params[4] = curloop;
control = loop;
effect = eloop;
- Node* continue_test = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
- jsgraph()->ZeroConstant(), k);
+ Node* continue_test =
+ left
+ ? graph()->NewNode(simplified()->NumberLessThan(), k, original_length)
+ : graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), k);
+
Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
continue_test, control);
@@ -1362,14 +1317,20 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
control = if_true;
- Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function,
- Builtins::kArrayReduceRightLoopEagerDeoptContinuation, node->InputAt(0),
- context, &checkpoint_params[0], stack_parameters, outer_frame_state,
- ContinuationFrameStateMode::EAGER);
-
- effect =
- graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ {
+ Builtins::Name builtin_eager =
+ left ? Builtins::kArrayReduceLoopEagerDeoptContinuation
+ : Builtins::kArrayReduceRightLoopEagerDeoptContinuation;
+ const std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, k, original_length, curloop});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, builtin_eager, node->InputAt(0), context,
+ checkpoint_params.data(), stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
// Make sure the map hasn't changed during the iteration
effect = graph()->NewNode(
@@ -1379,9 +1340,7 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
- Node* next_k = graph()->NewNode(simplified()->NumberSubtract(), k,
- jsgraph()->OneConstant());
- checkpoint_params[2] = next_k;
+ Node* next_k = graph()->NewNode(next_op, k, jsgraph()->OneConstant());
Node* hole_true = nullptr;
Node* hole_false = nullptr;
@@ -1390,10 +1349,8 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
if (IsHoleyElementsKind(kind)) {
// Holey elements kinds require a hole check and skipping of the element in
// the case of a hole.
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ hole_check(element), control);
hole_true = graph()->NewNode(common()->IfTrue(), branch);
hole_false = graph()->NewNode(common()->IfFalse(), branch);
control = hole_false;
@@ -1405,15 +1362,24 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
common()->TypeGuard(Type::NonInternal()), element, effect, control);
}
- frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
- outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* next_cur;
+ {
+ Builtins::Name builtin_lazy =
+ left ? Builtins::kArrayReduceLoopLazyDeoptContinuation
+ : Builtins::kArrayReduceRightLoopLazyDeoptContinuation;
+ const std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, next_k, original_length, curloop});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, builtin_lazy, node->InputAt(0), context,
+ checkpoint_params.data(), stack_parameters - 1, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
- Node* next_cur = control = effect =
- graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
- jsgraph()->UndefinedConstant(), cur, element, k,
- receiver, context, frame_state, effect, control);
+ next_cur = control = effect =
+ graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
+ jsgraph()->UndefinedConstant(), cur, element, k,
+ receiver, context, frame_state, effect, control);
+ }
// Rewire potential exception edges.
Node* on_exception = nullptr;
@@ -1455,23 +1421,12 @@ Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- if (node->op()->ValueInputCount() <= 3) {
- // Wire up the branch for the case when an array is empty.
- // Since {check_throw} is an unconditional throw, it's impossible to
- // return a successful completion. Therefore, we simply connect the
- // successful completion to the graph end.
- Node* throw_node =
- graph()->NewNode(common()->Throw(), initial_element_check_throw,
- initial_element_check_fail);
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- }
-
ReplaceWithValue(node, curloop, effect, control);
return Replace(curloop);
-} // namespace compiler
+}
-Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayMap(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1498,7 +1453,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// Ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
@@ -1547,7 +1502,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayMapLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayMapLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_fail = nullptr;
@@ -1570,7 +1525,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
control = if_true;
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayMapLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayMapLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -1619,7 +1574,7 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
// This frame state is dealt with by hand in
// ArrayMapLoopLazyDeoptContinuation.
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayMapLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayMapLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -1671,8 +1626,8 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
return Replace(a);
}
-Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayFilter(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -1698,7 +1653,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
// The output array is packed (filter doesn't visit holes).
@@ -1766,7 +1721,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
@@ -1794,7 +1749,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayFilterLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayFilterLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -1851,7 +1806,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -1880,7 +1835,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
callback_value});
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayFilterLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayFilterLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -1926,9 +1881,8 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
return Replace(a);
}
-Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
- Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -2014,8 +1968,8 @@ Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
Node* check_throw = nullptr;
{
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, lazy_continuation_builtin, node->InputAt(0),
- context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ jsgraph(), shared, lazy_continuation_builtin, node->InputAt(0), context,
+ &checkpoint_params[0], stack_parameters, outer_frame_state,
ContinuationFrameStateMode::LAZY);
WireInCallbackIsCallableCheck(fncallback, context, frame_state, effect,
&control, &check_fail, &check_throw);
@@ -2040,7 +1994,7 @@ Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
// Check the map hasn't changed during the iteration.
{
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, eager_continuation_builtin, node->InputAt(0),
+ jsgraph(), shared, eager_continuation_builtin, node->InputAt(0),
context, &checkpoint_params[0], stack_parameters, outer_frame_state,
ContinuationFrameStateMode::EAGER);
@@ -2083,7 +2037,7 @@ Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
static_cast<int>(call_checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, after_callback_lazy_continuation_builtin,
+ jsgraph(), shared, after_callback_lazy_continuation_builtin,
node->InputAt(0), context, &call_checkpoint_params[0],
call_stack_parameters, outer_frame_state,
ContinuationFrameStateMode::LAZY);
@@ -2264,8 +2218,8 @@ Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
return element;
}
-Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArrayEvery(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -2291,7 +2245,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
@@ -2336,7 +2290,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayEveryLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
@@ -2362,7 +2316,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayEveryLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayEveryLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -2418,7 +2372,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArrayEveryLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -2484,8 +2438,8 @@ Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
return Replace(return_value);
}
-Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
- Node* node) {
+Reduction JSCallReducer::ReduceArraySome(Node* node,
+ Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -2511,17 +2465,12 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
if (receiver_maps.size() == 0) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
- // TODO(pwong): Handle holey double elements kinds.
- if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
- return NoChange();
- }
-
for (Handle<Map> receiver_map : receiver_maps) {
if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
// We can handle different maps, as long as their elements kind are the
@@ -2563,7 +2512,7 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArraySomeLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
@@ -2594,7 +2543,7 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArraySomeLoopEagerDeoptContinuation,
+ jsgraph(), shared, Builtins::kArraySomeLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -2621,8 +2570,13 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
if (IsHoleyElementsKind(kind)) {
// Holey elements kinds require a hole check and skipping of the element in
// the case of a hole.
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
hole_true = graph()->NewNode(common()->IfTrue(), branch);
@@ -2645,7 +2599,7 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
const int stack_parameters = static_cast<int>(checkpoint_params.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kArraySomeLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -2713,19 +2667,20 @@ Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
return Replace(return_value);
}
-Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
- Handle<JSFunction> function) {
+Reduction JSCallReducer::ReduceCallApiFunction(
+ Node* node, Handle<SharedFunctionInfo> shared) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
+ Node* target = NodeProperties::GetValueInput(node, 0);
Node* receiver = (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
? jsgraph()->HeapConstant(global_proxy())
: NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
Handle<FunctionTemplateInfo> function_template_info(
- FunctionTemplateInfo::cast(function->shared()->function_data()));
- Handle<Context> context(function->context());
+ FunctionTemplateInfo::cast(shared->function_data()));
// CallApiCallbackStub expects the target in a register, so we count it out,
 // and it counts the receiver as an implicit argument, so we count the receiver
@@ -2774,6 +2729,11 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
}
}
+ // Load the {target}s context.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+
// CallApiCallbackStub's register arguments: code, target, call data, holder,
// function address.
// TODO(turbofan): Consider introducing a JSCallApiCallback operator for
@@ -2784,7 +2744,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
Handle<Object> data(call_handler_info->data(), isolate());
CallApiCallbackStub stub(isolate(), argc);
CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), cid,
cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
@@ -2797,14 +2757,14 @@ Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
&api_function, ExternalReference::DIRECT_API_CALL, isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(stub.GetCode()));
- node->ReplaceInput(1, jsgraph()->Constant(context));
+ node->ReplaceInput(1, context);
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(data));
node->InsertInput(graph()->zone(), 3, holder);
node->InsertInput(graph()->zone(), 4,
jsgraph()->ExternalConstant(function_reference));
node->ReplaceInput(5, receiver);
- // Remove context input.
- node->RemoveInput(6 + argc);
+ node->RemoveInput(6 + argc); // Remove context input.
+ node->ReplaceInput(7 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
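The InsertInput/ReplaceInput/RemoveInput sequence above rearranges the JSCall node into CallApiCallbackStub's expected layout, and the newly added context load is threaded into the effect chain via the final ReplaceInput. A toy reproduction of the resulting input order for argc == 2, with a plain vector standing in for the node inputs (names illustrative):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      const int argc = 2;  // example arity
      // JSCall inputs before the rewrite (argc == 2):
      std::vector<std::string> in = {"target",      "receiver",    "arg0",
                                     "arg1",        "old_context", "frame_state",
                                     "effect",      "control"};
      in.insert(in.begin(), "stub_code");        // InsertInput(0, code)
      in[1] = "fn_context";                      // ReplaceInput(1, context): the new load
      in.insert(in.begin() + 2, "call_data");    // InsertInput(2, data)
      in.insert(in.begin() + 3, "holder");       // InsertInput(3, holder)
      in.insert(in.begin() + 4, "api_fn_addr");  // InsertInput(4, function_reference)
      in[5] = "receiver";                        // ReplaceInput(5, receiver)
      in.erase(in.begin() + 6 + argc);           // RemoveInput(6 + argc): old context
      in[7 + argc] = "new_effect";               // ReplaceInput(7 + argc, effect)
      for (const auto& s : in) std::printf("%s ", s.c_str());
      std::printf("\n");
    }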
@@ -3082,93 +3042,11 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (m.HasValue()) {
if (m.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
-
- // Raise a TypeError if the {target} is a "classConstructor".
- if (IsClassConstructor(shared->kind())) {
- NodeProperties::ReplaceValueInputs(node, target);
- NodeProperties::ChangeOp(
- node, javascript()->CallRuntime(
- Runtime::kThrowConstructorNonCallableError, 1));
- return Changed(node);
- }
// Don't inline cross native context.
if (function->native_context() != *native_context()) return NoChange();
- // Check for known builtin functions.
- switch (shared->code()->builtin_index()) {
- case Builtins::kArrayConstructor:
- return ReduceArrayConstructor(node);
- case Builtins::kBooleanConstructor:
- return ReduceBooleanConstructor(node);
- case Builtins::kFunctionPrototypeApply:
- return ReduceFunctionPrototypeApply(node);
- case Builtins::kFastFunctionPrototypeBind:
- return ReduceFunctionPrototypeBind(node);
- case Builtins::kFunctionPrototypeCall:
- return ReduceFunctionPrototypeCall(node);
- case Builtins::kFunctionPrototypeHasInstance:
- return ReduceFunctionPrototypeHasInstance(node);
- case Builtins::kObjectConstructor:
- return ReduceObjectConstructor(node);
- case Builtins::kObjectGetPrototypeOf:
- return ReduceObjectGetPrototypeOf(node);
- case Builtins::kObjectIs:
- return ReduceObjectIs(node);
- case Builtins::kObjectPrototypeGetProto:
- return ReduceObjectPrototypeGetProto(node);
- case Builtins::kObjectPrototypeHasOwnProperty:
- return ReduceObjectPrototypeHasOwnProperty(node);
- case Builtins::kObjectPrototypeIsPrototypeOf:
- return ReduceObjectPrototypeIsPrototypeOf(node);
- case Builtins::kReflectApply:
- return ReduceReflectApply(node);
- case Builtins::kReflectConstruct:
- return ReduceReflectConstruct(node);
- case Builtins::kReflectGet:
- return ReduceReflectGet(node);
- case Builtins::kReflectGetPrototypeOf:
- return ReduceReflectGetPrototypeOf(node);
- case Builtins::kReflectHas:
- return ReduceReflectHas(node);
- case Builtins::kArrayForEach:
- return ReduceArrayForEach(function, node);
- case Builtins::kArrayMap:
- return ReduceArrayMap(function, node);
- case Builtins::kArrayFilter:
- return ReduceArrayFilter(function, node);
- case Builtins::kArrayReduce:
- return ReduceArrayReduce(function, node);
- case Builtins::kArrayReduceRight:
- return ReduceArrayReduceRight(function, node);
- case Builtins::kArrayPrototypeFind:
- return ReduceArrayFind(ArrayFindVariant::kFind, function, node);
- case Builtins::kArrayPrototypeFindIndex:
- return ReduceArrayFind(ArrayFindVariant::kFindIndex, function, node);
- case Builtins::kArrayEvery:
- return ReduceArrayEvery(function, node);
- case Builtins::kArrayPrototypePush:
- return ReduceArrayPrototypePush(node);
- case Builtins::kArrayPrototypePop:
- return ReduceArrayPrototypePop(node);
- case Builtins::kArrayPrototypeShift:
- return ReduceArrayPrototypeShift(node);
- case Builtins::kReturnReceiver:
- return ReduceReturnReceiver(node);
- case Builtins::kStringPrototypeIndexOf:
- return ReduceStringPrototypeIndexOf(function, node);
- case Builtins::kStringPrototypeCharAt:
- return ReduceStringPrototypeCharAt(node);
- case Builtins::kStringPrototypeCharCodeAt:
- return ReduceStringPrototypeCharCodeAt(node);
- default:
- break;
- }
-
- if (!FLAG_runtime_stats && shared->IsApiFunction()) {
- return ReduceCallApiFunction(node, function);
- }
+ return ReduceJSCall(node, handle(function->shared(), isolate()));
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(m.Value());
@@ -3206,6 +3084,15 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
+ // If {target} is the result of a JSCreateClosure operation, we can
+ // just immediately try to inline based on the SharedFunctionInfo,
+ // since TurboFan generally doesn't inline cross-context, and hence
+ // the {target} must have the same native context as the call site.
+ if (target->opcode() == IrOpcode::kJSCreateClosure) {
+ CreateClosureParameters const& p = CreateClosureParametersOf(target->op());
+ return ReduceJSCall(node, p.shared_info());
+ }
+
// If {target} is the result of a JSCreateBoundFunction operation,
// we can just fold the construction and call the bound target
// function directly instead.
@@ -3240,9 +3127,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
- // Extract feedback from the {node} using the CallICNexus.
+ // Extract feedback from the {node} using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
if (flags() & kBailoutOnUninitialized) {
// Introduce a SOFT deopt if the call {node} wasn't executed so far.
@@ -3281,6 +3168,209 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceJSCall(Node* node,
+ Handle<SharedFunctionInfo> shared) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+
+ // Do not reduce calls to functions with break points.
+ if (shared->HasBreakInfo()) return NoChange();
+
+ // Raise a TypeError if the {target} is a "classConstructor".
+ if (IsClassConstructor(shared->kind())) {
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(
+ node, javascript()->CallRuntime(
+ Runtime::kThrowConstructorNonCallableError, 1));
+ return Changed(node);
+ }
+
+ // Check for known builtin functions.
+ switch (shared->code()->builtin_index()) {
+ case Builtins::kArrayConstructor:
+ return ReduceArrayConstructor(node);
+ case Builtins::kBooleanConstructor:
+ return ReduceBooleanConstructor(node);
+ case Builtins::kFunctionPrototypeApply:
+ return ReduceFunctionPrototypeApply(node);
+ case Builtins::kFastFunctionPrototypeBind:
+ return ReduceFunctionPrototypeBind(node);
+ case Builtins::kFunctionPrototypeCall:
+ return ReduceFunctionPrototypeCall(node);
+ case Builtins::kFunctionPrototypeHasInstance:
+ return ReduceFunctionPrototypeHasInstance(node);
+ case Builtins::kObjectConstructor:
+ return ReduceObjectConstructor(node);
+ case Builtins::kObjectGetPrototypeOf:
+ return ReduceObjectGetPrototypeOf(node);
+ case Builtins::kObjectIs:
+ return ReduceObjectIs(node);
+ case Builtins::kObjectPrototypeGetProto:
+ return ReduceObjectPrototypeGetProto(node);
+ case Builtins::kObjectPrototypeHasOwnProperty:
+ return ReduceObjectPrototypeHasOwnProperty(node);
+ case Builtins::kObjectPrototypeIsPrototypeOf:
+ return ReduceObjectPrototypeIsPrototypeOf(node);
+ case Builtins::kReflectApply:
+ return ReduceReflectApply(node);
+ case Builtins::kReflectConstruct:
+ return ReduceReflectConstruct(node);
+ case Builtins::kReflectGet:
+ return ReduceReflectGet(node);
+ case Builtins::kReflectGetPrototypeOf:
+ return ReduceReflectGetPrototypeOf(node);
+ case Builtins::kReflectHas:
+ return ReduceReflectHas(node);
+ case Builtins::kArrayForEach:
+ return ReduceArrayForEach(node, shared);
+ case Builtins::kArrayMap:
+ return ReduceArrayMap(node, shared);
+ case Builtins::kArrayFilter:
+ return ReduceArrayFilter(node, shared);
+ case Builtins::kArrayReduce:
+ return ReduceArrayReduce(node, ArrayReduceDirection::kLeft, shared);
+ case Builtins::kArrayReduceRight:
+ return ReduceArrayReduce(node, ArrayReduceDirection::kRight, shared);
+ case Builtins::kArrayPrototypeFind:
+ return ReduceArrayFind(node, ArrayFindVariant::kFind, shared);
+ case Builtins::kArrayPrototypeFindIndex:
+ return ReduceArrayFind(node, ArrayFindVariant::kFindIndex, shared);
+ case Builtins::kArrayEvery:
+ return ReduceArrayEvery(node, shared);
+ case Builtins::kArraySome:
+ return ReduceArraySome(node, shared);
+ case Builtins::kArrayPrototypePush:
+ return ReduceArrayPrototypePush(node);
+ case Builtins::kArrayPrototypePop:
+ return ReduceArrayPrototypePop(node);
+ case Builtins::kArrayPrototypeShift:
+ return ReduceArrayPrototypeShift(node);
+ case Builtins::kMathAbs:
+ return ReduceMathUnary(node, simplified()->NumberAbs());
+ case Builtins::kMathAcos:
+ return ReduceMathUnary(node, simplified()->NumberAcos());
+ case Builtins::kMathAcosh:
+ return ReduceMathUnary(node, simplified()->NumberAcosh());
+ case Builtins::kMathAsin:
+ return ReduceMathUnary(node, simplified()->NumberAsin());
+ case Builtins::kMathAsinh:
+ return ReduceMathUnary(node, simplified()->NumberAsinh());
+ case Builtins::kMathAtan:
+ return ReduceMathUnary(node, simplified()->NumberAtan());
+ case Builtins::kMathAtanh:
+ return ReduceMathUnary(node, simplified()->NumberAtanh());
+ case Builtins::kMathCbrt:
+ return ReduceMathUnary(node, simplified()->NumberCbrt());
+ case Builtins::kMathCeil:
+ return ReduceMathUnary(node, simplified()->NumberCeil());
+ case Builtins::kMathCos:
+ return ReduceMathUnary(node, simplified()->NumberCos());
+ case Builtins::kMathCosh:
+ return ReduceMathUnary(node, simplified()->NumberCosh());
+ case Builtins::kMathExp:
+ return ReduceMathUnary(node, simplified()->NumberExp());
+ case Builtins::kMathExpm1:
+ return ReduceMathUnary(node, simplified()->NumberExpm1());
+ case Builtins::kMathFloor:
+ return ReduceMathUnary(node, simplified()->NumberFloor());
+ case Builtins::kMathFround:
+ return ReduceMathUnary(node, simplified()->NumberFround());
+ case Builtins::kMathLog:
+ return ReduceMathUnary(node, simplified()->NumberLog());
+ case Builtins::kMathLog1p:
+ return ReduceMathUnary(node, simplified()->NumberLog1p());
+ case Builtins::kMathLog10:
+ return ReduceMathUnary(node, simplified()->NumberLog10());
+ case Builtins::kMathLog2:
+ return ReduceMathUnary(node, simplified()->NumberLog2());
+ case Builtins::kMathRound:
+ return ReduceMathUnary(node, simplified()->NumberRound());
+ case Builtins::kMathSign:
+ return ReduceMathUnary(node, simplified()->NumberSign());
+ case Builtins::kMathSin:
+ return ReduceMathUnary(node, simplified()->NumberSin());
+ case Builtins::kMathSinh:
+ return ReduceMathUnary(node, simplified()->NumberSinh());
+ case Builtins::kMathSqrt:
+ return ReduceMathUnary(node, simplified()->NumberSqrt());
+ case Builtins::kMathTan:
+ return ReduceMathUnary(node, simplified()->NumberTan());
+ case Builtins::kMathTanh:
+ return ReduceMathUnary(node, simplified()->NumberTanh());
+ case Builtins::kMathTrunc:
+ return ReduceMathUnary(node, simplified()->NumberTrunc());
+ case Builtins::kMathAtan2:
+ return ReduceMathBinary(node, simplified()->NumberAtan2());
+ case Builtins::kMathPow:
+ return ReduceMathBinary(node, simplified()->NumberPow());
+ case Builtins::kMathClz32:
+ return ReduceMathClz32(node);
+ case Builtins::kMathImul:
+ return ReduceMathImul(node);
+ case Builtins::kMathMax:
+ return ReduceMathMinMax(node, simplified()->NumberMax(),
+ jsgraph()->Constant(-V8_INFINITY));
+ case Builtins::kMathMin:
+ return ReduceMathMinMax(node, simplified()->NumberMin(),
+ jsgraph()->Constant(V8_INFINITY));
+ case Builtins::kReturnReceiver:
+ return ReduceReturnReceiver(node);
+ case Builtins::kStringPrototypeIndexOf:
+ return ReduceStringPrototypeIndexOf(node);
+ case Builtins::kStringPrototypeCharAt:
+ return ReduceStringPrototypeStringAt(simplified()->StringCharAt(), node);
+ case Builtins::kStringPrototypeCharCodeAt:
+ return ReduceStringPrototypeStringAt(simplified()->StringCharCodeAt(),
+ node);
+ case Builtins::kStringPrototypeCodePointAt:
+ return ReduceStringPrototypeStringAt(
+ simplified()->StringCodePointAt(UnicodeEncoding::UTF32), node);
+ case Builtins::kStringPrototypeSubstring:
+ return ReduceStringPrototypeSubstring(node);
+ case Builtins::kStringPrototypeSlice:
+ return ReduceStringPrototypeSlice(node);
+#ifdef V8_INTL_SUPPORT
+ case Builtins::kStringPrototypeToLowerCaseIntl:
+ return ReduceStringPrototypeToLowerCaseIntl(node);
+ case Builtins::kStringPrototypeToUpperCaseIntl:
+ return ReduceStringPrototypeToUpperCaseIntl(node);
+#endif // V8_INTL_SUPPORT
+ case Builtins::kStringFromCharCode:
+ return ReduceStringFromCharCode(node);
+ case Builtins::kStringPrototypeIterator:
+ return ReduceStringPrototypeIterator(node);
+ case Builtins::kStringIteratorPrototypeNext:
+ return ReduceStringIteratorPrototypeNext(node);
+ case Builtins::kAsyncFunctionPromiseCreate:
+ return ReduceAsyncFunctionPromiseCreate(node);
+ case Builtins::kAsyncFunctionPromiseRelease:
+ return ReduceAsyncFunctionPromiseRelease(node);
+ case Builtins::kPromiseCapabilityDefaultReject:
+ return ReducePromiseCapabilityDefaultReject(node);
+ case Builtins::kPromiseCapabilityDefaultResolve:
+ return ReducePromiseCapabilityDefaultResolve(node);
+ case Builtins::kPromiseInternalConstructor:
+ return ReducePromiseInternalConstructor(node);
+ case Builtins::kPromiseInternalReject:
+ return ReducePromiseInternalReject(node);
+ case Builtins::kPromiseInternalResolve:
+ return ReducePromiseInternalResolve(node);
+ case Builtins::kPromisePrototypeCatch:
+ return ReducePromisePrototypeCatch(node);
+ case Builtins::kPromisePrototypeFinally:
+ return ReducePromisePrototypeFinally(node);
+ case Builtins::kPromisePrototypeThen:
+ return ReducePromisePrototypeThen(node);
+ default:
+ break;
+ }
+
+ if (!FLAG_runtime_stats && shared->IsApiFunction()) {
+ return ReduceCallApiFunction(node, shared);
+ }
+ return NoChange();
+}
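Each unary Math builtin in the switch above maps one-to-one onto a pure simplified number operator via ReduceMathUnary; conceptually the dispatch is a table from builtin to numeric function. A toy analogue (illustrative only; V8 dispatches on builtin_index, not on names):

    #include <cmath>
    #include <cstdio>
    #include <map>
    #include <string>

    int main() {
      // One pure numeric function per builtin, as in the ReduceMathUnary calls.
      const std::map<std::string, double (*)(double)> unary = {
          {"Math.sqrt", std::sqrt},
          {"Math.floor", std::floor},
          {"Math.log", std::log},
          {"Math.trunc", std::trunc},
      };
      std::printf("%f\n", unary.at("Math.sqrt")(2.0));  // 1.414214
    }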
+
Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
@@ -3310,9 +3400,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Extract feedback from the {node} using the CallICNexus.
+ // Extract feedback from the {node} using the FeedbackNexus.
if (p.feedback().IsValid()) {
- CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
if (flags() & kBailoutOnUninitialized) {
// Introduce a SOFT deopt if the construct {node} wasn't executed so
@@ -3394,6 +3484,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
if (m.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ // Do not reduce constructors with break points.
+ if (function->shared()->HasBreakInfo()) return NoChange();
+
// Don't inline cross native context.
if (function->native_context() != *native_context()) return NoChange();
@@ -3431,6 +3524,11 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return Changed(node);
}
}
+
+  // Check for the Promise constructor.
+ if (*function == function->native_context()->promise_function()) {
+ return ReducePromiseConstructor(node);
+ }
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(m.Value());
@@ -3516,10 +3614,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
-// ES6 String.prototype.indexOf(searchString [, position])
-// #sec-string.prototype.indexof
-Reduction JSCallReducer::ReduceStringPrototypeIndexOf(
- Handle<JSFunction> function, Node* node) {
+// ES #sec-string.prototype.indexof
+Reduction JSCallReducer::ReduceStringPrototypeIndexOf(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -3557,6 +3653,171 @@ Reduction JSCallReducer::ReduceStringPrototypeIndexOf(
return NoChange();
}
+// ES #sec-string.prototype.substring
+Reduction JSCallReducer::ReduceStringPrototypeSubstring(Node* node) {
+ if (node->op()->ValueInputCount() < 3) return NoChange();
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* start = NodeProperties::GetValueInput(node, 2);
+ Node* end = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+
+ start = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), start,
+ effect, control);
+
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
+
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), end,
+ jsgraph()->UndefinedConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = length;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = efalse = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ end, efalse, if_false);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ end = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ Node* finalStart =
+ graph()->NewNode(simplified()->NumberMin(),
+ graph()->NewNode(simplified()->NumberMax(), start,
+ jsgraph()->ZeroConstant()),
+ length);
+ Node* finalEnd =
+ graph()->NewNode(simplified()->NumberMin(),
+ graph()->NewNode(simplified()->NumberMax(), end,
+ jsgraph()->ZeroConstant()),
+ length);
+
+ Node* from =
+ graph()->NewNode(simplified()->NumberMin(), finalStart, finalEnd);
+ Node* to = graph()->NewNode(simplified()->NumberMax(), finalStart, finalEnd);
+
+ Node* value = effect = graph()->NewNode(simplified()->StringSubstring(),
+ receiver, from, to, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
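
The min/max combination above is exactly the clamping prescribed for String.prototype.substring: both indices are clamped to [0, length] and then put into ascending order. A standalone scalar sketch of those steps, for illustration only:

#include <algorithm>
#include <string>

std::string SubstringSketch(const std::string& s, long start, long end) {
  const long length = static_cast<long>(s.size());
  const long final_start = std::min(std::max(start, 0L), length);  // clamp
  const long final_end = std::min(std::max(end, 0L), length);      // clamp
  const long from = std::min(final_start, final_end);  // order the pair
  const long to = std::max(final_start, final_end);
  return s.substr(from, to - from);
}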
+
+// ES #sec-string.prototype.slice
+Reduction JSCallReducer::ReduceStringPrototypeSlice(Node* node) {
+ if (node->op()->ValueInputCount() < 3) return NoChange();
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* start = NodeProperties::GetValueInput(node, 2);
+ Node* end = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+
+ start = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), start,
+ effect, control);
+
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Replace {end} argument with {length} if it is undefined.
+ {
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), end,
+ jsgraph()->UndefinedConstant());
+
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = length;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = efalse = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), end, efalse, if_false);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ end = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+
+ Node* from = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->NumberLessThan(), start,
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(
+ simplified()->NumberMax(),
+ graph()->NewNode(simplified()->NumberAdd(), length, start),
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(simplified()->NumberMin(), start, length));
+ // {from} is always in non-negative Smi range, but our typer cannot
+ // figure that out yet.
+ from = effect = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()),
+ from, effect, control);
+
+ Node* to = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->NumberLessThan(), end,
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(simplified()->NumberMax(),
+ graph()->NewNode(simplified()->NumberAdd(), length, end),
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(simplified()->NumberMin(), end, length));
+ // {to} is always in non-negative Smi range, but our typer cannot
+ // figure that out yet.
+ to = effect = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()), to,
+ effect, control);
+
+ Node* result_string = nullptr;
+  // Return the empty string unless {from} is smaller than {to}.
+ {
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(), from, to);
+
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = etrue = graph()->NewNode(simplified()->StringSubstring(),
+ receiver, from, to, etrue, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = jsgraph()->EmptyStringConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ result_string =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+
+ ReplaceWithValue(node, result_string, effect, control);
+ return Replace(result_string);
+}
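
slice differs from substring in its treatment of negative indices, which count back from the end of the string, and in that an empty string results unless {from} ends up strictly below {to}. A scalar sketch of the Select and branch logic above, for illustration only:

#include <algorithm>
#include <string>

std::string SliceSketch(const std::string& s, long start, long end) {
  const long length = static_cast<long>(s.size());
  // Negative indices count from the end; others clamp to the length.
  const long from =
      start < 0 ? std::max(length + start, 0L) : std::min(start, length);
  const long to = end < 0 ? std::max(length + end, 0L) : std::min(end, length);
  // Mirrors the final branch: empty result unless {from} < {to}.
  return from < to ? s.substr(from, to - from) : std::string();
}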
+
Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
@@ -3766,7 +4027,7 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
return NoChange();
- if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
return NoChange();
}
@@ -3882,7 +4143,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
return NoChange();
- if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ if (!UnionElementsKindUptoSize(&kind, receiver_map->elements_kind()))
return NoChange();
}
@@ -4005,7 +4266,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
{
// Call the generic C++ implementation.
const int builtin_index = Builtins::kArrayShift;
- CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
+ auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
Builtins::name(builtin_index), node->op()->properties(),
CallDescriptor::kNeedsFrameState);
@@ -4017,7 +4278,7 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
Node* argc =
jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(desc), stub_code, receiver,
+ graph()->NewNode(common()->Call(call_descriptor), stub_code, receiver,
jsgraph()->PaddingConstant(), argc, target,
jsgraph()->UndefinedConstant(), entry, argc, context,
frame_state, efalse1, if_false1);
@@ -4048,7 +4309,13 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
}
// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
-Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+// ES6 section 21.1.3.3 String.prototype.codePointAt ( pos )
+Reduction JSCallReducer::ReduceStringPrototypeStringAt(
+ const Operator* string_access_operator, Node* node) {
+ DCHECK(string_access_operator->opcode() == IrOpcode::kStringCharAt ||
+ string_access_operator->opcode() == IrOpcode::kStringCharCodeAt ||
+ string_access_operator->opcode() == IrOpcode::kStringCodePointAt);
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -4056,112 +4323,919 @@ Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
}
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* index = jsgraph()->ZeroConstant();
+ Node* index = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->ZeroConstant();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // Ensure that the {receiver} is actually a String.
receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
receiver, effect, control);
- if (node->op()->ValueInputCount() >= 3) {
- index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- NodeProperties::GetValueInput(node, 2),
- effect, control);
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- }
// Determine the {receiver} length.
Node* receiver_length =
graph()->NewNode(simplified()->StringLength(), receiver);
- // Check if {index} is less than {receiver} length.
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ // Check that the {index} is within range.
+ index = effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
index, receiver_length);
+ Node* value = effect = graph()->NewNode(string_access_operator, receiver,
+ masked_index, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
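
All three string accessors now share a single shape: CheckBounds deoptimizes when the index is out of range, and MaskIndexWithBound additionally clamps the index data-dependently so that even a mispredicted bounds check cannot drive a speculative out-of-bounds load (a Spectre-style mitigation). A branch-free sketch of that masking idea, assuming the mask-to-zero behavior of the operator:

#include <cstddef>

// Yields {index} when index < bound and 0 otherwise, without a branch.
size_t MaskIndexSketch(size_t index, size_t bound) {
  const size_t mask = -static_cast<size_t>(index < bound);  // all ones or zero
  return index & mask;
}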
+
+#ifdef V8_INTL_SUPPORT
+
+Reduction JSCallReducer::ReduceStringPrototypeToLowerCaseIntl(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* receiver = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()),
+ NodeProperties::GetValueInput(node, 1), effect, control);
+
+ NodeProperties::ReplaceEffectInput(node, effect);
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->StringToLowerCaseIntl());
+ NodeProperties::SetType(node, Type::String());
+ return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceStringPrototypeToUpperCaseIntl(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* receiver = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()),
+ NodeProperties::GetValueInput(node, 1), effect, control);
+
+ NodeProperties::ReplaceEffectInput(node, effect);
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->StringToUpperCaseIntl());
+ NodeProperties::SetType(node, Type::String());
+ return Changed(node);
+}
+
+#endif // V8_INTL_SUPPORT
+
+// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+Reduction JSCallReducer::ReduceStringFromCharCode(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() == 3) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* input = NodeProperties::GetValueInput(node, 2);
+
+ input = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ input, effect, control);
+
+ Node* value = graph()->NewNode(simplified()->StringFromCharCode(), input);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+ return NoChange();
+}
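
The single-argument fast path speculates the input to a number; StringFromCharCode then produces the one-element string for that value reduced to a UTF-16 code unit, i.e. the spec's ToUint16. A scalar sketch for finite inputs, for illustration only:

#include <cmath>
#include <cstdint>

// ES ToUint16 for a finite double: truncate, then reduce modulo 2^16.
uint16_t ToUint16Sketch(double n) {
  double m = std::fmod(std::trunc(n), 65536.0);
  if (m < 0) m += 65536.0;
  return static_cast<uint16_t>(m);
}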
+
+Reduction JSCallReducer::ReduceStringPrototypeIterator(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()),
+ NodeProperties::GetValueInput(node, 1), effect, control);
+ Node* iterator = effect =
+ graph()->NewNode(javascript()->CreateStringIterator(), receiver,
+ jsgraph()->NoContextConstant(), effect);
+ ReplaceWithValue(node, iterator, effect, control);
+ return Replace(iterator);
+}
+
+Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
+ JS_STRING_ITERATOR_TYPE)) {
+ Node* string = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
+ receiver, effect, control);
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
+ receiver, effect, control);
+ Node* length = graph()->NewNode(simplified()->StringLength(), string);
+
+ // branch0: if (index < length)
+ Node* check0 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* etrue0 = effect;
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* done_true;
+ Node* vtrue0;
+ {
+ done_true = jsgraph()->FalseConstant();
+ Node* codepoint = etrue0 = graph()->NewNode(
+ simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string,
+ index, etrue0, if_true0);
+ vtrue0 = graph()->NewNode(
+ simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), codepoint);
+
+ // Update iterator.[[NextIndex]]
+ Node* char_length =
+ graph()->NewNode(simplified()->StringLength(), vtrue0);
+ index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
+ etrue0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
+ receiver, index, etrue0, if_true0);
+ }
- Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, if_true);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* done_false;
+ Node* vfalse0;
+ {
+ vfalse0 = jsgraph()->UndefinedConstant();
+ done_false = jsgraph()->TrueConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, effect, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+ Node* done =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ done_true, done_false, control);
+
+ value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+ value, done, context, effect);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
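
The inlined next() loads the full code point at the iterator's index and bumps the index by that code point's length in UTF-16 code units, so a surrogate pair is consumed as a single iteration result. A standalone sketch of the stepping rule, not part of this patch:

#include <cstdint>
#include <utility>

// Returns the code point at {index} plus the number of UTF-16 units consumed:
// two for a valid surrogate pair, one otherwise (BMP or unpaired surrogate).
std::pair<uint32_t, int> CodePointStepSketch(const uint16_t* s, int index,
                                             int length) {
  const uint16_t lead = s[index];
  if (lead >= 0xD800 && lead <= 0xDBFF && index + 1 < length) {
    const uint16_t trail = s[index + 1];
    if (trail >= 0xDC00 && trail <= 0xDFFF) {
      const uint32_t cp = 0x10000 + ((uint32_t{lead} - 0xD800) << 10) +
                          (uint32_t{trail} - 0xDC00);
      return {cp, 2};
    }
  }
  return {lead, 1};
}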
+
+Reduction JSCallReducer::ReduceAsyncFunctionPromiseCreate(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+
+ // Morph this {node} into a JSCreatePromise node.
+ RelaxControls(node);
+ node->ReplaceInput(0, context);
+ node->ReplaceInput(1, effect);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, javascript()->CreatePromise());
+ return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceAsyncFunctionPromiseRelease(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+
+  // The AsyncFunctionPromiseRelease builtin is a no-op as long as the
+  // debugger is not active and no promise hook has ever been installed.
+ Node* value = jsgraph()->UndefinedConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
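
Both async-function reductions rely on the same protector-cell contract: the fast path is only sound while the promise hook protector is intact, and AssumePropertyCell records a compilation dependency so the optimized code is discarded if the cell is ever invalidated, for example when the debugger installs a promise hook. A minimal sketch of that contract, with hypothetical stand-in types:

#include <vector>

struct ProtectorCellSketch {
  bool intact = true;
  std::vector<int*> dependent_code;  // stand-in for code dependencies
};

// Refuse the optimization when the cell is already invalid; otherwise record
// a dependency so a later invalidation deoptimizes the recorded code.
bool TryDependOnProtector(ProtectorCellSketch& cell, int* code) {
  if (!cell.intact) return false;       // NoChange()
  cell.dependent_code.push_back(code);  // AssumePropertyCell(...)
  return true;
}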
+
+// ES section #sec-promise-reject-functions
+Reduction JSCallReducer::ReducePromiseCapabilityDefaultReject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* resolution = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+  // We need to execute in the {target}'s context.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+
+ // Grab the promise closed over by {target}.
+ Node* promise = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ context, effect, control);
+
+ // Check if the {promise} is still pending or already settled.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), promise,
+ jsgraph()->UndefinedConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
- // Return the empty string otherwise.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->EmptyStringConstant();
+ Node* efalse = effect;
+ {
+ // Mark the {promise} as settled.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ context, jsgraph()->UndefinedConstant(), efalse, if_false);
+
+ // Check if we should emit a debug event.
+ Node* debug_event = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kDebugEventSlot)),
+ context, efalse, if_false);
+
+ // Actually reject the {promise}.
+ efalse =
+ graph()->NewNode(javascript()->RejectPromise(), promise, resolution,
+ debug_event, context, frame_state, efalse, if_false);
+ }
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* value = jsgraph()->UndefinedConstant();
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
-// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
-Reduction JSCallReducer::ReduceStringPrototypeCharCodeAt(Node* node) {
+// ES section #sec-promise-resolve-functions
+Reduction JSCallReducer::ReducePromiseCapabilityDefaultResolve(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* resolution = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+  // We need to execute in the {target}'s context.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+
+ // Grab the promise closed over by {target}.
+ Node* promise = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ context, effect, control);
+
+ // Check if the {promise} is still pending or already settled.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), promise,
+ jsgraph()->UndefinedConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Mark the {promise} as settled.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ context, jsgraph()->UndefinedConstant(), efalse, if_false);
+
+ // Actually resolve the {promise}.
+ efalse =
+ graph()->NewNode(javascript()->ResolvePromise(), promise, resolution,
+ context, frame_state, efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ Node* value = jsgraph()->UndefinedConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
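
Both default resolving functions implement the same settle-once protocol visible above: the promise is held in a slot of the closure context, a comparison against undefined detects an already-settled capability, and the slot is cleared before the actual reject or resolve runs. A sketch of that protocol, with hypothetical stand-in types:

#include <functional>

struct ResolvingContextSketch {
  void* promise = nullptr;  // nullptr plays the role of undefined
};

// The first caller wins; later calls see the cleared slot and do nothing.
void SettleOnceSketch(ResolvingContextSketch& ctx,
                      const std::function<void(void*)>& settle) {
  if (ctx.promise == nullptr) return;  // already settled: no-op
  void* promise = ctx.promise;
  ctx.promise = nullptr;               // mark as settled first
  settle(promise);                     // RejectPromise / ResolvePromise
}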
+
+Node* JSCallReducer::CreateArtificialFrameState(
+ Node* node, Node* outer_frame_state, int parameter_count,
+ BailoutId bailout_id, FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared) {
+ const FrameStateFunctionInfo* state_info =
+ common()->CreateFrameStateFunctionInfo(frame_state_type,
+ parameter_count + 1, 0, shared);
+
+ const Operator* op = common()->FrameState(
+ bailout_id, OutputFrameStateCombine::Ignore(), state_info);
+ const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
+ Node* node0 = graph()->NewNode(op0);
+ std::vector<Node*> params;
+ for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
+ params.push_back(node->InputAt(1 + parameter));
+ }
+ const Operator* op_param = common()->StateValues(
+ static_cast<int>(params.size()), SparseInputMask::Dense());
+ Node* params_node = graph()->NewNode(
+ op_param, static_cast<int>(params.size()), &params.front());
+ return graph()->NewNode(op, params_node, node0, node0,
+ jsgraph()->UndefinedConstant(), node->InputAt(0),
+ outer_frame_state);
+}
+
+Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+ ConstructParameters const& p = ConstructParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ // We only inline when we have the executor.
+ if (arity < 1) return NoChange();
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* executor = NodeProperties::GetValueInput(node, 1);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ if (!FLAG_experimental_inline_promise_constructor) return NoChange();
+
+  // Only handle the builtin Promise constructor, not subclasses.
+ if (target != new_target) return NoChange();
+
+ // Add a code dependency on the promise hook protector.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+
+ Handle<SharedFunctionInfo> promise_shared(
+ handle(native_context()->promise_function()->shared()));
+
+ // Insert a construct stub frame into the chain of frame states. This will
+ // reconstruct the proper frame when deoptimizing within the constructor.
+ // For the frame state, we only provide the executor parameter, even if more
+  // arguments were passed. This is not observable from JS.
+ DCHECK_EQ(1, promise_shared->internal_formal_parameter_count());
+ Node* constructor_frame_state = CreateArtificialFrameState(
+ node, outer_frame_state, 1, BailoutId::ConstructStubInvoke(),
+ FrameStateType::kConstructStub, promise_shared);
+
+  // This frame state doesn't ever call the deopt continuation; it's only
+  // necessary to specify a continuation in order to handle the exceptional
+  // case.
+ Node* checkpoint_params[] = {jsgraph()->UndefinedConstant(),
+ jsgraph()->UndefinedConstant()};
+ const int stack_parameters = arraysize(checkpoint_params);
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), promise_shared,
+ Builtins::kPromiseConstructorLazyDeoptContinuation, target, context,
+ &checkpoint_params[0], stack_parameters, constructor_frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ // Check if executor is callable
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ // TODO(petermarshall): The frame state is wrong here.
+ WireInCallbackIsCallableCheck(executor, context, frame_state, effect,
+ &control, &check_fail, &check_throw);
+
+ // Create the resulting JSPromise.
+ Node* promise = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+
+ // 8. CreatePromiseResolvingFunctions
+ // Allocate a promise context for the closures below.
+ Node* promise_context = effect = graph()->NewNode(
+ javascript()->CreateFunctionContext(
+ PromiseBuiltinsAssembler::kPromiseContextLength, FUNCTION_SCOPE),
+ context, context, effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kPromiseSlot)),
+ promise_context, promise, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kDebugEventSlot)),
+ promise_context, jsgraph()->TrueConstant(), effect, control);
+
+ // Allocate the closure for the resolve case.
+ Handle<SharedFunctionInfo> resolve_shared(
+ native_context()->promise_capability_default_resolve_shared_fun(),
+ isolate());
+ Node* resolve = effect =
+ graph()->NewNode(javascript()->CreateClosure(
+ resolve_shared, factory()->many_closures_cell(),
+ handle(resolve_shared->code(), isolate())),
+ promise_context, effect, control);
+
+ // Allocate the closure for the reject case.
+ Handle<SharedFunctionInfo> reject_shared(
+ native_context()->promise_capability_default_reject_shared_fun(),
+ isolate());
+ Node* reject = effect =
+ graph()->NewNode(javascript()->CreateClosure(
+ reject_shared, factory()->many_closures_cell(),
+ handle(reject_shared->code(), isolate())),
+ promise_context, effect, control);
+
+ // Re-use the params from above, but actually set the promise parameter now.
+ checkpoint_params[1] = promise;
+
+ // This simple continuation just returns the created promise.
+ // TODO(petermarshall): If the executor function causes lazy deopt, and it
+ // also throws an exception, we should catch the exception and call the reject
+ // function.
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), promise_shared,
+ Builtins::kPromiseConstructorLazyDeoptContinuation, target, context,
+ &checkpoint_params[0], stack_parameters, constructor_frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ // 9. Call executor with both resolving functions
+ effect = control = graph()->NewNode(
+ javascript()->Call(4, p.frequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNullOrUndefined,
+ SpeculationMode::kDisallowSpeculation),
+ executor, jsgraph()->UndefinedConstant(), resolve, reject, context,
+ frame_state, effect, control);
+
+ Node* exception_effect = effect;
+ Node* exception_control = control;
+ {
+ Node* reason = exception_effect = exception_control = graph()->NewNode(
+ common()->IfException(), exception_control, exception_effect);
+ // 10a. Call reject if the call to executor threw.
+ exception_effect = exception_control = graph()->NewNode(
+ javascript()->Call(3, p.frequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNullOrUndefined,
+ SpeculationMode::kDisallowSpeculation),
+ reject, jsgraph()->UndefinedConstant(), reason, context, frame_state,
+ exception_effect, exception_control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception,
+ exception_effect, &check_fail,
+ &exception_control);
+ }
+ }
+
+ Node* success_effect = effect;
+ Node* success_control = control;
+ {
+ success_control = graph()->NewNode(common()->IfSuccess(), success_control);
+ }
+
+ control =
+ graph()->NewNode(common()->Merge(2), success_control, exception_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), success_effect,
+ exception_effect, control);
+
+ // Wire up the branch for the case when IsCallable fails for the executor.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, promise, effect, control);
+ return Replace(promise);
+}
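
Stripped of the frame-state bookkeeping, the inlined constructor follows the classic executor protocol: allocate the promise, materialize the resolve/reject pair over a shared context, call the executor with both, and route an abrupt completion into reject. A scalar sketch of steps 9 and 10a with hypothetical helper types, not the V8 API:

#include <exception>
#include <functional>

using Resolver = std::function<void(std::exception_ptr)>;
using Executor = std::function<void(Resolver, Resolver)>;

// Call executor(resolve, reject); if it throws, reject with the reason,
// mirroring the IfException wiring above.
void RunExecutorSketch(const Executor& executor, Resolver resolve,
                       Resolver reject) {
  try {
    executor(resolve, reject);
  } catch (...) {
    reject(std::current_exception());
  }
}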
+
+// V8 Extras: v8.createPromise(parent)
+Reduction JSCallReducer::ReducePromiseInternalConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Check that promises aren't being observed through (debug) hooks.
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Install a code dependency on the promise hook protector cell.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+
+ // Create a new pending promise.
+ Node* value = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+}
+
+// V8 Extras: v8.rejectPromise(promise, reason)
+Reduction JSCallReducer::ReducePromiseInternalReject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* promise = node->op()->ValueInputCount() >= 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* reason = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* debug_event = jsgraph()->TrueConstant();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Reject the {promise} using the given {reason}, and trigger debug logic.
+ Node* value = effect =
+ graph()->NewNode(javascript()->RejectPromise(), promise, reason,
+ debug_event, context, frame_state, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// V8 Extras: v8.resolvePromise(promise, resolution)
+Reduction JSCallReducer::ReducePromiseInternalResolve(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* promise = node->op()->ValueInputCount() >= 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* resolution = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Resolve the {promise} using the given {resolution}.
+ Node* value = effect =
+ graph()->NewNode(javascript()->ResolvePromise(), promise, resolution,
+ context, frame_state, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// ES section #sec-promise.prototype.catch
+Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
+ int arity = static_cast<int>(p.arity() - 2);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+  // Check that the Promise#then protector is intact. This protector guards
+ // that all JSPromise instances whose [[Prototype]] is the initial
+ // %PromisePrototype% yield the initial %PromisePrototype%.then method
+ // when looking up "then".
+ if (!isolate()->IsPromiseThenLookupChainIntact()) return NoChange();
+ // Check if we know something about {receiver} already.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSPromiseMap()) return NoChange();
+ if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ return NoChange();
+ }
+ }
+
+ // Add a code dependency on the necessary protectors.
+ dependencies()->AssumePropertyCell(factory()->promise_then_protector());
+
+ // If the {receiver_maps} aren't reliable, we need to repeat the
+ // map check here, guarded by the CALL_IC.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+  // Massage the {node} to call "then" instead by first removing all inputs
+  // following the onRejected parameter, and then padding the parameter list
+  // from the left with undefined until it has two entries.
+ Node* target = jsgraph()->Constant(handle(native_context()->promise_then()));
+ NodeProperties::ReplaceValueInput(node, target, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ for (; arity > 1; --arity) node->RemoveInput(3);
+ for (; arity < 2; ++arity) {
+ node->InsertInput(graph()->zone(), 2, jsgraph()->UndefinedConstant());
+ }
+ NodeProperties::ChangeOp(
+ node, javascript()->Call(2 + arity, p.frequency(), p.feedback(),
+ ConvertReceiverMode::kNotNullOrUndefined,
+ p.speculation_mode()));
+ Reduction const reduction = ReducePromisePrototypeThen(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
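
The massaging above encodes the spec identity promise.catch(onRejected) == promise.then(undefined, onRejected): surplus arguments are dropped and undefined is slotted in as onFulfilled before the node is handed to ReducePromisePrototypeThen. A sketch of the same rewrite on a plain argument vector, a hypothetical stand-in for the node's value inputs:

#include <optional>
#include <vector>

using Value = std::optional<int>;  // std::nullopt plays the role of undefined

// catch(onRejected, extras...) becomes then(undefined, onRejected).
std::vector<Value> CatchToThenArgsSketch(std::vector<Value> args) {
  if (args.size() > 1) args.resize(1);  // drop inputs after onRejected
  if (args.empty()) args.push_back(std::nullopt);  // catch() with no handler
  args.insert(args.begin(), std::nullopt);  // onFulfilled = undefined
  return args;
}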
+
+// ES section #sec-promise.prototype.finally
+Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* index = jsgraph()->ZeroConstant();
+ Node* on_finally = arity >= 1 ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
- receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
- receiver, effect, control);
- if (node->op()->ValueInputCount() >= 3) {
- index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- NodeProperties::GetValueInput(node, 2),
- effect, control);
+ // Check that promises aren't being observed through (debug) hooks.
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ // Check that the Promise#then protector is intact. This protector guards
+ // that all JSPromise instances whose [[Prototype]] is the initial
+ // %PromisePrototype% yield the initial %PromisePrototype%.then method
+ // when looking up "then".
+ if (!isolate()->IsPromiseThenLookupChainIntact()) return NoChange();
+
+ // Also check that the @@species protector is intact, which guards the
+  // lookup of "constructor" on JSPromise instances, whose [[Prototype]] is
+ // the initial %PromisePrototype%, and the Symbol.species lookup on the
+ // %PromisePrototype%.
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+
+ // Check if we know something about {receiver} already.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSPromiseMap()) return NoChange();
+ if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ return NoChange();
+ }
}
- // Determine the {receiver} length.
- Node* receiver_length =
- graph()->NewNode(simplified()->StringLength(), receiver);
+ // Add a code dependency on the necessary protectors.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+ dependencies()->AssumePropertyCell(factory()->promise_then_protector());
+ dependencies()->AssumePropertyCell(factory()->species_protector());
- // Check if {index} is less than {receiver} length.
- Node* check =
- graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
+ // If the {receiver_maps} aren't reliable, we need to repeat the
+ // map check here, guarded by the CALL_IC.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Check if {on_finally} is callable, and if so wrap it into appropriate
+ // closures that perform the finalization.
+ Node* check = graph()->NewNode(simplified()->ObjectIsCallable(), on_finally);
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- // Load the character from the {receiver}.
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* catch_true;
+ Node* then_true;
+ {
+ Node* context = jsgraph()->HeapConstant(native_context());
+ Node* constructor = jsgraph()->HeapConstant(
+ handle(native_context()->promise_function(), isolate()));
+
+ // Allocate shared context for the closures below.
+ context = etrue = graph()->NewNode(
+ javascript()->CreateFunctionContext(
+ PromiseBuiltinsAssembler::kPromiseFinallyContextLength,
+ FUNCTION_SCOPE),
+ context, context, etrue, if_true);
+ etrue =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kOnFinallySlot)),
+ context, on_finally, etrue, if_true);
+ etrue =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForContextSlot(
+ PromiseBuiltinsAssembler::kConstructorSlot)),
+ context, constructor, etrue, if_true);
+
+ // Allocate the closure for the reject case.
+ Handle<SharedFunctionInfo> catch_finally(
+ native_context()->promise_catch_finally_shared_fun(), isolate());
+ catch_true = etrue =
+ graph()->NewNode(javascript()->CreateClosure(
+ catch_finally, factory()->many_closures_cell(),
+ handle(catch_finally->code(), isolate())),
+ context, etrue, if_true);
+
+ // Allocate the closure for the fulfill case.
+ Handle<SharedFunctionInfo> then_finally(
+ native_context()->promise_then_finally_shared_fun(), isolate());
+ then_true = etrue =
+ graph()->NewNode(javascript()->CreateClosure(
+ then_finally, factory()->many_closures_cell(),
+ handle(then_finally->code(), isolate())),
+ context, etrue, if_true);
+ }
- Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
- index, receiver_length);
-
- Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, if_true);
-
- // Return NaN otherwise.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->NaNConstant();
+ Node* efalse = effect;
+ Node* catch_false = on_finally;
+ Node* then_false = on_finally;
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* catch_finally =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ catch_true, catch_false, control);
+ Node* then_finally =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ then_true, then_false, control);
+
+ // At this point we definitely know that {receiver} has one of the
+ // {receiver_maps}, so insert a MapGuard as a hint for the lowering
+ // of the call to "then" below.
+ effect = graph()->NewNode(simplified()->MapGuard(receiver_maps), receiver,
+ effect, control);
+
+  // Massage the {node} to call "then" instead by first removing all inputs
+  // following the onFinally parameter, and then installing the {then_finally}
+  // and {catch_finally} closures as the two parameter inputs.
+ Node* target = jsgraph()->Constant(handle(native_context()->promise_then()));
+ NodeProperties::ReplaceValueInput(node, target, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ for (; arity > 2; --arity) node->RemoveInput(2);
+ for (; arity < 2; ++arity)
+ node->InsertInput(graph()->zone(), 2, then_finally);
+ node->ReplaceInput(2, then_finally);
+ node->ReplaceInput(3, catch_finally);
+ NodeProperties::ChangeOp(
+ node, javascript()->Call(2 + arity, p.frequency(), p.feedback(),
+ ConvertReceiverMode::kNotNullOrUndefined,
+ p.speculation_mode()));
+ Reduction const reduction = ReducePromisePrototypeThen(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* on_fulfilled = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* on_rejected = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check that promises aren't being observed through (debug) hooks.
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
+
+ // Check if the @@species protector is intact. The @@species protector
+ // guards the "constructor" lookup on all JSPromise instances and the
+ // initial Promise.prototype, as well as the Symbol.species lookup on
+ // the Promise constructor.
+ if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+
+ // Check if we know something about {receiver} already.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
+ return NoChange();
+ }
+ DCHECK_NE(0, receiver_maps.size());
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSPromiseMap()) return NoChange();
+ if (receiver_map->prototype() != native_context()->promise_prototype()) {
+ return NoChange();
+ }
+ }
+
+ // Add a code dependency on the necessary protectors.
+ dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ // If the {receiver_maps} aren't reliable, we need to repeat the
+ // map check here, guarded by the CALL_IC.
+ if (infer_receiver_maps_result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Check that {on_fulfilled} is callable.
+ on_fulfilled = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+ graph()->NewNode(simplified()->ObjectIsCallable(), on_fulfilled),
+ on_fulfilled, jsgraph()->UndefinedConstant());
+
+ // Check that {on_rejected} is callable.
+ on_rejected = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+ graph()->NewNode(simplified()->ObjectIsCallable(), on_rejected),
+ on_rejected, jsgraph()->UndefinedConstant());
+
+ // Create the resulting JSPromise.
+ Node* result = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+
+ // Chain {result} onto {receiver}.
+ result = effect = graph()->NewNode(javascript()->PerformPromiseThen(),
+ receiver, on_fulfilled, on_rejected,
+ result, context, effect, control);
+ ReplaceWithValue(node, result, effect, control);
+ return Replace(result);
+}
+
+// ES section #sec-promise.resolve
+Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* value = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if we know something about {receiver} already.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
+ return NoChange();
+ }
+ DCHECK_NE(0, receiver_maps.size());
+
+ // Only reduce when all {receiver_maps} are JSReceiver maps.
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSReceiverMap()) return NoChange();
+ }
+
+ // Morph the {node} into a JSPromiseResolve operation.
+ node->ReplaceInput(0, receiver);
+ node->ReplaceInput(1, value);
+ node->ReplaceInput(2, context);
+ node->ReplaceInput(3, frame_state);
+ node->ReplaceInput(4, effect);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node, javascript()->PromiseResolve());
+ return Changed(node);
}
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index b2656b6be8..675cc6df83 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_JS_CALL_REDUCER_H_
#include "src/base/flags.h"
+#include "src/compiler/frame-states.h"
#include "src/compiler/graph-reducer.h"
#include "src/deoptimize-reason.h"
@@ -28,7 +29,7 @@ class SimplifiedOperatorBuilder;
// Performs strength reduction on {JSConstruct} and {JSCall} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
public:
// Flags that control the mode of operation.
enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 0 };
@@ -54,7 +55,8 @@ class JSCallReducer final : public AdvancedReducer {
private:
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
- Reduction ReduceCallApiFunction(Node* node, Handle<JSFunction> function);
+ Reduction ReduceCallApiFunction(Node* node,
+ Handle<SharedFunctionInfo> shared);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
@@ -71,16 +73,17 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceReflectGet(Node* node);
Reduction ReduceReflectGetPrototypeOf(Node* node);
Reduction ReduceReflectHas(Node* node);
- Reduction ReduceArrayForEach(Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayReduce(Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayReduceRight(Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayMap(Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayFilter(Handle<JSFunction> function, Node* node);
- enum class ArrayFindVariant : uint8_t { kFind, kFindIndex };
- Reduction ReduceArrayFind(ArrayFindVariant variant,
- Handle<JSFunction> function, Node* node);
- Reduction ReduceArrayEvery(Handle<JSFunction> function, Node* node);
- Reduction ReduceArraySome(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayForEach(Node* node, Handle<SharedFunctionInfo> shared);
+ enum class ArrayReduceDirection { kLeft, kRight };
+ Reduction ReduceArrayReduce(Node* node, ArrayReduceDirection direction,
+ Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArrayMap(Node* node, Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArrayFilter(Node* node, Handle<SharedFunctionInfo> shared);
+ enum class ArrayFindVariant { kFind, kFindIndex };
+ Reduction ReduceArrayFind(Node* node, ArrayFindVariant variant,
+ Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArrayEvery(Node* node, Handle<SharedFunctionInfo> shared);
+ Reduction ReduceArraySome(Node* node, Handle<SharedFunctionInfo> shared);
Reduction ReduceArrayPrototypePush(Node* node);
Reduction ReduceArrayPrototypePop(Node* node);
Reduction ReduceArrayPrototypeShift(Node* node);
@@ -91,16 +94,46 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceJSConstructWithArrayLike(Node* node);
Reduction ReduceJSConstructWithSpread(Node* node);
Reduction ReduceJSCall(Node* node);
+ Reduction ReduceJSCall(Node* node, Handle<SharedFunctionInfo> shared);
Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
Reduction ReduceReturnReceiver(Node* node);
- Reduction ReduceStringPrototypeIndexOf(Handle<JSFunction> function,
- Node* node);
- Reduction ReduceStringPrototypeCharAt(Node* node);
- Reduction ReduceStringPrototypeCharCodeAt(Node* node);
+ Reduction ReduceStringPrototypeIndexOf(Node* node);
+ Reduction ReduceStringPrototypeSubstring(Node* node);
+ Reduction ReduceStringPrototypeSlice(Node* node);
+ Reduction ReduceStringPrototypeStringAt(
+ const Operator* string_access_operator, Node* node);
+
+#ifdef V8_INTL_SUPPORT
+ Reduction ReduceStringPrototypeToLowerCaseIntl(Node* node);
+ Reduction ReduceStringPrototypeToUpperCaseIntl(Node* node);
+#endif // V8_INTL_SUPPORT
+
+ Reduction ReduceStringFromCharCode(Node* node);
+ Reduction ReduceStringPrototypeIterator(Node* node);
+ Reduction ReduceStringIteratorPrototypeNext(Node* node);
+
+ Reduction ReduceAsyncFunctionPromiseCreate(Node* node);
+ Reduction ReduceAsyncFunctionPromiseRelease(Node* node);
+ Reduction ReducePromiseCapabilityDefaultReject(Node* node);
+ Reduction ReducePromiseCapabilityDefaultResolve(Node* node);
+ Reduction ReducePromiseConstructor(Node* node);
+ Reduction ReducePromiseInternalConstructor(Node* node);
+ Reduction ReducePromiseInternalReject(Node* node);
+ Reduction ReducePromiseInternalResolve(Node* node);
+ Reduction ReducePromisePrototypeCatch(Node* node);
+ Reduction ReducePromisePrototypeFinally(Node* node);
+ Reduction ReducePromisePrototypeThen(Node* node);
+ Reduction ReducePromiseResolveTrampoline(Node* node);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
+ Reduction ReduceMathUnary(Node* node, const Operator* op);
+ Reduction ReduceMathBinary(Node* node, const Operator* op);
+ Reduction ReduceMathImul(Node* node);
+ Reduction ReduceMathClz32(Node* node);
+ Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value);
+
// Returns the updated {to} node, and updates control and effect along the
// way.
Node* DoFilterPostCallbackWork(ElementsKind kind, Node** control,
@@ -135,6 +168,11 @@ class JSCallReducer final : public AdvancedReducer {
Node** effect, Node** k,
const VectorSlotPair& feedback);
+ Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
+ int parameter_count, BailoutId bailout_id,
+ FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
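
The hunk above switches these reducers from taking a Handle<JSFunction> to taking a Handle<SharedFunctionInfo>. As a rough illustration of why that is the more general key — every closure compiled from the same source function shares one SharedFunctionInfo — here is a minimal standalone C++ sketch; SharedInfo, Closure and CanReduce are invented stand-ins, not V8 types:

    #include <iostream>
    #include <memory>
    #include <string>

    // Invented model: two distinct closures share one SharedInfo, so a
    // reduction keyed on the shared info covers both of them.
    struct SharedInfo { std::string name; };
    struct Closure { std::shared_ptr<SharedInfo> shared; };

    bool CanReduce(const Closure& c, const SharedInfo* expected) {
      return c.shared.get() == expected;  // compares the shared info, not the closure
    }

    int main() {
      auto shared = std::make_shared<SharedInfo>(SharedInfo{"Array.prototype.map"});
      Closure a{shared}, b{shared};  // distinct closure objects, same function
      std::cout << CanReduce(a, shared.get()) << CanReduce(b, shared.get()) << "\n";  // 11
    }
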
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index d3b9ee4e70..f535b52a27 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -143,8 +143,12 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateIterResultObject:
return ReduceJSCreateIterResultObject(node);
+ case IrOpcode::kJSCreateStringIterator:
+ return ReduceJSCreateStringIterator(node);
case IrOpcode::kJSCreateKeyValueArray:
return ReduceJSCreateKeyValueArray(node);
+ case IrOpcode::kJSCreatePromise:
+ return ReduceJSCreatePromise(node);
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
return ReduceJSCreateLiteralArrayOrObject(node);
@@ -346,6 +350,13 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// whether there conceptually is an arguments adaptor frame in the call
// chain.
Node* const args_state = GetArgumentsFrameState(frame_state);
+ if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
+ IrOpcode::kDeadValue) {
+ // This protects against an incompletely propagated DeadValue node.
+ // If the FrameState has a DeadValue input, then this node will be
+ // pruned anyway.
+ return NoChange();
+ }
FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
// Prepare element backing store to be used by arguments object.
bool has_aliased_arguments = false;
@@ -379,6 +390,13 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// whether there conceptually is an arguments adaptor frame in the call
// chain.
Node* const args_state = GetArgumentsFrameState(frame_state);
+ if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
+ IrOpcode::kDeadValue) {
+ // This protects against an incompletely propagated DeadValue node.
+ // If the FrameState has a DeadValue input, then this node will be
+ // pruned anyway.
+ return NoChange();
+ }
FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
// Prepare element backing store to be used by arguments object.
Node* const elements = AllocateArguments(effect, control, args_state);
@@ -408,6 +426,13 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// whether there conceptually is an arguments adaptor frame in the call
// chain.
Node* const args_state = GetArgumentsFrameState(frame_state);
+ if (args_state->InputAt(kFrameStateParametersInput)->opcode() ==
+ IrOpcode::kDeadValue) {
+ // This protects against an incompletely propagated DeadValue node.
+ // If the FrameState has a DeadValue input, then this node will be
+ // pruned anyway.
+ return NoChange();
+ }
FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
// Prepare element backing store to be used by the rest array.
Node* const elements =
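
The same three-line guard is inserted in all three arguments-object paths above: if the FrameState's parameters input is a DeadValue, the reducer returns NoChange() because the node is about to be pruned anyway. A standalone sketch of the guard's shape, with invented Node/TryReduce types rather than V8's:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Invented types: bail out of a reduction when a required input is
    // a DeadValue, since the surrounding node will be pruned anyway.
    struct Node {
      std::string opcode;
      std::vector<Node*> inputs;
    };

    bool TryReduce(Node* frame_state, int parameters_input) {
      if (frame_state->inputs[parameters_input]->opcode == "DeadValue")
        return false;  // NoChange()
      return true;     // safe to keep lowering
    }

    int main() {
      Node dead{"DeadValue", {}};
      Node frame_state{"FrameState", {&dead}};
      std::printf("%d\n", TryReduce(&frame_state, 0));  // prints 0
    }
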
@@ -490,8 +515,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
if (initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
a.Store(AccessBuilder::ForJSAsyncGeneratorObjectQueue(), undefined);
- a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise(),
- undefined);
+ a.Store(AccessBuilder::ForJSAsyncGeneratorObjectIsAwaiting(),
+ jsgraph()->ZeroConstant());
}
// Handle in-object properties, too.
@@ -682,37 +707,37 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
if (arity == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
arity + 1, CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else if (arity == 1) {
// Require elements kind to "go holey".
ArraySingleArgumentConstructorStub stub(
isolate(), GetHoleyElementsKind(elements_kind), override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
arity + 1, CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
DCHECK_GT(arity, 1);
ArrayNArgumentsConstructorStub stub(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
arity + 1, CallDescriptor::kNeedsFrameState);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, type_info);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
return Changed(node);
}
@@ -896,6 +921,8 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
Handle<SharedFunctionInfo> shared = p.shared_info();
+ Handle<FeedbackCell> feedback_cell = p.feedback_cell();
+ Handle<Code> code = p.code();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
@@ -903,45 +930,54 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// Use inline allocation of closures only for instantiation sites that have
  // seen more than one instantiation; this simplifies the generated code and
// also serves as a heuristic of which allocation sites benefit from it.
- FeedbackSlot slot(FeedbackVector::ToSlot(p.feedback().index()));
- Handle<Cell> vector_cell(Cell::cast(p.feedback().vector()->Get(slot)));
- if (vector_cell->map() == isolate()->heap()->many_closures_cell_map()) {
- Handle<Map> function_map(
- Map::cast(native_context()->get(shared->function_map_index())));
- Node* lazy_compile_builtin = jsgraph()->HeapConstant(
- handle(isolate()->builtins()->builtin(Builtins::kCompileLazy)));
- DCHECK(!function_map->IsInobjectSlackTrackingInProgress());
- DCHECK(!function_map->is_dictionary_map());
-
- // Emit code to allocate the JSFunction instance.
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(function_map->instance_size());
- a.Store(AccessBuilder::ForMap(), function_map);
- a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
- a.Store(AccessBuilder::ForJSFunctionContext(), context);
- a.Store(AccessBuilder::ForJSFunctionFeedbackVector(), vector_cell);
- a.Store(AccessBuilder::ForJSFunctionCode(), lazy_compile_builtin);
- STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
- if (function_map->has_prototype_slot()) {
- a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
- jsgraph()->TheHoleConstant());
- STATIC_ASSERT(JSFunction::kSizeWithPrototype == 8 * kPointerSize);
- }
- for (int i = 0; i < function_map->GetInObjectProperties(); i++) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(function_map, i),
- jsgraph()->UndefinedConstant());
- }
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
+ if (feedback_cell->map() != isolate()->heap()->many_closures_cell_map()) {
+ // The generic path can only create closures for user functions.
+ DCHECK_EQ(isolate()->builtins()->builtin(Builtins::kCompileLazy), *code);
+ return NoChange();
}
- return NoChange();
+ Handle<Map> function_map(
+ Map::cast(native_context()->get(shared->function_map_index())));
+ DCHECK(!function_map->IsInobjectSlackTrackingInProgress());
+ DCHECK(!function_map->is_dictionary_map());
+
+ // TODO(turbofan): We should use the pretenure flag from {p} here,
+ // but currently the heuristic in the parser works against us, as
+ // it marks closures like
+ //
+ // args[l] = function(...) { ... }
+ //
+  // for old-space allocation, which doesn't always make sense; in the
+  // bluebird-parallel benchmark, for example, such closures are a core
+  // part of the *promisify* logic (see crbug.com/810132).
+ PretenureFlag pretenure = NOT_TENURED;
+
+ // Emit code to allocate the JSFunction instance.
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(function_map->instance_size(), pretenure, Type::Function());
+ a.Store(AccessBuilder::ForMap(), function_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
+ a.Store(AccessBuilder::ForJSFunctionContext(), context);
+ a.Store(AccessBuilder::ForJSFunctionFeedbackCell(), feedback_cell);
+ a.Store(AccessBuilder::ForJSFunctionCode(), code);
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
+ if (function_map->has_prototype_slot()) {
+ a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(),
+ jsgraph()->TheHoleConstant());
+ STATIC_ASSERT(JSFunction::kSizeWithPrototype == 8 * kPointerSize);
+ }
+ for (int i = 0; i < function_map->GetInObjectProperties(); i++) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(function_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
}
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
@@ -968,6 +1004,28 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
return Changed(node);
}
+Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateStringIterator, node->opcode());
+ Node* string = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ Node* map = jsgraph()->HeapConstant(
+ handle(native_context()->string_iterator_map(), isolate()));
+ // Allocate new iterator and attach the iterator to this string.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(JSStringIterator::kSize, NOT_TENURED, Type::OtherObject());
+ a.Store(AccessBuilder::ForMap(), map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSStringIteratorString(), string);
+ a.Store(AccessBuilder::ForJSStringIteratorIndex(), jsgraph()->SmiConstant(0));
+  STATIC_ASSERT(JSStringIterator::kSize == 5 * kPointerSize);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateKeyValueArray, node->opcode());
Node* key = NodeProperties::GetValueInput(node, 0);
@@ -998,6 +1056,34 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
return Changed(node);
}
+Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreatePromise, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ Handle<Map> promise_map(native_context()->promise_function()->initial_map());
+
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(promise_map->instance_size());
+ a.Store(AccessBuilder::ForMap(), promise_map);
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectOffset(JSPromise::kReactionsOrResultOffset),
+ jsgraph()->ZeroConstant());
+ STATIC_ASSERT(v8::Promise::kPending == 0);
+ a.Store(AccessBuilder::ForJSObjectOffset(JSPromise::kFlagsOffset),
+ jsgraph()->ZeroConstant());
+ STATIC_ASSERT(JSPromise::kSize == 5 * kPointerSize);
+ for (int i = 0; i < v8::Promise::kEmbedderFieldCount; ++i) {
+ a.Store(
+ AccessBuilder::ForJSObjectOffset(JSPromise::kSize + i * kPointerSize),
+ jsgraph()->ZeroConstant());
+ }
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
node->opcode() == IrOpcode::kJSCreateLiteralObject);
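
The new ReduceJSCreateStringIterator and ReduceJSCreatePromise reducers both follow the Allocate/Store/FinishAndChange pattern of AllocationBuilder: reserve the instance, write the map and every field, then commit in one step. A loose standalone sketch of that pattern (Builder and its members are illustrative, not V8's AllocationBuilder):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Illustrative builder: reserve space, queue up field stores, then
    // commit the whole object at once, mirroring Allocate/Store/Finish.
    struct Builder {
      int size = 0;
      std::vector<std::pair<int, long>> stores;  // (offset, value)
      void Allocate(int bytes) { size = bytes; }
      void Store(int offset, long value) { stores.emplace_back(offset, value); }
      void Finish() {
        std::printf("allocated %d bytes with %zu field stores\n", size, stores.size());
      }
    };

    int main() {
      Builder a;
      a.Allocate(5 * 8);  // e.g. a 5-slot object such as JSPromise above
      a.Store(0, 1);      // map
      a.Store(8, 0);      // properties
      a.Store(16, 0);     // elements
      a.Store(24, 0);     // reactions-or-result (Smi zero)
      a.Store(32, 0);     // flags; kPending == 0
      a.Finish();
    }
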
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 42b4740dd0..00e8b73459 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -53,7 +53,9 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateBoundFunction(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
+ Reduction ReduceJSCreateStringIterator(Node* node);
Reduction ReduceJSCreateKeyValueArray(Node* node);
+ Reduction ReduceJSCreatePromise(Node* node);
Reduction ReduceJSCreateLiteralArrayOrObject(Node* node);
Reduction ReduceJSCreateEmptyLiteralObject(Node* node);
Reduction ReduceJSCreateEmptyLiteralArray(Node* node);
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index c09dcbc1b3..aa26b33997 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -85,6 +85,11 @@ REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
REPLACE_STUB_CALL(ToString)
REPLACE_STUB_CALL(ForInEnumerate)
+REPLACE_STUB_CALL(FulfillPromise)
+REPLACE_STUB_CALL(PerformPromiseThen)
+REPLACE_STUB_CALL(PromiseResolve)
+REPLACE_STUB_CALL(RejectPromise)
+REPLACE_STUB_CALL(ResolvePromise)
#undef REPLACE_STUB_CALL
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
@@ -97,12 +102,12 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
Operator::Properties properties,
int result_size) {
const CallInterfaceDescriptor& descriptor = callable.descriptor();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(), flags,
properties, MachineType::AnyTagged(), result_size);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
@@ -113,14 +118,14 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
Operator::Properties properties = node->op()->properties();
const Runtime::Function* fun = Runtime::FunctionForId(f);
int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
- CallDescriptor* desc =
+ auto call_descriptor =
Linkage::GetRuntimeCallDescriptor(zone(), f, nargs, properties, flags);
Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
Node* arity = jsgraph()->Int32Constant(nargs);
node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
node->InsertInput(zone(), nargs + 1, ref);
node->InsertInput(zone(), nargs + 2, arity);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSStrictEqual(Node* node) {
@@ -344,7 +349,7 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
int const arity = static_cast<int>(p.arity());
Handle<AllocationSite> const site = p.site();
ArrayConstructorDescriptor descriptor(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, arity + 1,
CallDescriptor::kNeedsFrameState, node->op()->properties(),
MachineType::AnyTagged());
@@ -357,7 +362,7 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, type_info);
node->InsertInput(zone(), 5, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCreateBoundFunction(Node* node) {
@@ -366,26 +371,19 @@ void JSGenericLowering::LowerJSCreateBoundFunction(Node* node) {
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
- node->RemoveInput(3); // control
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.feedback_cell()));
+ node->RemoveInput(4); // control
// Use the FastNewClosure builtin only for functions allocated in new space.
if (p.pretenure() == NOT_TENURED) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kFastNewClosure);
- node->InsertInput(zone(), 1,
- jsgraph()->HeapConstant(p.feedback().vector()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
ReplaceWithStubCall(node, callable, flags);
} else {
- node->InsertInput(zone(), 1,
- jsgraph()->HeapConstant(p.feedback().vector()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
- ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
+ ReplaceWithRuntimeCall(node, Runtime::kNewClosure_Tenured);
}
}
@@ -420,10 +418,18 @@ void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSCreateStringIterator(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSCreateKeyValueArray(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSCreatePromise(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -514,7 +520,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructForwardVarargs(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
@@ -527,7 +533,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, start_index);
node->InsertInput(zone(), 5, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSConstruct(Node* node) {
@@ -535,7 +541,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::Construct(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
@@ -546,14 +552,14 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
node->InsertInput(zone(), 2, new_target);
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = jsgraph()->UndefinedConstant();
@@ -563,7 +569,7 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
node->ReplaceInput(2, new_target);
node->ReplaceInput(3, arguments_list);
node->InsertInput(zone(), 4, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
@@ -573,7 +579,7 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
int const new_target_index = arg_count + 1;
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructWithSpread(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
@@ -589,7 +595,7 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
node->InsertInput(zone(), 3, stack_arg_count);
node->InsertInput(zone(), 4, spread);
node->InsertInput(zone(), 5, receiver);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
@@ -597,7 +603,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::CallForwardVarargs(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
@@ -605,7 +611,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
node->InsertInput(zone(), 3, start_index);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCall(Node* node) {
@@ -614,19 +620,19 @@ void JSGenericLowering::LowerJSCall(Node* node) {
ConvertReceiverMode const mode = p.convert_mode();
Callable callable = CodeFactory::Call(isolate(), mode);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
Callable callable = CodeFactory::CallWithArrayLike(isolate());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = node->InputAt(1);
@@ -634,7 +640,7 @@ void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->ReplaceInput(3, receiver);
node->ReplaceInput(2, arguments_list);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
@@ -643,7 +649,7 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
int const spread_index = static_cast<int>(p.arity() + 1);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::CallWithSpread(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
@@ -652,7 +658,7 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
node->InsertInput(zone(), 2, stack_arg_count);
node->InsertInput(zone(), 3, node->InputAt(spread_index));
node->RemoveInput(spread_index + 1);
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
@@ -693,6 +699,10 @@ void JSGenericLowering::LowerJSGeneratorRestoreContinuation(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSGeneratorRestoreContext(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSGeneratorRestoreInputOrDebugPos(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
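
Nearly every hunk in this file is the same mechanical rewrite: build a call descriptor, prepend the stub code object as input 0, splice in extra arguments such as the arity, then swap the node's operator for a plain Call. A standalone sketch of that in-place rewrite, using an invented Node type and LowerToStubCall helper:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Invented model of the rewrite: the node keeps its original inputs;
    // lowering prepends the code target, inserts the arity, and changes
    // the operator, much like InsertInput + ChangeOp above.
    struct Node {
      std::string op;
      std::vector<std::string> inputs;
    };

    void LowerToStubCall(Node& node, const std::string& stub_code, int arity) {
      node.inputs.insert(node.inputs.begin(), stub_code);                  // input 0
      node.inputs.insert(node.inputs.begin() + 2, std::to_string(arity));  // stub arity
      node.op = "Call";                                                    // ChangeOp
    }

    int main() {
      Node call{"JSCall", {"target", "receiver", "arg0"}};
      LowerToStubCall(call, "CallStubCode", 1);
      std::printf("%s with %zu inputs\n", call.op.c_str(), call.inputs.size());
    }
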
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index f5b4bdc181..9bbe2178fb 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -217,4 +217,4 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_JS_GRAPH_H_
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index add2b2c478..a995b038a8 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -323,8 +323,7 @@ bool JSInliner::DetermineCallTarget(
// target.
// TODO(turbofan): We might consider to eagerly create the feedback vector
// in such a case (in {DetermineCallContext} below) eventually.
- FeedbackSlot slot = p.feedback().slot();
- Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+ Handle<FeedbackCell> cell = p.feedback_cell();
if (!cell->value()->IsFeedbackVector()) return false;
shared_info_out = p.shared_info();
@@ -348,9 +347,9 @@ void JSInliner::DetermineCallContext(
if (match.HasValue() && match.Value()->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
- // If the target function was never invoked, its literals array might not
- // contain a feedback vector. We ensure at this point that it is created.
- JSFunction::EnsureLiterals(function);
+    // If the target function was never invoked, its feedback cell might not
+    // contain a feedback vector yet. We ensure at this point that it's created.
+ JSFunction::EnsureFeedbackVector(function);
// The inlinee specializes to the context from the JSFunction object.
context_out = jsgraph()->Constant(handle(function->context()));
@@ -363,8 +362,7 @@ void JSInliner::DetermineCallContext(
// Load the feedback vector of the target by looking up its vector cell at
// the instantiation site (we only decide to inline if it's populated).
- FeedbackSlot slot = p.feedback().slot();
- Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+ Handle<FeedbackCell> cell = p.feedback_cell();
DCHECK(cell->value()->IsFeedbackVector());
// The inlinee uses the locally provided context at instantiation.
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index dc1ec521f2..c570a1f8dd 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -41,6 +41,14 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceCreateJSGeneratorObject(node);
case Runtime::kInlineGeneratorGetInputOrDebugPos:
return ReduceGeneratorGetInputOrDebugPos(node);
+ case Runtime::kInlineAsyncFunctionAwaitCaught:
+ return ReduceAsyncFunctionAwaitCaught(node);
+ case Runtime::kInlineAsyncFunctionAwaitUncaught:
+ return ReduceAsyncFunctionAwaitUncaught(node);
+ case Runtime::kInlineAsyncGeneratorAwaitCaught:
+ return ReduceAsyncGeneratorAwaitCaught(node);
+ case Runtime::kInlineAsyncGeneratorAwaitUncaught:
+ return ReduceAsyncGeneratorAwaitUncaught(node);
case Runtime::kInlineAsyncGeneratorReject:
return ReduceAsyncGeneratorReject(node);
case Runtime::kInlineAsyncGeneratorResolve:
@@ -49,8 +57,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceAsyncGeneratorYield(node);
case Runtime::kInlineGeneratorGetResumeMode:
return ReduceGeneratorGetResumeMode(node);
- case Runtime::kInlineGeneratorGetContext:
- return ReduceGeneratorGetContext(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
@@ -69,6 +75,10 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
+ case Runtime::kInlineRejectPromise:
+ return ReduceRejectPromise(node);
+ case Runtime::kInlineResolvePromise:
+ return ReduceResolvePromise(node);
case Runtime::kInlineToInteger:
return ReduceToInteger(node);
case Runtime::kInlineToLength:
@@ -83,12 +93,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceCall(node);
case Runtime::kInlineGetSuperConstructor:
return ReduceGetSuperConstructor(node);
- case Runtime::kInlineArrayBufferViewGetByteLength:
- return ReduceArrayBufferViewField(
- node, AccessBuilder::ForJSArrayBufferViewByteLength());
- case Runtime::kInlineArrayBufferViewGetByteOffset:
- return ReduceArrayBufferViewField(
- node, AccessBuilder::ForJSArrayBufferViewByteOffset());
case Runtime::kInlineArrayBufferViewWasNeutered:
return ReduceArrayBufferViewWasNeutered(node);
case Runtime::kInlineMaxSmi:
@@ -98,8 +102,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
AccessBuilder::ForJSTypedArrayLength());
case Runtime::kInlineTheHole:
return ReduceTheHole(node);
- case Runtime::kInlineClassOf:
- return ReduceClassOf(node);
case Runtime::kInlineStringMaxLength:
return ReduceStringMaxLength(node);
default:
@@ -183,6 +185,33 @@ Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
return Change(node, op, generator, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionAwaitCaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncFunctionAwaitCaught), 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncFunctionAwaitUncaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncFunctionAwaitUncaught),
+ 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorAwaitCaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorAwaitCaught),
+ 0);
+}
+
+Reduction JSIntrinsicLowering::ReduceAsyncGeneratorAwaitUncaught(Node* node) {
+ return Change(
+ node,
+ Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorAwaitUncaught),
+ 0);
+}
+
Reduction JSIntrinsicLowering::ReduceAsyncGeneratorReject(Node* node) {
return Change(
node, Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorReject),
@@ -201,16 +230,6 @@ Reduction JSIntrinsicLowering::ReduceAsyncGeneratorYield(Node* node) {
0);
}
-Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
- Node* const generator = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Operator const* const op =
- simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectContext());
-
- return Change(node, op, generator, effect, control);
-}
-
Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
Node* const generator = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -272,6 +291,17 @@ Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
return Change(node, simplified()->ObjectIsSmi());
}
+Reduction JSIntrinsicLowering::ReduceRejectPromise(Node* node) {
+ RelaxControls(node);
+ NodeProperties::ChangeOp(node, javascript()->RejectPromise());
+ return Changed(node);
+}
+
+Reduction JSIntrinsicLowering::ReduceResolvePromise(Node* node) {
+ RelaxControls(node);
+ NodeProperties::ChangeOp(node, javascript()->ResolvePromise());
+ return Changed(node);
+}
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
// Replace all effect uses of {node} with the effect dependency.
@@ -384,16 +414,6 @@ Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
return Replace(value);
}
-Reduction JSIntrinsicLowering::ReduceClassOf(Node* node) {
- RelaxEffectsAndControls(node);
- // The ClassOf operator has a single value input and control input.
- Node* control_input = NodeProperties::GetControlInput(node, 0);
- node->TrimInputCount(2);
- node->ReplaceInput(1, control_input);
- NodeProperties::ChangeOp(node, simplified()->ClassOf());
- return Changed(node);
-}
-
Reduction JSIntrinsicLowering::ReduceStringMaxLength(Node* node) {
Node* value = jsgraph()->Constant(String::kMaxLength);
ReplaceWithValue(node, value);
@@ -438,12 +458,12 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
int stack_parameter_count) {
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), stack_parameter_count,
CallDescriptor::kNeedsFrameState, node->op()->properties());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
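
The Reduce entry point above is a flat switch from %Inline intrinsic ids to per-intrinsic handlers; some become builtin calls, others cheaper operators (the new RejectPromise/ResolvePromise cases just relax controls and change the operator). A toy dispatch with invented ids, only to show the shape:

    #include <cstdio>

    // Invented ids: each inlined intrinsic maps to either a builtin call,
    // a simpler operator, or is left alone (NoChange).
    enum class Intrinsic { kRejectPromise, kResolvePromise, kIsSmi, kUnknown };

    const char* Lower(Intrinsic id) {
      switch (id) {
        case Intrinsic::kRejectPromise:  return "change op to JSRejectPromise";
        case Intrinsic::kResolvePromise: return "change op to JSResolvePromise";
        case Intrinsic::kIsSmi:          return "change op to simplified ObjectIsSmi";
        default:                         return "NoChange";
      }
    }

    int main() { std::printf("%s\n", Lower(Intrinsic::kResolvePromise)); }
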
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 81cf5467d5..fb745986a6 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -44,8 +44,11 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceCreateJSGeneratorObject(Node* node);
Reduction ReduceGeneratorClose(Node* node);
- Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
+ Reduction ReduceAsyncFunctionAwaitCaught(Node* node);
+ Reduction ReduceAsyncFunctionAwaitUncaught(Node* node);
+ Reduction ReduceAsyncGeneratorAwaitCaught(Node* node);
+ Reduction ReduceAsyncGeneratorAwaitUncaught(Node* node);
Reduction ReduceAsyncGeneratorReject(Node* node);
Reduction ReduceAsyncGeneratorResolve(Node* node);
Reduction ReduceAsyncGeneratorYield(Node* node);
@@ -54,6 +57,8 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
+ Reduction ReduceRejectPromise(Node* node);
+ Reduction ReduceResolvePromise(Node* node);
Reduction ReduceToInteger(Node* node);
Reduction ReduceToLength(Node* node);
Reduction ReduceToNumber(Node* node);
@@ -72,9 +77,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
// converted to proper CodeStubAssembler based builtins.
Reduction ReduceTheHole(Node* node);
- // TODO(turbofan): JavaScript builtins support; drop once all uses of
- // %_ClassOf in JavaScript builtins are eliminated.
- Reduction ReduceClassOf(Node* node);
Reduction ReduceStringMaxLength(Node* node);
Reduction Change(Node* node, const Operator* op);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index b2f8c567e2..35e0a551db 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -76,6 +76,10 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSHasInPrototypeChain(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
+ case IrOpcode::kJSPromiseResolve:
+ return ReduceJSPromiseResolve(node);
+ case IrOpcode::kJSResolvePromise:
+ return ReduceJSResolvePromise(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSLoadGlobal:
@@ -168,7 +172,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
if (m.HasValue() && m.Value()->IsJSObject()) {
receiver = Handle<JSObject>::cast(m.Value());
} else if (p.feedback().IsValid()) {
- InstanceOfICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (!nexus.GetConstructorFeedback().ToHandle(&receiver)) return NoChange();
} else {
return NoChange();
@@ -411,6 +415,87 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
return NoChange();
}
+// ES section #sec-promise-resolve
+Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSPromiseResolve, node->opcode());
+ Node* constructor = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if the {constructor} is the %Promise% function.
+ HeapObjectMatcher m(constructor);
+ if (!m.Is(handle(native_context()->promise_function()))) return NoChange();
+
+ // Check if we know something about the {value}.
+ ZoneHandleSet<Map> value_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(value, effect, &value_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, value_maps.size());
+
+ // Check that the {value} cannot be a JSPromise.
+ for (Handle<Map> const value_map : value_maps) {
+ if (value_map->IsJSPromiseMap()) return NoChange();
+ }
+
+ // Create a %Promise% instance and resolve it with {value}.
+ Node* promise = effect =
+ graph()->NewNode(javascript()->CreatePromise(), context, effect);
+ effect = graph()->NewNode(javascript()->ResolvePromise(), promise, value,
+ context, frame_state, effect, control);
+ ReplaceWithValue(node, promise, effect, control);
+ return Replace(promise);
+}
+
+// ES section #sec-promise-resolve-functions
+Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode());
+ Node* promise = NodeProperties::GetValueInput(node, 0);
+ Node* resolution = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if we know something about the {resolution}.
+ ZoneHandleSet<Map> resolution_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(resolution, effect, &resolution_maps);
+ if (result != NodeProperties::kReliableReceiverMaps) return NoChange();
+ DCHECK_NE(0, resolution_maps.size());
+
+ // Compute property access info for "then" on {resolution}.
+ PropertyAccessInfo access_info;
+ AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ graph()->zone());
+ if (!access_info_factory.ComputePropertyAccessInfo(
+ MapHandles(resolution_maps.begin(), resolution_maps.end()),
+ factory()->then_string(), AccessMode::kLoad, &access_info)) {
+ return NoChange();
+ }
+
+ // We can further optimize the case where {resolution}
+ // definitely doesn't have a "then" property.
+ if (!access_info.IsNotFound()) return NoChange();
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+
+ // Add proper dependencies on the {resolution}s [[Prototype]]s.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ access_builder.AssumePrototypesStable(native_context(),
+ access_info.receiver_maps(), holder);
+ }
+
+ // Simply fulfill the {promise} with the {resolution}.
+ Node* value = effect =
+ graph()->NewNode(javascript()->FulfillPromise(), promise, resolution,
+ context, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
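
ReduceJSPromiseResolve above only fires when the compiler can enumerate every possible map of {value} and none of them is a promise map; otherwise a thenable could require the generic path. A standalone sketch of that guard, with invented Map/CanInlinePromiseResolve names:

    #include <cstdio>
    #include <vector>

    // Invented types: inline Promise.resolve(value) only when value's
    // possible maps are all known and none of them is a promise map.
    struct Map { bool is_promise_map; };

    bool CanInlinePromiseResolve(const std::vector<const Map*>& value_maps) {
      if (value_maps.empty()) return false;  // no reliable type info: NoChange
      for (const Map* map : value_maps) {
        if (map->is_promise_map) return false;  // might already be a promise
      }
      return true;
    }

    int main() {
      Map plain_object{false}, promise{true};
      std::printf("%d %d\n", CanInlinePromiseResolve({&plain_object}),
                  CanInlinePromiseResolve({&plain_object, &promise}));  // 1 0
    }
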
@@ -945,16 +1030,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
}
- // Check if the {nexus} reports type feedback for the IC.
- if (nexus.IsUninitialized()) {
- if (flags() & kBailoutOnUninitialized) {
- return ReduceSoftDeoptimize(
- node,
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
- }
- return NoChange();
- }
-
// Extract receiver maps from the IC using the {nexus}.
MapHandles receiver_maps;
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
@@ -967,6 +1042,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
}
return NoChange();
}
+ DCHECK(!nexus.IsUninitialized());
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccess(node, value, receiver_maps, name, access_mode);
@@ -1007,9 +1083,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
}
}
- // Extract receiver maps from the load IC using the LoadICNexus.
+ // Extract receiver maps from the load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
@@ -1022,9 +1098,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
NamedAccess const& p = NamedAccessOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the store IC using the StoreICNexus.
+ // Extract receiver maps from the store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
@@ -1036,9 +1112,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the IC using the StoreOwnICNexus.
+ // Extract receiver maps from the IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- StoreOwnICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the creation of a named property based on the {receiver_maps}.
return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
@@ -1264,9 +1340,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
return Replace(value);
}
-template <typename KeyedICNexus>
Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
- Node* node, Node* index, Node* value, KeyedICNexus const& nexus,
+ Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
AccessMode access_mode, KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -1354,16 +1429,6 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
}
- // Check if the {nexus} reports type feedback for the IC.
- if (nexus.IsUninitialized()) {
- if (flags() & kBailoutOnUninitialized) {
- return ReduceSoftDeoptimize(
- node,
- DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
- }
- return NoChange();
- }
-
// Extract receiver maps from the {nexus}.
MapHandles receiver_maps;
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
@@ -1376,6 +1441,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
return NoChange();
}
+ DCHECK(!nexus.IsUninitialized());
// Optimize access for constant {index}.
HeapObjectMatcher mindex(index);
@@ -1543,9 +1609,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
}
}
- // Extract receiver maps from the keyed load IC using the KeyedLoadICNexus.
+ // Extract receiver maps from the keyed load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- KeyedLoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Extract the keyed access load mode from the keyed load IC.
KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
@@ -1561,9 +1627,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
Node* const index = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the keyed store IC using the KeyedStoreICNexus.
+ // Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- KeyedStoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
// Extract the keyed access store mode from the keyed store IC.
KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
@@ -1663,7 +1729,7 @@ Node* JSNativeContextSpecialization::InlineApiCall(
CallApiCallbackStub stub(isolate(), argc);
CallInterfaceDescriptor call_interface_descriptor =
stub.GetCallInterfaceDescriptor();
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), call_interface_descriptor,
call_interface_descriptor.GetStackParameterCount() + argc +
1 /* implicit receiver */,
@@ -1960,8 +2026,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
if (!p.feedback().IsValid()) return NoChange();
- StoreDataPropertyInLiteralICNexus nexus(p.feedback().vector(),
- p.feedback().slot());
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
return NoChange();
}
@@ -2124,12 +2189,18 @@ JSNativeContextSpecialization::BuildElementAccess(
if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- // Check that the {index} is a valid array index, we do the actual
- // bounds check below and just skip the store below if it's out of
+ // Only check that the {index} is in Signed32 range. We do the actual
+ // bounds check below and just skip the property access if it's out of
// bounds for the {receiver}.
index = effect = graph()->NewNode(
- simplified()->CheckBounds(VectorSlotPair()), index,
- jsgraph()->Constant(Smi::kMaxValue), effect, control);
+ simplified()->SpeculativeToNumber(NumberOperationHint::kSigned32,
+ VectorSlotPair()),
+ index, effect, control);
+
+ // Cast the {index} to Unsigned32 range, so that the bounds checks
+ // below are performed on unsigned values, which means that all the
+ // Negative32 values are treated as out-of-bounds.
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
} else {
// Check that the {index} is in the valid range for the {receiver}.
index = effect =
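
The NumberToUint32 step added above is the classic unsigned bounds-check trick: once the Signed32 index is reinterpreted in Unsigned32 range, every negative value becomes a huge unsigned one, so a single unsigned comparison rejects both negative and past-the-end indices. A self-contained demonstration:

    #include <cstdint>
    #include <cstdio>

    // One unsigned comparison covers both "index < 0" and "index >= length":
    // -1 reinterprets to 0xFFFFFFFF, which fails the < length test.
    bool InBounds(int32_t index, uint32_t length) {
      return static_cast<uint32_t>(index) < length;
    }

    int main() {
      std::printf("%d %d %d\n", InBounds(-1, 10), InBounds(3, 10), InBounds(10, 10));
      // prints: 0 1 0
    }
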
@@ -2193,10 +2264,10 @@ JSNativeContextSpecialization::BuildElementAccess(
case AccessMode::kStore: {
// Ensure that the {value} is actually a Number or an Oddball,
// and truncate it to a Number appropriately.
- value = effect =
- graph()->NewNode(simplified()->SpeculativeToNumber(
- NumberOperationHint::kNumberOrOddball),
- value, effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, VectorSlotPair()),
+ value, effect, control);
// Introduce the appropriate truncation for {value}. Currently we
        // only need to do this for ClampedUint8Array {receiver}s, as the
@@ -2246,10 +2317,11 @@ JSNativeContextSpecialization::BuildElementAccess(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
effect, control);
- // Don't try to store to a copy-on-write backing store.
+ // Don't try to store to a copy-on-write backing store (unless supported by
+ // the store mode).
if (access_mode == AccessMode::kStore &&
IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+ !IsCOWHandlingStoreMode(store_mode)) {
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
@@ -2459,6 +2531,15 @@ JSNativeContextSpecialization::BuildElementAccess(
simplified()->MaybeGrowFastElements(mode, VectorSlotPair()),
receiver, elements, index, elements_length, effect, control);
+ // If we didn't grow {elements}, it might still be COW, in which case we
+ // copy it now.
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW) {
+ elements = effect =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, effect, control);
+ }
+
// Also update the "length" property if {receiver} is a JSArray.
if (receiver_is_jsarray) {
Node* check =
@@ -2524,13 +2605,16 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, if_true);
+ Node* etrue;
+ Node* vtrue = etrue = graph()->NewNode(
+ simplified()->StringCharAt(), receiver, masked_index, *effect, if_true);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* vfalse = jsgraph()->UndefinedConstant();
*control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ *effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, *effect, *control);
return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vtrue, vfalse, *control);
} else {
@@ -2543,8 +2627,10 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
// Return the character from the {receiver} as single character string.
- return graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, *control);
+ Node* value = *effect =
+ graph()->NewNode(simplified()->StringCharAt(), receiver, masked_index,
+ *effect, *control);
+ return value;
}
}
@@ -2652,6 +2738,7 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
Node* receiver, Node* effect, FeedbackNexus const& nexus,
MapHandles* receiver_maps) {
DCHECK_EQ(0, receiver_maps->size());
+ if (nexus.IsUninitialized()) return true;
// See if we can infer a concrete type for the {receiver}.
if (InferReceiverMaps(receiver, effect, receiver_maps)) {
// We can assume that the {receiver} still has the inferred {receiver_maps}.
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 879203c1dd..6df48d6e23 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -62,6 +62,8 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSHasInPrototypeChain(Node* node);
Reduction ReduceJSOrdinaryHasInstance(Node* node);
+ Reduction ReduceJSPromiseResolve(Node* node);
+ Reduction ReduceJSResolvePromise(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSStoreGlobal(Node* node);
@@ -77,9 +79,9 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode);
- template <typename KeyedICNexus>
Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
- KeyedICNexus const& nexus, AccessMode access_mode,
+ FeedbackNexus const& nexus,
+ AccessMode access_mode,
KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode);
Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 0ddf859cff..31be6d9979 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -448,7 +448,8 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() &&
- lhs.feedback() == rhs.feedback() &&
+ lhs.code().location() == rhs.code().location() &&
+ lhs.feedback_cell().location() == rhs.feedback_cell().location() &&
lhs.shared_info().location() == rhs.shared_info().location();
}
@@ -461,12 +462,13 @@ bool operator!=(CreateClosureParameters const& lhs,
size_t hash_value(CreateClosureParameters const& p) {
return base::hash_combine(p.pretenure(), p.shared_info().location(),
- p.feedback());
+ p.feedback_cell().location());
}
std::ostream& operator<<(std::ostream& os, CreateClosureParameters const& p) {
- return os << p.pretenure() << ", " << Brief(*p.shared_info());
+ return os << p.pretenure() << ", " << Brief(*p.shared_info()) << ", "
+ << Brief(*p.feedback_cell()) << ", " << Brief(*p.code());
}
@@ -543,42 +545,50 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
return OpParameter<CompareOperationHint>(op);
}
-#define CACHED_OP_LIST(V) \
- V(BitwiseOr, Operator::kNoProperties, 2, 1) \
- V(BitwiseXor, Operator::kNoProperties, 2, 1) \
- V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
- V(ShiftLeft, Operator::kNoProperties, 2, 1) \
- V(ShiftRight, Operator::kNoProperties, 2, 1) \
- V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
- V(Subtract, Operator::kNoProperties, 2, 1) \
- V(Multiply, Operator::kNoProperties, 2, 1) \
- V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1) \
- V(Exponentiate, Operator::kNoProperties, 2, 1) \
- V(BitwiseNot, Operator::kNoProperties, 1, 1) \
- V(Decrement, Operator::kNoProperties, 1, 1) \
- V(Increment, Operator::kNoProperties, 1, 1) \
- V(Negate, Operator::kNoProperties, 1, 1) \
- V(ToInteger, Operator::kNoProperties, 1, 1) \
- V(ToLength, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToNumeric, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kFoldable, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kNoProperties, 2, 1) \
- V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
- V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
- V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
- V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
- V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
- V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
- V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
- V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
- V(StackCheck, Operator::kNoWrite, 0, 0) \
- V(Debugger, Operator::kNoProperties, 0, 0) \
+#define CACHED_OP_LIST(V) \
+ V(BitwiseOr, Operator::kNoProperties, 2, 1) \
+ V(BitwiseXor, Operator::kNoProperties, 2, 1) \
+ V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
+ V(ShiftLeft, Operator::kNoProperties, 2, 1) \
+ V(ShiftRight, Operator::kNoProperties, 2, 1) \
+ V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
+ V(Subtract, Operator::kNoProperties, 2, 1) \
+ V(Multiply, Operator::kNoProperties, 2, 1) \
+ V(Divide, Operator::kNoProperties, 2, 1) \
+ V(Modulus, Operator::kNoProperties, 2, 1) \
+ V(Exponentiate, Operator::kNoProperties, 2, 1) \
+ V(BitwiseNot, Operator::kNoProperties, 1, 1) \
+ V(Decrement, Operator::kNoProperties, 1, 1) \
+ V(Increment, Operator::kNoProperties, 1, 1) \
+ V(Negate, Operator::kNoProperties, 1, 1) \
+ V(ToInteger, Operator::kNoProperties, 1, 1) \
+ V(ToLength, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToNumeric, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kFoldable, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kNoProperties, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(CreateStringIterator, Operator::kEliminatable, 1, 1) \
+ V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
+ V(CreatePromise, Operator::kEliminatable, 0, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
+ V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
+ V(ForInEnumerate, Operator::kNoProperties, 1, 1) \
+ V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
+ V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
+ V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreContext, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
+ V(StackCheck, Operator::kNoWrite, 0, 0) \
+ V(Debugger, Operator::kNoProperties, 0, 0) \
+ V(FulfillPromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
+ V(PerformPromiseThen, Operator::kNoDeopt | Operator::kNoThrow, 4, 1) \
+ V(PromiseResolve, Operator::kNoProperties, 2, 1) \
+ V(RejectPromise, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
+ V(ResolvePromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
#define BINARY_OP_LIST(V) V(Add)
@@ -749,6 +759,8 @@ const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode,
SpeculationMode speculation_mode) {
+ DCHECK_IMPLIES(speculation_mode == SpeculationMode::kAllowSpeculation,
+ feedback.IsValid());
CallParameters parameters(arity, frequency, feedback, convert_mode,
speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
@@ -769,6 +781,8 @@ const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
const Operator* JSOperatorBuilder::CallWithSpread(
uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback,
SpeculationMode speculation_mode) {
+ DCHECK_IMPLIES(speculation_mode == SpeculationMode::kAllowSpeculation,
+ feedback.IsValid());
CallParameters parameters(arity, frequency, feedback,
ConvertReceiverMode::kAny, speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
@@ -1048,9 +1062,10 @@ const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
}
const Operator* JSOperatorBuilder::CreateClosure(
- Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
- PretenureFlag pretenure) {
- CreateClosureParameters parameters(shared_info, feedback, pretenure);
+ Handle<SharedFunctionInfo> shared_info, Handle<FeedbackCell> feedback_cell,
+ Handle<Code> code, PretenureFlag pretenure) {
+ CreateClosureParameters parameters(shared_info, feedback_cell, code,
+ pretenure);
return new (zone()) Operator1<CreateClosureParameters>( // --
IrOpcode::kJSCreateClosure, Operator::kEliminatable, // opcode
"JSCreateClosure", // name
@@ -1155,6 +1170,10 @@ const Operator* JSOperatorBuilder::CreateBlockContext(
scope_info); // parameter
}
+#undef BINARY_OP_LIST
+#undef CACHED_OP_LIST
+#undef COMPARE_OP_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
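A self-contained sketch of the handle-location convention the rewritten operator== and hash_value rely on. HandleSketch stands in for the real Handle type, and the hash mixing is simplified from base::hash_combine:

#include <cstddef>
#include <functional>

// Equality of parameters is defined over the handle locations (the
// addresses of the slots holding the objects), not over the objects
// themselves, so equal parameter structs are guaranteed to reference the
// very same shared_info, feedback_cell, and code handles.
template <typename T>
struct HandleSketch {
  T** location_;
  T** location() const { return location_; }
};

struct CreateClosureParamsSketch {
  HandleSketch<int> shared_info, feedback_cell, code;
};

bool operator==(CreateClosureParamsSketch const& a,
                CreateClosureParamsSketch const& b) {
  return a.shared_info.location() == b.shared_info.location() &&
         a.feedback_cell.location() == b.feedback_cell.location() &&
         a.code.location() == b.code.location();
}

size_t hash_value(CreateClosureParamsSketch const& p) {
  std::hash<void*> h;  // simplified stand-in for base::hash_combine
  return h(p.shared_info.location()) ^ (h(p.feedback_cell.location()) << 1) ^
         (h(p.code.location()) << 2);
}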
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 3875234d5a..959a83026c 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -18,6 +18,7 @@ namespace internal {
class AllocationSite;
class BoilerplateDescription;
class ConstantElementsPair;
+class FeedbackCell;
class SharedFunctionInfo;
namespace compiler {
@@ -533,18 +534,23 @@ const CreateBoundFunctionParameters& CreateBoundFunctionParametersOf(
class CreateClosureParameters final {
public:
CreateClosureParameters(Handle<SharedFunctionInfo> shared_info,
- VectorSlotPair const& feedback,
+ Handle<FeedbackCell> feedback_cell, Handle<Code> code,
PretenureFlag pretenure)
- : shared_info_(shared_info), feedback_(feedback), pretenure_(pretenure) {}
+ : shared_info_(shared_info),
+ feedback_cell_(feedback_cell),
+ code_(code),
+ pretenure_(pretenure) {}
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- VectorSlotPair const& feedback() const { return feedback_; }
+ Handle<FeedbackCell> feedback_cell() const { return feedback_cell_; }
+ Handle<Code> code() const { return code_; }
PretenureFlag pretenure() const { return pretenure_; }
private:
- const Handle<SharedFunctionInfo> shared_info_;
- VectorSlotPair const feedback_;
- const PretenureFlag pretenure_;
+ Handle<SharedFunctionInfo> const shared_info_;
+ Handle<FeedbackCell> const feedback_cell_;
+ Handle<Code> const code_;
+ PretenureFlag const pretenure_;
};
bool operator==(CreateClosureParameters const&, CreateClosureParameters const&);
@@ -652,10 +658,13 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
const Operator* CreateBoundFunction(size_t arity, Handle<Map> map);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
- VectorSlotPair const& feedback,
- PretenureFlag pretenure);
+ Handle<FeedbackCell> feedback_cell,
+ Handle<Code> code,
+ PretenureFlag pretenure = NOT_TENURED);
const Operator* CreateIterResultObject();
+ const Operator* CreateStringIterator();
const Operator* CreateKeyValueArray();
+ const Operator* CreatePromise();
const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
VectorSlotPair const& feedback,
int literal_flags, int number_of_elements);
@@ -675,12 +684,12 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
- SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
+ SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
const Operator* CallWithArrayLike(CallFrequency frequency);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
- SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
+ SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
@@ -727,7 +736,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadModule(int32_t cell_index);
const Operator* StoreModule(int32_t cell_index);
- const Operator* ClassOf();
const Operator* HasInPrototypeChain();
const Operator* InstanceOf(const VectorSlotPair& feedback);
const Operator* OrdinaryHasInstance();
@@ -742,8 +750,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
// Used to implement Ignition's SuspendGenerator bytecode.
const Operator* GeneratorStore(int register_count);
- // Used to implement Ignition's RestoreGeneratorState bytecode.
+ // Used to implement Ignition's SwitchOnGeneratorState bytecode.
const Operator* GeneratorRestoreContinuation();
+ const Operator* GeneratorRestoreContext();
+
// Used to implement Ignition's ResumeGenerator bytecode.
const Operator* GeneratorRestoreRegister(int index);
const Operator* GeneratorRestoreInputOrDebugPos();
@@ -751,6 +761,12 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StackCheck();
const Operator* Debugger();
+ const Operator* FulfillPromise();
+ const Operator* PerformPromiseThen();
+ const Operator* PromiseResolve();
+ const Operator* RejectPromise();
+ const Operator* ResolvePromise();
+
const Operator* CreateFunctionContext(int slot_count, ScopeType scope_type);
const Operator* CreateCatchContext(const Handle<String>& name,
const Handle<ScopeInfo>& scope_info);
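Note that the default SpeculationMode for Call and CallWithSpread flips to kDisallowSpeculation here, and the matching DCHECK_IMPLIES in js-operator.cc ties speculation to a valid feedback slot. A sketch of that invariant with stubbed types (names mirror the diff; the bodies are assumptions):

#include <cassert>

enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };

struct VectorSlotPairSketch {
  bool valid = false;  // a default-constructed pair carries no feedback
  bool IsValid() const { return valid; }
};

// Mirrors DCHECK_IMPLIES(mode == kAllowSpeculation, feedback.IsValid()):
// callers that pass no feedback now get the safe non-speculative default
// instead of silently speculating without a deopt anchor.
void CheckCallParams(SpeculationMode mode,
                     VectorSlotPairSketch const& feedback) {
  assert(mode != SpeculationMode::kAllowSpeculation || feedback.IsValid());
}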
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 0ec63600a2..fac87bc685 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -60,14 +60,12 @@ class JSSpeculativeBinopBuilder final {
slot_(slot) {}
BinaryOperationHint GetBinaryOperationHint() {
- DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot_));
- BinaryOpICNexus nexus(feedback_vector(), slot_);
+ FeedbackNexus nexus(feedback_vector(), slot_);
return nexus.GetBinaryOperationFeedback();
}
CompareOperationHint GetCompareOperationHint() {
- DCHECK_EQ(FeedbackSlotKind::kCompareOp, feedback_vector()->GetKind(slot_));
- CompareICNexus nexus(feedback_vector(), slot_);
+ FeedbackNexus nexus(feedback_vector(), slot_);
return nexus.GetCompareOperationFeedback();
}
@@ -218,7 +216,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
const Operator* op, Node* operand, Node* effect, Node* control,
FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
- BinaryOpICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForUnaryOperation)) {
@@ -282,7 +280,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
switch (op->opcode()) {
case IrOpcode::kJSStrictEqual: {
DCHECK(!slot.IsInvalid());
- CompareICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
@@ -298,7 +296,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThanOrEqual: {
DCHECK(!slot.IsInvalid());
- CompareICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
@@ -312,7 +310,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
}
case IrOpcode::kJSInstanceOf: {
DCHECK(!slot.IsInvalid());
- InstanceOfICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation)) {
@@ -334,7 +332,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus: {
DCHECK(!slot.IsInvalid());
- BinaryOpICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation)) {
@@ -361,7 +359,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceForInNextOperation(
Node* receiver, Node* cache_array, Node* cache_type, Node* index,
Node* effect, Node* control, FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
- ForInICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
@@ -375,7 +373,7 @@ JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect,
Node* control,
FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
- ForInICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForForIn)) {
@@ -387,13 +385,13 @@ JSTypeHintLowering::ReduceForInPrepareOperation(Node* enumerator, Node* effect,
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceToNumberOperation(
Node* input, Node* effect, Node* control, FeedbackSlot slot) const {
DCHECK(!slot.IsInvalid());
- BinaryOpICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
NumberOperationHint hint;
if (BinaryOperationHintToNumberOperationHint(
nexus.GetBinaryOperationFeedback(), &hint)) {
Node* node = jsgraph()->graph()->NewNode(
- jsgraph()->simplified()->SpeculativeToNumber(hint), input, effect,
- control);
+ jsgraph()->simplified()->SpeculativeToNumber(hint, VectorSlotPair()),
+ input, effect, control);
return LoweringResult::SideEffectFree(node, node, control);
}
return LoweringResult::NoChange();
@@ -405,7 +403,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceCallOperation(
DCHECK(op->opcode() == IrOpcode::kJSCall ||
op->opcode() == IrOpcode::kJSCallWithSpread);
DCHECK(!slot.IsInvalid());
- CallICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
@@ -420,7 +418,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceConstructOperation(
DCHECK(op->opcode() == IrOpcode::kJSConstruct ||
op->opcode() == IrOpcode::kJSConstructWithSpread);
DCHECK(!slot.IsInvalid());
- CallICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForConstruct)) {
@@ -434,7 +432,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
DCHECK(!slot.IsInvalid());
- LoadICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
@@ -448,7 +446,7 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadKeyedOperation(
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
DCHECK(!slot.IsInvalid());
- KeyedLoadICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
@@ -465,7 +463,7 @@ JSTypeHintLowering::ReduceStoreNamedOperation(const Operator* op, Node* obj,
DCHECK(op->opcode() == IrOpcode::kJSStoreNamed ||
op->opcode() == IrOpcode::kJSStoreNamedOwn);
DCHECK(!slot.IsInvalid());
- StoreICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
@@ -481,7 +479,7 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
FeedbackSlot slot) const {
DCHECK_EQ(IrOpcode::kJSStoreProperty, op->opcode());
DCHECK(!slot.IsInvalid());
- KeyedStoreICNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
nexus, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess)) {
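Every reducer in this file now follows the same shape with the unified nexus. A compressed sketch of that recurring pattern, with stand-in types rather than the real V8 API:

// Construct a FeedbackNexus from (vector, slot); if the slot recorded no
// useful feedback, emit a soft deopt rather than a speculative lowering.
struct FeedbackNexusSketch {
  bool has_feedback;
  bool IsUninitialized() const { return !has_feedback; }
};

enum class LoweringResult { kSoftDeopt, kSpeculativeLowering };

LoweringResult ReduceWithFeedback(FeedbackNexusSketch const& nexus) {
  if (nexus.IsUninitialized()) {
    // corresponds to TryBuildSoftDeopt(nexus, effect, control, reason)
    return LoweringResult::kSoftDeopt;
  }
  return LoweringResult::kSpeculativeLowering;  // use the recorded hint
}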
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index c265caf9f0..b3cd43ff71 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -504,6 +504,13 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
+ if (BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
+ // Always bake String feedback into the graph.
+ // TODO(bmeurer): Consider adding a SpeculativeStringAdd operator,
+ // and use that in JSTypeHintLowering instead of looking at the
+ // binary operation feedback here.
+ r.CheckInputsToString();
+ }
if (r.OneInputIs(Type::String())) {
// We know that (at least) one input is already a String,
// so try to strength-reduce the non-String input.
@@ -539,7 +546,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
return ReduceCreateConsString(node);
}
// Eliminate useless concatenation of empty string.
- if (BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
+ if (r.BothInputsAre(Type::String())) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (r.LeftInputIs(empty_string_type_)) {
@@ -573,15 +580,17 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
// JSAdd(x, y:string) => CallStub[StringAdd](x, y)
Callable const callable =
CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, properties);
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- NodeProperties::ChangeOp(node, common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
+ // We never get here when we had String feedback.
+ DCHECK_NE(BinaryOperationHint::kString, BinaryOperationHintOf(node->op()));
return NoChange();
}
@@ -1092,12 +1101,13 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
{
// Convert {receiver} using the ToObjectStub.
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
- rfalse = efalse = if_false = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- receiver, context, frame_state, efalse, if_false);
+ rfalse = efalse = if_false =
+ graph()->NewNode(common()->Call(call_descriptor),
+ jsgraph()->HeapConstant(callable.code()), receiver,
+ context, frame_state, efalse, if_false);
}
// Update potential {IfException} uses of {node} to point to the above
@@ -1491,10 +1501,10 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
static const int kReturnCount = 1;
const char* debug_name = Builtins::name(builtin_index);
Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetCEntryStubCallDescriptor(
+ auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
zone, kReturnCount, argc, debug_name, properties, flags);
- NodeProperties::ChangeOp(node, jsgraph->common()->Call(desc));
+ NodeProperties::ChangeOp(node, jsgraph->common()->Call(call_descriptor));
}
bool NeedsArgumentAdaptorFrame(Handle<SharedFunctionInfo> shared, int arity) {
@@ -1666,6 +1676,12 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
Handle<JSFunction> function =
Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ if (function->shared()->HasBreakInfo()) {
+ // Do not inline the call if we need to check whether to break at entry.
+ return NoChange();
+ }
+
const int builtin_index = shared->code()->builtin_index();
const bool is_builtin = (builtin_index != -1);
@@ -1697,6 +1713,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
Node* argument_count = jsgraph()->Constant(arity);
+
if (NeedsArgumentAdaptorFrame(shared, arity)) {
// Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
Callable callable = CodeFactory::ArgumentAdaptor(isolate());
@@ -1745,7 +1762,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
if (p.convert_mode() != convert_mode) {
NodeProperties::ChangeOp(
node, javascript()->Call(p.arity(), p.frequency(), p.feedback(),
- convert_mode));
+ convert_mode, p.speculation_mode()));
return Changed(node);
}
@@ -1825,12 +1842,13 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
// {receiver} (does the ToName conversion implicitly).
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kForInFilter);
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState);
- vfalse = efalse = if_false = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
- receiver, context, frame_state, effect, if_false);
+ vfalse = efalse = if_false =
+ graph()->NewNode(common()->Call(call_descriptor),
+ jsgraph()->HeapConstant(callable.code()), key,
+ receiver, context, frame_state, effect, if_false);
// Update potential {IfException} uses of {node} to point to the above
// ForInFilter stub call node instead.
@@ -2034,9 +2052,11 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
for (int i = 0; i < register_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, 3 + i);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
- value, effect, control);
+ if (value != jsgraph()->OptimizedOutConstant()) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
+ value, effect, control);
+ }
}
effect = graph()->NewNode(simplified()->StoreField(context_field), generator,
@@ -2069,6 +2089,21 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreContinuation(Node* node) {
return Changed(continuation);
}
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreContext, node->opcode());
+
+ const Operator* new_op =
+ simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectContext());
+
+ // Mutate the node in-place.
+ DCHECK(OperatorProperties::HasContextInput(node->op()));
+ DCHECK(!OperatorProperties::HasContextInput(new_op));
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+}
+
Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
DCHECK_EQ(IrOpcode::kJSGeneratorRestoreRegister, node->opcode());
Node* generator = NodeProperties::GetValueInput(node, 0);
@@ -2190,6 +2225,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSGeneratorStore(node);
case IrOpcode::kJSGeneratorRestoreContinuation:
return ReduceJSGeneratorRestoreContinuation(node);
+ case IrOpcode::kJSGeneratorRestoreContext:
+ return ReduceJSGeneratorRestoreContext(node);
case IrOpcode::kJSGeneratorRestoreRegister:
return ReduceJSGeneratorRestoreRegister(node);
case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
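Among the changes above, ReduceJSGeneratorStore now skips registers holding the OptimizedOut sentinel. A sketch of that filtering under stubbed types:

#include <utility>
#include <vector>

// Stand-in node type; here the OptimizedOut sentinel is a fixed singleton,
// matching the role of jsgraph()->OptimizedOutConstant() in the diff.
struct NodeSketch { int id; };
static NodeSketch kOptimizedOut{-1};

// Collect the (register index, value) stores that would become StoreField
// nodes; registers that are optimized out across the suspend are dead on
// resume, so no store is emitted for them.
std::vector<std::pair<int, NodeSketch*>> LiveRegisterStores(
    std::vector<NodeSketch*> const& values) {
  std::vector<std::pair<int, NodeSketch*>> stores;
  for (int i = 0; i < static_cast<int>(values.size()); ++i) {
    if (values[i] == &kOptimizedOut) continue;  // dead: skip the store
    stores.emplace_back(i, values[i]);
  }
  return stores;
}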
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index d72303f495..72ce4fb8dd 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -25,6 +25,8 @@ class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
class TypeCache;
+enum Signedness { kSigned, kUnsigned };
+
// Lowers JS-level operators to simplified operators based on types.
class V8_EXPORT_PRIVATE JSTypedLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
@@ -72,6 +74,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSStoreMessage(Node* node);
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
+ Reduction ReduceJSGeneratorRestoreContext(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
Reduction ReduceJSGeneratorRestoreInputOrDebugPos(Node* node);
Reduction ReduceNumberBinop(Node* node);
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index c2a84cc9b5..933ccc0a9c 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -14,6 +14,8 @@ namespace compiler {
if (FLAG_trace_turbo_jt) PrintF(__VA_ARGS__); \
} while (false)
+namespace {
+
struct JumpThreadingState {
bool forwarded;
ZoneVector<RpoNumber>& result;
@@ -53,6 +55,19 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
+bool IsBlockWithBranchPoisoning(InstructionSequence* code,
+ InstructionBlock* block) {
+ if (block->PredecessorCount() != 1) return false;
+ RpoNumber pred_rpo = (block->predecessors())[0];
+ const InstructionBlock* pred = code->InstructionBlockAt(pred_rpo);
+ if (pred->code_start() == pred->code_end()) return false;
+ Instruction* instr = code->InstructionAt(pred->code_end() - 1);
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ return mode == kFlags_branch_and_poison;
+}
+
+} // namespace
+
bool JumpThreading::ComputeForwarding(Zone* local_zone,
ZoneVector<RpoNumber>& result,
InstructionSequence* code,
@@ -72,46 +87,48 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
// Process the instructions in a block up to a non-empty instruction.
TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
block->rpo_number().ToInt());
- bool fallthru = true;
RpoNumber fw = block->rpo_number();
- for (int i = block->code_start(); i < block->code_end(); ++i) {
- Instruction* instr = code->InstructionAt(i);
- if (!instr->AreMovesRedundant()) {
- // can't skip instructions with non redundant moves.
- TRACE(" parallel move\n");
- fallthru = false;
- } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
- // can't skip instructions with flags continuations.
- TRACE(" flags\n");
- fallthru = false;
- } else if (instr->IsNop()) {
- // skip nops.
- TRACE(" nop\n");
- continue;
- } else if (instr->arch_opcode() == kArchJmp) {
- // try to forward the jump instruction.
- TRACE(" jmp\n");
- // if this block deconstructs the frame, we can't forward it.
- // TODO(mtrofin): we can still forward if we end up building
- // the frame at start. So we should move the decision of whether
- // to build a frame or not in the register allocator, and trickle it
- // here and to the code generator.
- if (frame_at_start ||
- !(block->must_deconstruct_frame() ||
- block->must_construct_frame())) {
- fw = code->InputRpo(instr, 0);
+ if (!IsBlockWithBranchPoisoning(code, block)) {
+ bool fallthru = true;
+ for (int i = block->code_start(); i < block->code_end(); ++i) {
+ Instruction* instr = code->InstructionAt(i);
+ if (!instr->AreMovesRedundant()) {
+ // can't skip instructions with non-redundant moves.
+ TRACE(" parallel move\n");
+ fallthru = false;
+ } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ // can't skip instructions with flags continuations.
+ TRACE(" flags\n");
+ fallthru = false;
+ } else if (instr->IsNop()) {
+ // skip nops.
+ TRACE(" nop\n");
+ continue;
+ } else if (instr->arch_opcode() == kArchJmp) {
+ // try to forward the jump instruction.
+ TRACE(" jmp\n");
+ // if this block deconstructs the frame, we can't forward it.
+ // TODO(mtrofin): we can still forward if we end up building
+ // the frame at start. So we should move the decision of whether
+ // to build a frame or not into the register allocator, and trickle it
+ // here and to the code generator.
+ if (frame_at_start || !(block->must_deconstruct_frame() ||
+ block->must_construct_frame())) {
+ fw = code->InputRpo(instr, 0);
+ }
+ fallthru = false;
+ } else {
+ // can't skip other instructions.
+ TRACE(" other\n");
+ fallthru = false;
}
- fallthru = false;
- } else {
- // can't skip other instructions.
- TRACE(" other\n");
- fallthru = false;
+ break;
+ }
+ if (fallthru) {
+ int next = 1 + block->rpo_number().ToInt();
+ if (next < code->InstructionBlockCount())
+ fw = RpoNumber::FromInt(next);
}
- break;
- }
- if (fallthru) {
- int next = 1 + block->rpo_number().ToInt();
- if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
}
state.Forward(fw);
}
@@ -155,7 +172,8 @@ void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
bool fallthru = true;
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
- if (FlagsModeField::decode(instr->opcode()) == kFlags_branch) {
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ if (mode == kFlags_branch || mode == kFlags_branch_and_poison) {
fallthru = false; // branches don't fall through to the next block.
} else if (instr->arch_opcode() == kArchJmp) {
if (skip[block_num]) {
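The new IsBlockWithBranchPoisoning predicate and the kFlags_branch_and_poison case above encode the same rule. A sketch of that rule with assumed enum values:

enum FlagsModeSketch {
  kFlagsNone,
  kFlagsBranch,
  kFlagsBranchAndPoison  // branch also materializes a speculation poison mask
};

// A block whose single predecessor ends in a poisoning branch must keep
// its own label: threading the jump away would detach the poison mask
// that the block's instructions are masked with.
bool MayThreadThrough(FlagsModeSketch pred_terminator) {
  return pred_terminator != kFlagsBranchAndPoison;
}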
diff --git a/deps/v8/src/compiler/jump-threading.h b/deps/v8/src/compiler/jump-threading.h
index 84520ba3ed..3a378d0499 100644
--- a/deps/v8/src/compiler/jump-threading.h
+++ b/deps/v8/src/compiler/jump-threading.h
@@ -30,4 +30,4 @@ class JumpThreading {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_JUMP_THREADING_H
+#endif // V8_COMPILER_JUMP_THREADING_H_
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 5df50e64f5..7ccad439d9 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -179,10 +179,8 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
return false;
// Some inline intrinsics are also safe to call without a FrameState.
- case Runtime::kInlineClassOf:
case Runtime::kInlineCreateIterResultObject:
case Runtime::kInlineGeneratorClose:
- case Runtime::kInlineGeneratorGetContext:
case Runtime::kInlineGeneratorGetInputOrDebugPos:
case Runtime::kInlineGeneratorGetResumeMode:
case Runtime::kInlineCreateJSGeneratorObject:
@@ -462,6 +460,8 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
// The target for interpreter dispatches is a code entry address.
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
+ const CallDescriptor::Flags kFlags =
+ CallDescriptor::kCanUseRoots | CallDescriptor::kFixedTargetRegister;
return new (zone) CallDescriptor( // --
CallDescriptor::kCallAddress, // kind
target_type, // target MachineType
@@ -471,7 +471,7 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
Operator::kNoProperties, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
- CallDescriptor::kCanUseRoots, // flags
+ kFlags, // flags
descriptor.DebugName(isolate));
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index ade1d6902f..5b08bc7f7c 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -184,7 +184,10 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// Push argument count as part of function prologue.
kPushArgumentCount = 1u << 5,
// Use retpoline for this call if indirect.
- kRetpoline = 1u << 6
+ kRetpoline = 1u << 6,
+ // Use the kJavaScriptCallCodeStartRegister (fixed) register for the
+ // indirect target address when calling.
+ kFixedTargetRegister = 1u << 7
};
typedef base::Flags<Flag> Flags;
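Taken together with the linkage.cc hunk above, the new flag pins the dispatch target. A sketch of the flag composition; the kRetpoline and kFixedTargetRegister bit positions come from the diff, while kCanUseRoots is given an arbitrary free bit purely for illustration:

enum FlagSketch : unsigned {
  kCanUseRoots = 1u << 3,  // position illustrative only
  kRetpoline = 1u << 6,
  kFixedTargetRegister = 1u << 7,
};

// Flags for bytecode dispatch call descriptors: the interpreter dispatch
// may address roots directly and must route the indirect target address
// through the fixed kJavaScriptCallCodeStartRegister.
constexpr unsigned kDispatchFlags = kCanUseRoots | kFixedTargetRegister;
static_assert((kDispatchFlags & kFixedTargetRegister) != 0,
              "dispatch target register is pinned");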
diff --git a/deps/v8/src/compiler/live-range-separator.h b/deps/v8/src/compiler/live-range-separator.h
index 6aaf6b69e6..0d48f25e5d 100644
--- a/deps/v8/src/compiler/live-range-separator.h
+++ b/deps/v8/src/compiler/live-range-separator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LIVE_RANGE_SEPARATOR_H_
-#define V8_LIVE_RANGE_SEPARATOR_H_
+#ifndef V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
+#define V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
#include "src/zone/zone.h"
namespace v8 {
@@ -61,4 +61,4 @@ class LiveRangeMerger final : public ZoneObject {
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif // V8_LIVE_RANGE_SEPARATOR_H_
+#endif // V8_COMPILER_LIVE_RANGE_SEPARATOR_H_
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 7888f5a21e..a3b0eda15f 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -821,9 +821,11 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
state = state->SetMaps(
node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
} else {
- // We know that the resulting elements have the fixed array map.
- state = state->SetMaps(
- node, ZoneHandleSet<Map>(factory()->fixed_array_map()), zone());
+ // We know that the resulting elements have the fixed array map or the COW
+ // version thereof (if we didn't grow and it was already COW before).
+ ZoneHandleSet<Map> fixed_array_maps(factory()->fixed_array_map());
+ fixed_array_maps.insert(factory()->fixed_cow_array_map(), zone());
+ state = state->SetMaps(node, fixed_array_maps, zone());
}
// Kill the previous elements on {object}.
state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
@@ -1344,7 +1346,7 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
if (kDoubleSize != kPointerSize) {
return -1; // We currently only track pointer size fields.
}
- // Fall through.
+ break;
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
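A sketch of the widened map state recorded by ReduceMaybeGrowFastElements, with the set type simplified from ZoneHandleSet<Map> to plain strings:

#include <set>
#include <string>

// After MaybeGrowFastElements on a non-double array, the elements backing
// store is either a plain FixedArray or, if the store did not actually
// grow and was copy-on-write before, the COW variant, so the abstract
// state must admit both maps rather than just one.
std::set<std::string> PossibleElementsMaps(bool double_elements) {
  if (double_elements) return {"fixed_double_array_map"};
  return {"fixed_array_map", "fixed_cow_array_map"};
}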
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 1e93de5124..97d712f125 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -29,6 +29,7 @@ LoopVariableOptimizer::LoopVariableOptimizer(Graph* graph,
common_(common),
zone_(zone),
limits_(graph->NodeCount(), zone),
+ reduced_(graph->NodeCount(), zone),
induction_vars_(zone) {}
void LoopVariableOptimizer::Run() {
@@ -40,13 +41,13 @@ void LoopVariableOptimizer::Run() {
queue.pop();
queued.Set(node, false);
- DCHECK_NULL(limits_[node->id()]);
+ DCHECK(!reduced_.Get(node));
bool all_inputs_visited = true;
int inputs_end = (node->opcode() == IrOpcode::kLoop)
? kFirstBackedge
: node->op()->ControlInputCount();
for (int i = 0; i < inputs_end; i++) {
- if (limits_[NodeProperties::GetControlInput(node, i)->id()] == nullptr) {
+ if (!reduced_.Get(NodeProperties::GetControlInput(node, i))) {
all_inputs_visited = false;
break;
}
@@ -54,7 +55,7 @@ void LoopVariableOptimizer::Run() {
if (!all_inputs_visited) continue;
VisitNode(node);
- DCHECK_NOT_NULL(limits_[node->id()]);
+ reduced_.Set(node, true);
// Queue control outputs.
for (Edge edge : node->use_edges()) {
@@ -73,80 +74,6 @@ void LoopVariableOptimizer::Run() {
}
}
-class LoopVariableOptimizer::Constraint : public ZoneObject {
- public:
- InductionVariable::ConstraintKind kind() const { return kind_; }
- Node* left() const { return left_; }
- Node* right() const { return right_; }
-
- const Constraint* next() const { return next_; }
-
- Constraint(Node* left, InductionVariable::ConstraintKind kind, Node* right,
- const Constraint* next)
- : left_(left), right_(right), kind_(kind), next_(next) {}
-
- private:
- Node* left_;
- Node* right_;
- InductionVariable::ConstraintKind kind_;
- const Constraint* next_;
-};
-
-class LoopVariableOptimizer::VariableLimits : public ZoneObject {
- public:
- static VariableLimits* Empty(Zone* zone) {
- return new (zone) VariableLimits();
- }
-
- VariableLimits* Copy(Zone* zone) const {
- return new (zone) VariableLimits(this);
- }
-
- void Add(Node* left, InductionVariable::ConstraintKind kind, Node* right,
- Zone* zone) {
- head_ = new (zone) Constraint(left, kind, right, head_);
- limit_count_++;
- }
-
- void Merge(const VariableLimits* other) {
- // Change the current condition list to a longest common tail
- // of this condition list and the other list. (The common tail
- // should correspond to the list from the common dominator.)
-
- // First, we throw away the prefix of the longer list, so that
- // we have lists of the same length.
- size_t other_size = other->limit_count_;
- const Constraint* other_limit = other->head_;
- while (other_size > limit_count_) {
- other_limit = other_limit->next();
- other_size--;
- }
- while (limit_count_ > other_size) {
- head_ = head_->next();
- limit_count_--;
- }
-
- // Then we go through both lists in lock-step until we find
- // the common tail.
- while (head_ != other_limit) {
- DCHECK_LT(0, limit_count_);
- limit_count_--;
- other_limit = other_limit->next();
- head_ = head_->next();
- }
- }
-
- const Constraint* head() const { return head_; }
-
- private:
- VariableLimits() {}
- explicit VariableLimits(const VariableLimits* other)
- : head_(other->head_), limit_count_(other->limit_count_) {}
-
- const Constraint* head_ = nullptr;
- size_t limit_count_ = 0;
-};
-
void InductionVariable::AddUpperBound(Node* bound,
InductionVariable::ConstraintKind kind) {
if (FLAG_trace_turbo_loop) {
@@ -173,21 +100,19 @@ void LoopVariableOptimizer::VisitBackedge(Node* from, Node* loop) {
// Go through the constraints, and update the induction variables in
// this loop if they are involved in the constraint.
- const VariableLimits* limits = limits_[from->id()];
- for (const Constraint* constraint = limits->head(); constraint != nullptr;
- constraint = constraint->next()) {
- if (constraint->left()->opcode() == IrOpcode::kPhi &&
- NodeProperties::GetControlInput(constraint->left()) == loop) {
- auto var = induction_vars_.find(constraint->left()->id());
+ for (Constraint constraint : limits_.Get(from)) {
+ if (constraint.left->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(constraint.left) == loop) {
+ auto var = induction_vars_.find(constraint.left->id());
if (var != induction_vars_.end()) {
- var->second->AddUpperBound(constraint->right(), constraint->kind());
+ var->second->AddUpperBound(constraint.right, constraint.kind);
}
}
- if (constraint->right()->opcode() == IrOpcode::kPhi &&
- NodeProperties::GetControlInput(constraint->right()) == loop) {
- auto var = induction_vars_.find(constraint->right()->id());
+ if (constraint.right->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(constraint.right) == loop) {
+ auto var = induction_vars_.find(constraint.right->id());
if (var != induction_vars_.end()) {
- var->second->AddLowerBound(constraint->left(), constraint->kind());
+ var->second->AddLowerBound(constraint.left, constraint.kind);
}
}
}
@@ -214,11 +139,11 @@ void LoopVariableOptimizer::VisitNode(Node* node) {
void LoopVariableOptimizer::VisitMerge(Node* node) {
// Merge the limits of all incoming edges.
- VariableLimits* merged = limits_[node->InputAt(0)->id()]->Copy(zone());
+ VariableLimits merged = limits_.Get(node->InputAt(0));
for (int i = 1; i < node->InputCount(); i++) {
- merged->Merge(limits_[node->InputAt(i)->id()]);
+ merged.ResetToCommonAncestor(limits_.Get(node->InputAt(i)));
}
- limits_[node->id()] = merged;
+ limits_.Set(node, merged);
}
void LoopVariableOptimizer::VisitLoop(Node* node) {
@@ -230,27 +155,27 @@ void LoopVariableOptimizer::VisitLoop(Node* node) {
void LoopVariableOptimizer::VisitIf(Node* node, bool polarity) {
Node* branch = node->InputAt(0);
Node* cond = branch->InputAt(0);
- VariableLimits* limits = limits_[branch->id()]->Copy(zone());
+ VariableLimits limits = limits_.Get(branch);
// Normalize to less than comparison.
switch (cond->opcode()) {
case IrOpcode::kJSLessThan:
case IrOpcode::kSpeculativeNumberLessThan:
- AddCmpToLimits(limits, cond, InductionVariable::kStrict, polarity);
+ AddCmpToLimits(&limits, cond, InductionVariable::kStrict, polarity);
break;
case IrOpcode::kJSGreaterThan:
- AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, !polarity);
+ AddCmpToLimits(&limits, cond, InductionVariable::kNonStrict, !polarity);
break;
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
- AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, polarity);
+ AddCmpToLimits(&limits, cond, InductionVariable::kNonStrict, polarity);
break;
case IrOpcode::kJSGreaterThanOrEqual:
- AddCmpToLimits(limits, cond, InductionVariable::kStrict, !polarity);
+ AddCmpToLimits(&limits, cond, InductionVariable::kStrict, !polarity);
break;
default:
break;
}
- limits_[node->id()] = limits;
+ limits_.Set(node, limits);
}
void LoopVariableOptimizer::AddCmpToLimits(
@@ -260,19 +185,17 @@ void LoopVariableOptimizer::AddCmpToLimits(
Node* right = node->InputAt(1);
if (FindInductionVariable(left) || FindInductionVariable(right)) {
if (polarity) {
- limits->Add(left, kind, right, zone());
+ limits->PushFront(Constraint{left, kind, right}, zone());
} else {
kind = (kind == InductionVariable::kStrict)
? InductionVariable::kNonStrict
: InductionVariable::kStrict;
- limits->Add(right, kind, left, zone());
+ limits->PushFront(Constraint{right, kind, left}, zone());
}
}
}
-void LoopVariableOptimizer::VisitStart(Node* node) {
- limits_[node->id()] = VariableLimits::Empty(zone());
-}
+void LoopVariableOptimizer::VisitStart(Node* node) { limits_.Set(node, {}); }
void LoopVariableOptimizer::VisitLoopExit(Node* node) {
return TakeConditionsFromFirstControl(node);
@@ -284,10 +207,7 @@ void LoopVariableOptimizer::VisitOtherControl(Node* node) {
}
void LoopVariableOptimizer::TakeConditionsFromFirstControl(Node* node) {
- const VariableLimits* limits =
- limits_[NodeProperties::GetControlInput(node, 0)->id()];
- DCHECK_NOT_NULL(limits);
- limits_[node->id()] = limits;
+ limits_.Set(node, limits_.Get(NodeProperties::GetControlInput(node, 0)));
}
const InductionVariable* LoopVariableOptimizer::FindInductionVariable(
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.h b/deps/v8/src/compiler/loop-variable-optimizer.h
index 9eec614070..8e1d4bfebe 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.h
+++ b/deps/v8/src/compiler/loop-variable-optimizer.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
#define V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
+#include "src/compiler/functional-list.h"
+#include "src/compiler/node-aux-data.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -82,8 +84,17 @@ class LoopVariableOptimizer {
const int kAssumedLoopEntryIndex = 0;
const int kFirstBackedge = 1;
- class Constraint;
- class VariableLimits;
+ struct Constraint {
+ Node* left;
+ InductionVariable::ConstraintKind kind;
+ Node* right;
+
+ bool operator!=(const Constraint& other) const {
+ return left != other.left || kind != other.kind || right != other.right;
+ }
+ };
+
+ using VariableLimits = FunctionalList<Constraint>;
void VisitBackedge(Node* from, Node* loop);
void VisitNode(Node* node);
@@ -109,7 +120,9 @@ class LoopVariableOptimizer {
Graph* graph_;
CommonOperatorBuilder* common_;
Zone* zone_;
- ZoneVector<const VariableLimits*> limits_;
+ NodeAuxData<VariableLimits> limits_;
+ NodeAuxData<bool> reduced_;
+
ZoneMap<int, InductionVariable*> induction_vars_;
};
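The hand-rolled Constraint/VariableLimits linked list becomes a FunctionalList<Constraint>. A sketch of the ResetToCommonAncestor operation that replaces the old Merge; the list representation is assumed, with shared_ptr standing in for zone-allocated cells:

#include <cstddef>
#include <memory>

// Persistent cons list: PushFront shares the tail, so two lists built on
// the same dominator prefix physically share those cells.
template <typename T>
struct FunctionalListSketch {
  struct Cell {
    T value;
    std::shared_ptr<Cell> next;
  };
  std::shared_ptr<Cell> head;
  size_t size = 0;

  void PushFront(T value) {
    head = std::make_shared<Cell>(Cell{value, head});
    ++size;
  }

  // Drop elements from the longer list until lengths match, then pop both
  // in lock-step until the physically shared tail (the constraints from
  // the common dominator) is reached. This is the old Merge() logic.
  void ResetToCommonAncestor(FunctionalListSketch other) {
    while (other.size > size) {
      other.head = other.head->next;
      --other.size;
    }
    while (size > other.size) {
      head = head->next;
      --size;
    }
    while (head != other.head) {
      head = head->next;
      other.head = other.head->next;
      --size;
      --other.size;
    }
  }
};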
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 43f1518461..0c59453b41 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -62,8 +62,8 @@ class MachineRepresentationInferrer {
: MachineRepresentation::kBit;
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters: {
- CallDescriptor const* desc = CallDescriptorOf(input->op());
- return desc->GetReturnType(index).representation();
+ auto call_descriptor = CallDescriptorOf(input->op());
+ return call_descriptor->GetReturnType(index).representation();
}
default:
return MachineRepresentation::kNone;
@@ -110,7 +110,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kTypedStateValues:
representation_vector_[node->id()] = MachineRepresentation::kNone;
break;
- case IrOpcode::kAtomicLoad:
+ case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
@@ -119,6 +119,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
+ case IrOpcode::kSpeculationPoison:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -132,27 +133,27 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kCall:
case IrOpcode::kCallWithCallerSavedRegisters: {
- CallDescriptor const* desc = CallDescriptorOf(node->op());
- if (desc->ReturnCount() > 0) {
+ auto call_descriptor = CallDescriptorOf(node->op());
+ if (call_descriptor->ReturnCount() > 0) {
representation_vector_[node->id()] =
- desc->GetReturnType(0).representation();
+ call_descriptor->GetReturnType(0).representation();
} else {
representation_vector_[node->id()] =
MachineRepresentation::kTagged;
}
break;
}
- case IrOpcode::kAtomicStore:
+ case IrOpcode::kWord32AtomicStore:
representation_vector_[node->id()] =
PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
break;
- case IrOpcode::kAtomicExchange:
- case IrOpcode::kAtomicCompareExchange:
- case IrOpcode::kAtomicAdd:
- case IrOpcode::kAtomicSub:
- case IrOpcode::kAtomicAnd:
- case IrOpcode::kAtomicOr:
- case IrOpcode::kAtomicXor:
+ case IrOpcode::kWord32AtomicExchange:
+ case IrOpcode::kWord32AtomicCompareExchange:
+ case IrOpcode::kWord32AtomicAdd:
+ case IrOpcode::kWord32AtomicSub:
+ case IrOpcode::kWord32AtomicAnd:
+ case IrOpcode::kWord32AtomicOr:
+ case IrOpcode::kWord32AtomicXor:
representation_vector_[node->id()] = PromoteRepresentation(
AtomicOpRepresentationOf(node->op()).representation());
break;
@@ -459,19 +460,19 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
- case IrOpcode::kAtomicLoad:
+ case IrOpcode::kWord32AtomicLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
break;
case IrOpcode::kStore:
- case IrOpcode::kAtomicStore:
- case IrOpcode::kAtomicExchange:
- case IrOpcode::kAtomicAdd:
- case IrOpcode::kAtomicSub:
- case IrOpcode::kAtomicAnd:
- case IrOpcode::kAtomicOr:
- case IrOpcode::kAtomicXor:
+ case IrOpcode::kWord32AtomicStore:
+ case IrOpcode::kWord32AtomicExchange:
+ case IrOpcode::kWord32AtomicAdd:
+ case IrOpcode::kWord32AtomicSub:
+ case IrOpcode::kWord32AtomicAnd:
+ case IrOpcode::kWord32AtomicOr:
+ case IrOpcode::kWord32AtomicXor:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -486,7 +487,7 @@ class MachineRepresentationChecker {
node, 2, inferrer_->GetRepresentation(node));
}
break;
- case IrOpcode::kAtomicCompareExchange:
+ case IrOpcode::kWord32AtomicCompareExchange:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -737,15 +738,15 @@ class MachineRepresentationChecker {
}
void CheckCallInputs(Node const* node) {
- CallDescriptor const* desc = CallDescriptorOf(node->op());
+ auto call_descriptor = CallDescriptorOf(node->op());
std::ostringstream str;
bool should_log_error = false;
- for (size_t i = 0; i < desc->InputCount(); ++i) {
+ for (size_t i = 0; i < call_descriptor->InputCount(); ++i) {
Node const* input = node->InputAt(static_cast<int>(i));
MachineRepresentation const input_type =
inferrer_->GetRepresentation(input);
MachineRepresentation const expected_input_type =
- desc->GetInputType(i).representation();
+ call_descriptor->GetInputType(i).representation();
if (!IsCompatible(expected_input_type, input_type)) {
if (!should_log_error) {
should_log_error = true;
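The kAtomic* to kWord32Atomic* rename is mechanical here and in machine-operator.cc below. A sketch of what one renamed cache entry amounts to, simplified from the ATOMIC_OP macro with the input/output counts copied from the diff:

// Approximate expansion of ATOMIC_OP(Word32AtomicAdd, Int32) after the
// rename: the same cached singleton operator as before, but the opcode
// and mnemonic now carry an explicit operand width, clearing the
// namespace for a future Word64 atomic family.
struct Word32AtomicAddInt32OperatorSketch {
  const char* mnemonic = "Word32AtomicAdd";
  int value_inputs = 3, effect_inputs = 1, control_inputs = 1;
  int value_outputs = 1, effect_outputs = 1;
};
Word32AtomicAddInt32OperatorSketch kWord32AtomicAddInt32;  // cache member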
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 97c83b1b82..1fcfa52e51 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -352,10 +352,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
- if (m.right().IsNaN()) { // x + NaN => NaN
- // Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().Value() - m.right().Value());
- }
if (m.IsFoldable()) { // K + K => K
return ReplaceFloat64(m.left().Value() + m.right().Value());
}
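The removed x + NaN => NaN fold is the only semantic change in this hunk. A standalone illustration of why constant-folding against a single NaN operand is delicate (the rationale given here is an inference, not taken from the commit): NaNs carry payload bits, and the fold fixed one particular quieted payload where the machine add may produce another.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Print the raw bit patterns of two NaNs with different payloads and of
// their sum; which payload survives the add is implementation-defined,
// which is exactly what a compile-time fold would have to guess.
static uint64_t Bits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits;
}

int main() {
  double a = std::nan("1");
  double b = std::nan("2");
  std::printf("a=%016llx b=%016llx a+b=%016llx\n",
              static_cast<unsigned long long>(Bits(a)),
              static_cast<unsigned long long>(Bits(b)),
              static_cast<unsigned long long>(Bits(a + b)));
  return 0;
}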
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 66178308be..c091146f1d 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -37,7 +37,8 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
- IrOpcode::kAtomicLoad == op->opcode());
+ IrOpcode::kWord32AtomicLoad == op->opcode() ||
+ IrOpcode::kPoisonedLoad == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
@@ -81,7 +82,7 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kAtomicStore, op->opcode());
+ DCHECK_EQ(IrOpcode::kWord32AtomicStore, op->opcode());
return OpParameter<MachineRepresentation>(op);
}
@@ -169,6 +170,11 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord8ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord16ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord8ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord16ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(SignExtendWord32ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
@@ -219,6 +225,7 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(SpeculationPoison, Operator::kNoProperties, 0, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
@@ -454,6 +461,14 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
+ struct PoisonedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ PoisonedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kPoisonedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
struct UnalignedLoad##Type##Operator final \
: public Operator1<UnalignedLoadRepresentation> { \
UnalignedLoad##Type##Operator() \
@@ -471,6 +486,7 @@ struct MachineOperatorGlobalCache {
1, 1, 1, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
+ PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
@@ -547,30 +563,31 @@ struct MachineOperatorGlobalCache {
#undef STORE
#define ATOMIC_LOAD(Type) \
- struct AtomicLoad##Type##Operator final \
+ struct Word32AtomicLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
- AtomicLoad##Type##Operator() \
+ Word32AtomicLoad##Type##Operator() \
: Operator1<LoadRepresentation>( \
- IrOpcode::kAtomicLoad, \
+ IrOpcode::kWord32AtomicLoad, \
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
- AtomicLoad##Type##Operator kAtomicLoad##Type;
+ Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
-#define ATOMIC_STORE(Type) \
- struct AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
- IrOpcode::kAtomicStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "AtomicStore", 3, 1, 1, 0, 1, 0, MachineRepresentation::Type) {} \
- }; \
- AtomicStore##Type##Operator kAtomicStore##Type;
+#define ATOMIC_STORE(Type) \
+ struct Word32AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ Word32AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kWord32AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
-#undef STORE
+#undef ATOMIC_STORE
#define ATOMIC_OP(op, type) \
struct op##type##Operator : public Operator1<MachineType> { \
@@ -580,27 +597,28 @@ struct MachineOperatorGlobalCache {
3, 1, 1, 1, 1, 0, MachineType::type()) {} \
}; \
op##type##Operator k##op##type;
-#define ATOMIC_OP_LIST(type) \
- ATOMIC_OP(AtomicExchange, type) \
- ATOMIC_OP(AtomicAdd, type) \
- ATOMIC_OP(AtomicSub, type) \
- ATOMIC_OP(AtomicAnd, type) \
- ATOMIC_OP(AtomicOr, type) \
- ATOMIC_OP(AtomicXor, type)
+#define ATOMIC_OP_LIST(type) \
+ ATOMIC_OP(Word32AtomicExchange, type) \
+ ATOMIC_OP(Word32AtomicAdd, type) \
+ ATOMIC_OP(Word32AtomicSub, type) \
+ ATOMIC_OP(Word32AtomicAnd, type) \
+ ATOMIC_OP(Word32AtomicOr, type) \
+ ATOMIC_OP(Word32AtomicXor, type)
ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
#undef ATOMIC_OP_LIST
#undef ATOMIC_OP
-#define ATOMIC_COMPARE_EXCHANGE(Type) \
- struct AtomicCompareExchange##Type##Operator \
- : public Operator1<MachineType> { \
- AtomicCompareExchange##Type##Operator() \
- : Operator1<MachineType>(IrOpcode::kAtomicCompareExchange, \
- Operator::kNoDeopt | Operator::kNoThrow, \
- "AtomicCompareExchange", 4, 1, 1, 1, 1, 0, \
- MachineType::Type()) {} \
- }; \
- AtomicCompareExchange##Type##Operator kAtomicCompareExchange##Type;
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word32AtomicCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word32AtomicCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord32AtomicCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word32AtomicCompareExchange", 4, 1, 1, 1, 1, \
+ 0, MachineType::Type()) {} \
+ }; \
+ Word32AtomicCompareExchange##Type##Operator \
+ kWord32AtomicCompareExchange##Type;
ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
@@ -730,6 +748,16 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kPoisonedLoad##Type; \
+ }
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+}
+
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -823,90 +851,93 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return new (zone_) CommentOperator(msg);
}
-const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kAtomicLoad##Type; \
+const Operator* MachineOperatorBuilder::Word32AtomicLoad(
+ LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kWord32AtomicLoad##Type; \
}
ATOMIC_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kAtomicStore##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicStore(
+ MachineRepresentation rep) {
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kWord32AtomicStore##kRep; \
}
ATOMIC_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicExchange(MachineType rep) {
-#define EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicExchange##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType rep) {
+#define EXCHANGE(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicExchange##kRep; \
}
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
-#define COMPARE_EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicCompareExchange##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
+ MachineType rep) {
+#define COMPARE_EXCHANGE(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicCompareExchange##kRep; \
}
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicAdd(MachineType rep) {
-#define ADD(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicAdd##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType rep) {
+#define ADD(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicAdd##kRep; \
}
ATOMIC_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicSub(MachineType rep) {
-#define SUB(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicSub##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType rep) {
+#define SUB(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicSub##kRep; \
}
ATOMIC_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicAnd(MachineType rep) {
-#define AND(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicAnd##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType rep) {
+#define AND(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicAnd##kRep; \
}
ATOMIC_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicOr(MachineType rep) {
-#define OR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicOr##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType rep) {
+#define OR(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicOr##kRep; \
}
ATOMIC_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
-#define XOR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kAtomicXor##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType rep) {
+#define XOR(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord32AtomicXor##kRep; \
}
ATOMIC_TYPE_LIST(XOR)
#undef XOR
@@ -967,6 +998,19 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
2, 0, 0, 1, 0, 0, array);
}
+#undef PURE_BINARY_OP_LIST_32
+#undef PURE_BINARY_OP_LIST_64
+#undef MACHINE_PURE_OP_LIST
+#undef PURE_OPTIONAL_OP_LIST
+#undef OVERFLOW_OP_LIST
+#undef MACHINE_TYPE_LIST
+#undef MACHINE_REPRESENTATION_LIST
+#undef ATOMIC_TYPE_LIST
+#undef ATOMIC_REPRESENTATION_LIST
+#undef SIMD_LANE_OP_LIST
+#undef SIMD_FORMAT_LIST
+#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
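Taken together, the renames make the 32-bit-word semantics of these atomics explicit in the builder API, leaving the unprefixed names free for later Word64 variants. A hedged usage sketch, assuming a MachineOperatorBuilder m as used elsewhere in the compiler:

    // Assumes MachineOperatorBuilder m; mirrors the renamed API above.
    const Operator* add8 = m.Word32AtomicAdd(MachineType::Uint8());
    const Operator* load32 = m.Word32AtomicLoad(MachineType::Int32());
    const Operator* store16 =
        m.Word32AtomicStore(MachineRepresentation::kWord16);
    // Any type outside ATOMIC_TYPE_LIST / ATOMIC_REPRESENTATION_LIST
    // falls through to UNREACHABLE().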
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 10b4b15701..2cc1829116 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -350,6 +350,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* BitcastInt32ToFloat32();
const Operator* BitcastInt64ToFloat64();
+ // These operators sign-extend to Int32/Int64
+ const Operator* SignExtendWord8ToInt32();
+ const Operator* SignExtendWord16ToInt32();
+ const Operator* SignExtendWord8ToInt64();
+ const Operator* SignExtendWord16ToInt64();
+ const Operator* SignExtendWord32ToInt64();
+
// Floating point operators always operate with IEEE 754 round-to-nearest
// (single-precision).
const Operator* Float32Add();
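The five new sign-extension operators model ordinary two's-complement widening; their scalar semantics match plain C++ integral conversions (a sketch of the semantics, not compiler code):

    #include <cstdint>

    int32_t SignExtendWord8ToInt32(int8_t v)   { return v; }
    int32_t SignExtendWord16ToInt32(int16_t v) { return v; }
    int64_t SignExtendWord8ToInt64(int8_t v)   { return v; }
    int64_t SignExtendWord16ToInt64(int16_t v) { return v; }
    int64_t SignExtendWord32ToInt64(int32_t v) { return v; }
    // e.g. SignExtendWord8ToInt32(int8_t(0x80)) == -128, i.e. 0xFFFFFF80.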
@@ -577,6 +584,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// load [base + index]
const Operator* Load(LoadRepresentation rep);
+ const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
// store [base + index], value
@@ -592,29 +600,33 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
+ // Returns a value which can be used as a mask to poison values when executing
+ // speculatively.
+ const Operator* SpeculationPoison();
+
// Access to the machine stack.
const Operator* LoadStackPointer();
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
// atomic-load [base + index]
- const Operator* AtomicLoad(LoadRepresentation rep);
+ const Operator* Word32AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
- const Operator* AtomicStore(MachineRepresentation rep);
+ const Operator* Word32AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value
- const Operator* AtomicExchange(MachineType rep);
+ const Operator* Word32AtomicExchange(MachineType rep);
// atomic-compare-exchange [base + index], old_value, new_value
- const Operator* AtomicCompareExchange(MachineType rep);
+ const Operator* Word32AtomicCompareExchange(MachineType rep);
// atomic-add [base + index], value
- const Operator* AtomicAdd(MachineType rep);
+ const Operator* Word32AtomicAdd(MachineType rep);
// atomic-sub [base + index], value
- const Operator* AtomicSub(MachineType rep);
+ const Operator* Word32AtomicSub(MachineType rep);
// atomic-and [base + index], value
- const Operator* AtomicAnd(MachineType rep);
+ const Operator* Word32AtomicAnd(MachineType rep);
// atomic-or [base + index], value
- const Operator* AtomicOr(MachineType rep);
+ const Operator* Word32AtomicOr(MachineType rep);
// atomic-xor [base + index], value
- const Operator* AtomicXor(MachineType rep);
+ const Operator* Word32AtomicXor(MachineType rep);
const OptionalOperator SpeculationFence();
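SpeculationPoison pairs with the PoisonedLoad operator above: the poison value is all ones on the architecturally correct path and all zeros under misspeculation, so ANDing loaded data with it starves Spectre-style gadgets of secret-dependent values. A conceptual model (sketch only; each backend emits the real masking):

    #include <cstdint>

    // poison == ~0 on the correct path, 0 when speculating down a wrong PC.
    uintptr_t PoisonedLoadModel(const uintptr_t* slot, uintptr_t poison) {
      return *slot & poison;  // a misspeculated load observes 0
    }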
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 596204e214..95418c4a81 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -15,13 +15,15 @@ namespace v8 {
namespace internal {
namespace compiler {
-MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone)
+MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
+ LoadPoisoning load_poisoning)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
zone_(zone),
- graph_assembler_(jsgraph, nullptr, nullptr, zone) {}
+ graph_assembler_(jsgraph, nullptr, nullptr, zone),
+ load_poisoning_(load_poisoning) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -229,9 +231,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
- CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
- allocate_operator_.set(common()->Call(descriptor));
+ allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ Call(allocate_operator_.get(), target, size);
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
@@ -284,9 +286,9 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
: __
AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
- CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
- allocate_operator_.set(common()->Call(descriptor));
+ allocate_operator_.set(common()->Call(call_descriptor));
}
__ Goto(&done, __ Call(allocate_operator_.get(), target, size));
@@ -348,7 +350,14 @@ void MemoryOptimizer::VisitLoadElement(Node* node,
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ if (load_poisoning_ == LoadPoisoning::kDoPoison &&
+ access.machine_type.representation() !=
+ MachineRepresentation::kTaggedPointer) {
+ NodeProperties::ChangeOp(node,
+ machine()->PoisonedLoad(access.machine_type));
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ }
EnqueueUses(node, state);
}
@@ -357,7 +366,14 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ if (load_poisoning_ == LoadPoisoning::kDoPoison &&
+ access.machine_type.representation() !=
+ MachineRepresentation::kTaggedPointer) {
+ NodeProperties::ChangeOp(node,
+ machine()->PoisonedLoad(access.machine_type));
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ }
EnqueueUses(node, state);
}
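Both hunks apply the same policy: with poisoning enabled, every lowered field or element load becomes a PoisonedLoad unless its representation is a tagged pointer. The predicate, restated as self-contained C++ with minimal stand-in enums so it compiles outside V8:

    enum class LoadPoisoning { kDontPoison, kDoPoison };
    enum class MachineRepresentation { kTaggedPointer, kWord32 /* ... */ };

    bool ShouldPoison(LoadPoisoning mode, MachineRepresentation rep) {
      return mode == LoadPoisoning::kDoPoison &&
             rep != MachineRepresentation::kTaggedPointer;
    }
    // ShouldPoison(...) ? machine()->PoisonedLoad(t) : machine()->Load(t)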
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index e229f2b0be..38643ea8a3 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -31,7 +31,7 @@ typedef uint32_t NodeId;
// implicitly.
class MemoryOptimizer final {
public:
- MemoryOptimizer(JSGraph* jsgraph, Zone* zone);
+ MemoryOptimizer(JSGraph* jsgraph, Zone* zone, LoadPoisoning load_poisoning);
~MemoryOptimizer() {}
void Optimize();
@@ -142,6 +142,7 @@ class MemoryOptimizer final {
ZoneQueue<Token> tokens_;
Zone* const zone_;
GraphAssembler graph_assembler_;
+ LoadPoisoning load_poisoning_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/compiler/mips/OWNERS
+++ b/deps/v8/src/compiler/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com \ No newline at end of file
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 3b57081c9e..91e68feb94 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -618,33 +618,53 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ ComputeCodeStartAddress(at);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(at));
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains the deoptimization bit, found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // This push on ra and the pop below together ensure that we restore the
- // register ra, which is needed while computing frames for deoptimization.
- __ push(ra);
- // The bal instruction puts the address of the current instruction into
- // the return address (ra) register, which we can use later on.
- __ bal(&current);
- __ nop();
- int pc = __ pc_offset();
- __ bind(&current);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ lw(a2, MemOperand(ra, offset));
- __ pop(ra);
- __ lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ lw(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne, a2, Operand(zero_reg));
+ __ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
+}
+
+void CodeGenerator::GenerateSpeculationPoison() {
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerPointer - 1))
+ __ ComputeCodeStartAddress(at);
+ __ Move(kSpeculationPoisonRegister, at);
+ __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ at);
+ __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kBitsPerPointer - 1);
+ __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kSpeculationPoisonRegister);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ And(sp, sp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
@@ -721,8 +741,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Assert(eq, AbortReason::kWrongFunctionContext, cp,
Operand(kScratchReg));
}
- __ lw(at, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ Call(at, Code::kHeaderSize - kHeapObjectTag);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(a2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -1040,73 +1062,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsCtz: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- if (IsMipsArchVariant(kMips32r6)) {
- // We don't have an instruction to count the number of trailing zeroes.
- // Start by flipping the bits end-for-end so we can count the number of
- // leading zeroes instead.
- __ Ror(dst, src, 16);
- __ wsbh(dst, dst);
- __ bitswap(dst, dst);
- __ Clz(dst, dst);
- } else {
- // Convert trailing zeroes to trailing ones, and bits to their left
- // to zeroes.
- __ Addu(kScratchReg, src, -1);
- __ Xor(dst, kScratchReg, src);
- __ And(dst, dst, kScratchReg);
- // Count number of leading zeroes.
- __ Clz(dst, dst);
- // Subtract number of leading zeroes from 32 to get number of trailing
- // ones. Remember that the trailing ones were formerly trailing zeroes.
- __ li(kScratchReg, 32);
- __ Subu(dst, kScratchReg, dst);
- }
+ __ Ctz(dst, src);
} break;
case kMipsPopcnt: {
- // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
- //
- // A generalization of the best bit counting method to integers of
- // bit-widths up to 128 (parameterized by type T) is this:
- //
- // v = v - ((v >> 1) & (T)~(T)0/3); // temp
- // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
- // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
- // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
- //
- // For comparison, for 32-bit quantities, this algorithm can be executed
- // using 20 MIPS instructions (the calls to LoadConst32() generate two
- // machine instructions each for the values being used in this algorithm).
- // A(n unrolled) loop-based algorithm requires 25 instructions.
- //
- // For 64-bit quantities, this algorithm gets executed twice, (once
- // for in_lo, and again for in_hi), but saves a few instructions
- // because the mask values only have to be loaded once. Using this
- // algorithm the count for a 64-bit operand can be performed in 29
- // instructions compared to a loop-based algorithm which requires 47
- // instructions.
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- uint32_t B0 = 0x55555555; // (T)~(T)0/3
- uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
- uint32_t value = 0x01010101; // (T)~(T)0/255
- uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
- __ srl(kScratchReg, src, 1);
- __ li(kScratchReg2, B0);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Subu(kScratchReg, src, kScratchReg);
- __ li(kScratchReg2, B1);
- __ And(dst, kScratchReg, kScratchReg2);
- __ srl(kScratchReg, kScratchReg, 2);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Addu(kScratchReg, dst, kScratchReg);
- __ srl(dst, kScratchReg, 4);
- __ Addu(dst, dst, kScratchReg);
- __ li(kScratchReg2, B2);
- __ And(dst, dst, kScratchReg2);
- __ li(kScratchReg, value);
- __ Mul(dst, dst, kScratchReg);
- __ srl(dst, dst, shift);
+ __ Popcnt(dst, src);
} break;
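The long inline sequences deleted above now live behind the macro-assembler pseudo-instructions Ctz and Popcnt; the underlying bit tricks are the classic ones. Portable 32-bit C++ models:

    #include <cstdint>

    // SWAR popcount, per the Stanford bithacks link in the deleted comment.
    uint32_t Popcnt32(uint32_t v) {
      v = v - ((v >> 1) & 0x55555555u);
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
      v = (v + (v >> 4)) & 0x0F0F0F0Fu;
      return (v * 0x01010101u) >> 24;
    }

    // Trailing-zero count via the trailing-ones mask used by the pre-r6 path.
    uint32_t Ctz32(uint32_t v) {
      uint32_t trailing_ones = (v - 1) & ~v;  // 2^k - 1 for k trailing zeroes
      return Popcnt32(trailing_ones);         // v == 0 correctly yields 32
    }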
case kMipsShl:
if (instr->InputAt(1)->IsRegister()) {
@@ -1650,7 +1611,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kMipsPeek: {
- int reverse_slot = MiscField::decode(instr->opcode());
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = i.InputInt32(0) + 1;
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
if (instr->OutputAt(0)->IsFPRegister()) {
@@ -1676,9 +1638,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
__ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
}
} else {
__ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1689,74 +1654,74 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
break;
- case kAtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kAtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kAtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kAtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kAtomicCompareExchangeWord32:
+ case kWord32AtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
@@ -2979,6 +2944,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ UNREACHABLE();
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3026,8 +2996,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
tasm()->isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -3253,9 +3224,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0)));
- __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
}
- __ nop(); // Branch delay slot of the last beq.
AssembleArchJump(i.InputRpo(1));
}
@@ -3271,9 +3241,9 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
frame->AlignSavedCalleeRegisterSlots();
}
@@ -3285,7 +3255,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kDoubleSize / kPointerSize));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
int count = base::bits::CountPopulation(saves);
DCHECK_EQ(kNumCalleeSaved, count + 1);
@@ -3294,14 +3264,14 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -3309,8 +3279,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3325,8 +3295,8 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
@@ -3355,8 +3325,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -3364,19 +3334,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore GP registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore FPU registers.
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
__ MultiPopFPU(saves_fpu);
}
MipsOperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable
@@ -3498,21 +3468,33 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
} else if (source->IsFPRegister()) {
- FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ MSARegister dst = g.ToSimd128Register(destination);
+ __ move_v(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ __ st_b(src, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- MachineRepresentation rep =
- LocationOperand::cast(source)->representation();
- if (rep == MachineRepresentation::kFloat64) {
- __ Sdc1(src, g.ToMemOperand(destination));
- } else if (rep == MachineRepresentation::kFloat32) {
- __ swc1(src, g.ToMemOperand(destination));
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
} else {
- DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ DCHECK(destination->IsFPStackSlot());
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ __ Sdc1(src, g.ToMemOperand(destination));
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ swc1(src, g.ToMemOperand(destination));
+ } else {
+ UNREACHABLE();
+ }
}
}
} else if (source->IsFPStackSlot()) {
@@ -3526,7 +3508,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ lwc1(g.ToDoubleRegister(destination), src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ld_b(g.ToSimd128Register(destination), src);
}
} else {
FPURegister temp = kScratchDoubleReg;
@@ -3538,7 +3521,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ swc1(temp, g.ToMemOperand(destination));
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp = kSimd128ScratchReg;
+ __ ld_b(temp, src);
+ __ st_b(temp, g.ToMemOperand(destination));
}
}
} else {
@@ -3579,29 +3565,50 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ sw(temp_0, dst);
__ sw(temp_1, src);
} else if (source->IsFPRegister()) {
- FPURegister temp = kScratchDoubleReg;
- FPURegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp = kSimd128ScratchReg;
+ MSARegister src = g.ToSimd128Register(source);
+ MSARegister dst = g.ToSimd128Register(destination);
+ __ move_v(temp, src);
+ __ move_v(src, dst);
+ __ move_v(dst, temp);
+ } else {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ }
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
MachineRepresentation rep =
LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
__ Move(temp, src);
__ Ldc1(src, dst);
__ Sdc1(temp, dst);
} else if (rep == MachineRepresentation::kFloat32) {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToFloatRegister(source);
__ Move(temp, src);
__ lwc1(src, dst);
__ swc1(temp, dst);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp = kSimd128ScratchReg;
+ MSARegister src = g.ToSimd128Register(source);
+ __ move_v(temp, src);
+ __ ld_b(src, dst);
+ __ st_b(temp, dst);
}
}
} else if (source->IsFPStackSlot()) {
@@ -3627,7 +3634,24 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ swc1(temp_1, src0);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- UNREACHABLE();
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
+ MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
+ MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
+ MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
+ MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp_1 = kSimd128ScratchReg;
+ __ ld_b(temp_1, dst0); // Save destination in temp_1.
+ __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ sw(temp_0, dst0);
+ __ lw(temp_0, src1);
+ __ sw(temp_0, dst1);
+ __ lw(temp_0, src2);
+ __ sw(temp_0, dst2);
+ __ lw(temp_0, src3);
+ __ sw(temp_0, dst3);
+ __ st_b(temp_1, src0);
}
} else {
// No other combinations are possible.
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 35b8a2396d..f0b8a0d588 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -267,7 +267,7 @@ void InstructionSelector::VisitLoad(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kMipsLwc1;
@@ -296,6 +296,10 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@@ -310,6 +314,8 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
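Note the type change from ArchOpcode to InstructionCode in VisitLoad: an InstructionCode packs the architecture opcode together with addressing-mode and misc bitfields, which is what lets the selector OR in kMemoryAccessPoisoned. An illustrative packing (field positions and flag values here are assumptions, not V8's exact layout):

    #include <cstdint>

    using InstructionCode = uint32_t;
    constexpr int kMiscShift = 22;                 // assumed MiscField offset
    constexpr uint32_t kMemoryAccessPoisoned = 1;  // hypothetical flag value

    InstructionCode MarkPoisoned(InstructionCode opcode) {
      return opcode | (kMemoryAccessPoisoned << kMiscShift);
    }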
@@ -1157,14 +1163,14 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
MipsOperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1178,7 +1184,7 @@ void InstructionSelector::EmitPrepareArguments(
}
} else {
// Possibly align stack here for functions.
- int push_count = static_cast<int>(descriptor->StackParameterCount());
+ int push_count = static_cast<int>(call_descriptor->StackParameterCount());
if (push_count > 0) {
// Calculate needed space
int stack_size = 0;
@@ -1201,30 +1207,26 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
MipsOperandGenerator g(this);
int reverse_slot = 0;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
- ++reverse_slot;
// Skip any alignment holes in nodes.
if (output.node != nullptr) {
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
MarkAsFloat64(output.node);
}
- InstructionOperand result = g.DefineAsRegister(output.node);
- Emit(kMipsPeek | MiscField::encode(reverse_slot), result);
- }
- if (output.location.GetType() == MachineType::Float64()) {
- // Float64 require an implicit second slot.
- ++reverse_slot;
+ Emit(kMipsPeek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
}
+ reverse_slot += output.location.GetSizeInPointers();
}
}
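The EmitPrepareResults rewrite fixes the slot accounting: the peek slot is now passed as a 0-based immediate (the code generator adds 1, per the kMipsPeek hunk earlier) and advanced by each location's size in pointers, so a Float64 result consumes two 32-bit slots without the old ad-hoc increment. A self-contained model of the new accounting:

    #include <vector>

    struct Result { bool caller_frame_slot; int size_in_pointers; };

    // Returns the 0-based immediates that would accompany each kMipsPeek.
    std::vector<int> PeekSlots(const std::vector<Result>& results) {
      std::vector<int> slots;
      int reverse_slot = 0;
      for (const Result& r : results) {
        if (r.caller_frame_slot) slots.push_back(reverse_slot);
        reverse_slot += r.size_in_pointers;  // Float64 on MIPS32 adds 2
      }
      return slots;
    }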
@@ -1467,12 +1469,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kMipsCmp, cont, false);
}
+} // namespace
+
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1481,41 +1484,41 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
+ return VisitWordCompare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1527,17 +1530,17 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (!result || selector->IsDefined(result)) {
+ if (!result || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMipsAddOvf, cont);
+ return VisitBinop(this, node, kMipsAddOvf, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMipsSubOvf, cont);
+ return VisitBinop(this, node, kMipsSubOvf, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMipsMulOvf, cont);
+ return VisitBinop(this, node, kMipsMulOvf, cont);
default:
break;
}
@@ -1545,91 +1548,58 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kMipsTst, cont, true);
+ return VisitWordCompare(this, value, kMipsTst, cont, true);
default:
break;
}
}
// Continuation could not be combined with a compare, emit compare against 0.
- MipsOperandGenerator g(selector);
+ MipsOperandGenerator g(this);
InstructionCode const opcode = cont->Encode(kMipsCmp);
InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
- g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->feedback(), cont->frame_state());
+ EmitDeoptimize(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
- g.TempImmediate(0));
+ Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+ g.TempImmediate(0));
} else {
DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.TempImmediate(cont->trap_id()));
+ Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.TempImmediate(cont->trap_id()));
}
}
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
MipsOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 9 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 2 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kMipsSub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 9 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMipsSub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
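The table-versus-lookup heuristic itself is unchanged; it is merely gated on enable_switch_jump_table_ now. It weighs code size against three times the per-dispatch cost. As a standalone function, with the constants copied from the diff:

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    bool UseJumpTable(size_t case_count, size_t value_range,
                      int32_t min_value) {
      const size_t kMaxTableSwitchValueRange = 2 << 16;
      size_t table_space_cost = 9 + value_range;  // one entry per value
      size_t table_time_cost = 3;                 // bounds check + dispatch
      size_t lookup_space_cost = 2 + 2 * case_count;
      size_t lookup_time_cost = case_count;       // linear compare chain
      return case_count > 0 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost &&
             min_value > std::numeric_limits<int32_t>::min() &&
             value_range <= kMaxTableSwitchValueRange;
    }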
@@ -1641,7 +1611,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, &cont);
}
@@ -1778,7 +1748,7 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
arraysize(temps), temps);
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1786,13 +1756,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -1812,7 +1784,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
}
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1821,13 +1793,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -1848,7 +1820,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
}
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1856,15 +1828,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -1886,7 +1858,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1895,15 +1867,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -1967,11 +1939,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -1996,11 +1969,6 @@ void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
@@ -2263,6 +2231,16 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSeb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2302,11 +2280,13 @@ InstructionSelector::AlignmentRequirements() {
}
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
+
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
#undef SIMD_TYPE_LIST
-#undef SIMD_FORMAT_LIST
#undef TRACE_UNIMPL
#undef TRACE
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
index 3fce7dd688..4ce9d7f91d 100644
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ b/deps/v8/src/compiler/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com \ No newline at end of file
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com \ No newline at end of file
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index d4463008c8..ab84fe22b2 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -634,33 +634,53 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ ComputeCodeStartAddress(at);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(at));
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains the deoptimization bit, found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // This push on ra and the pop below together ensure that we restore the
- // register ra, which is needed while computing frames for deoptimization.
- __ push(ra);
- // The bal instruction puts the address of the current instruction into
- // the return address (ra) register, which we can use later on.
- __ bal(&current);
- __ nop();
- int pc = __ pc_offset();
- __ bind(&current);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ Ld(a2, MemOperand(ra, offset));
- __ pop(ra);
- __ Lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ld(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne, a2, Operand(zero_reg));
+ __ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
+}
+
+void CodeGenerator::GenerateSpeculationPoison() {
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerPointer - 1))
+ __ ComputeCodeStartAddress(at);
+ __ Move(kSpeculationPoisonRegister, at);
+ __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ at);
+ __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kBitsPerPointer - 1);
+ __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kSpeculationPoisonRegister);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ And(sp, sp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
@@ -746,9 +766,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Assert(eq, AbortReason::kWrongFunctionContext, cp,
Operand(kScratchReg));
}
- __ Ld(at, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(at);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(a2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1153,124 +1174,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Ctz: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- if (kArchVariant == kMips64r6) {
- // We don't have an instruction to count the number of trailing zeroes.
- // Start by flipping the bits end-for-end so we can count the number of
- // leading zeroes instead.
- __ rotr(dst, src, 16);
- __ wsbh(dst, dst);
- __ bitswap(dst, dst);
- __ Clz(dst, dst);
- } else {
- // Convert trailing zeroes to trailing ones, and bits to their left
- // to zeroes.
- __ Daddu(kScratchReg, src, -1);
- __ Xor(dst, kScratchReg, src);
- __ And(dst, dst, kScratchReg);
- // Count number of leading zeroes.
- __ Clz(dst, dst);
- // Subtract number of leading zeroes from 32 to get number of trailing
- // ones. Remember that the trailing ones were formerly trailing zeroes.
- __ li(kScratchReg, 32);
- __ Subu(dst, kScratchReg, dst);
- }
+ __ Ctz(dst, src);
} break;
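
The open-coded pre-r6 fallback deleted here now lives behind the Ctz macro. For reference, a hedged C++ rendering of the same bit trick (assuming a GCC/Clang-style __builtin_clz):

#include <cstdint>

int Ctz32(uint32_t x) {
  // x - 1 turns the trailing zeros into ones and clears the lowest set bit,
  // so ((x ^ (x - 1)) & (x - 1)) is a mask of exactly the trailing zeros.
  uint32_t mask = (x ^ (x - 1)) & (x - 1);
  // Counting the mask's leading zeros gives 32 - ctz(x); x == 0 maps to 32.
  return 32 - (mask == 0 ? 32 : __builtin_clz(mask));
}
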
case kMips64Dctz: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- if (kArchVariant == kMips64r6) {
- // We don't have an instruction to count the number of trailing zeroes.
- // Start by flipping the bits end-for-end so we can count the number of
- // leading zeroes instead.
- __ dsbh(dst, src);
- __ dshd(dst, dst);
- __ dbitswap(dst, dst);
- __ dclz(dst, dst);
- } else {
- // Convert trailing zeroes to trailing ones, and bits to their left
- // to zeroes.
- __ Daddu(kScratchReg, src, -1);
- __ Xor(dst, kScratchReg, src);
- __ And(dst, dst, kScratchReg);
- // Count number of leading zeroes.
- __ dclz(dst, dst);
- // Subtract number of leading zeroes from 64 to get number of trailing
- // ones. Remember that the trailing ones were formerly trailing zeroes.
- __ li(kScratchReg, 64);
- __ Dsubu(dst, kScratchReg, dst);
- }
+ __ Dctz(dst, src);
} break;
case kMips64Popcnt: {
- // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
- //
- // A generalization of the best bit counting method to integers of
- // bit-widths up to 128 (parameterized by type T) is this:
- //
- // v = v - ((v >> 1) & (T)~(T)0/3); // temp
- // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
- // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
- // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
- //
- // For comparison, for 32-bit quantities, this algorithm can be executed
- // using 20 MIPS instructions (the calls to LoadConst32() generate two
- // machine instructions each for the values being used in this algorithm).
- // A(n unrolled) loop-based algorithm requires 25 instructions.
- //
- // For a 64-bit operand this can be performed in 24 instructions compared
- // to a(n unrolled) loop based algorithm which requires 38 instructions.
- //
- // There are algorithms which are faster in the cases where very few
- // bits are set but the algorithm here attempts to minimize the total
- // number of instructions executed even when a large number of bits
- // are set.
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- uint32_t B0 = 0x55555555; // (T)~(T)0/3
- uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
- uint32_t value = 0x01010101; // (T)~(T)0/255
- uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
- __ srl(kScratchReg, src, 1);
- __ li(kScratchReg2, B0);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Subu(kScratchReg, src, kScratchReg);
- __ li(kScratchReg2, B1);
- __ And(dst, kScratchReg, kScratchReg2);
- __ srl(kScratchReg, kScratchReg, 2);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Addu(kScratchReg, dst, kScratchReg);
- __ srl(dst, kScratchReg, 4);
- __ Addu(dst, dst, kScratchReg);
- __ li(kScratchReg2, B2);
- __ And(dst, dst, kScratchReg2);
- __ li(kScratchReg, value);
- __ Mul(dst, dst, kScratchReg);
- __ srl(dst, dst, shift);
+ __ Popcnt(dst, src);
} break;
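
The deleted sequence is the classic parallel bit count from the Stanford bithacks page, now emitted by the Popcnt macro instead. The 32-bit variant in plain C++ for reference:

#include <cstdint>

uint32_t Popcnt32(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555);                 // sums of adjacent bit pairs
  v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  // sums within nibbles
  v = (v + (v >> 4)) & 0x0F0F0F0F;                 // sums within bytes
  return (v * 0x01010101) >> 24;                   // add the four byte sums
}
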
case kMips64Dpopcnt: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
- uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
- uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
- uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
- uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
- __ dsrl(kScratchReg, src, 1);
- __ li(kScratchReg2, B0);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Dsubu(kScratchReg, src, kScratchReg);
- __ li(kScratchReg2, B1);
- __ And(dst, kScratchReg, kScratchReg2);
- __ dsrl(kScratchReg, kScratchReg, 2);
- __ And(kScratchReg, kScratchReg, kScratchReg2);
- __ Daddu(kScratchReg, dst, kScratchReg);
- __ dsrl(dst, kScratchReg, 4);
- __ Daddu(dst, dst, kScratchReg);
- __ li(kScratchReg2, B2);
- __ And(dst, dst, kScratchReg2);
- __ li(kScratchReg, value);
- __ Dmul(dst, dst, kScratchReg);
- __ dsrl32(dst, dst, shift);
+ __ Dpopcnt(dst, src);
} break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
@@ -1901,7 +1820,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Peek: {
// The incoming value is 0-based, but we need a 1-based value.
- int reverse_slot = MiscField::decode(instr->opcode()) + 1;
+ int reverse_slot = i.InputInt32(0) + 1;
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
if (instr->OutputAt(0)->IsFPRegister()) {
@@ -1924,7 +1843,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64StoreToStackSlot: {
if (instr->InputAt(0)->IsFPRegister()) {
- __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
} else {
__ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
@@ -1939,75 +1863,75 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
break;
}
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
break;
- case kAtomicCompareExchangeInt8:
+ case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kAtomicCompareExchangeUint8:
+ case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kAtomicCompareExchangeInt16:
+ case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kAtomicCompareExchangeUint16:
+ case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kAtomicCompareExchangeWord32:
+ case kWord32AtomicCompareExchangeWord32:
__ sll(i.InputRegister(2), i.InputRegister(2), 0);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
@@ -3240,6 +3164,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ UNREACHABLE();
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3285,8 +3214,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
tasm()->isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
pop_count += (pop_count & 1); // align
__ Drop(pop_count);
__ Ret();
@@ -3523,9 +3453,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0)));
- __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
}
- __ nop(); // Branch delay slot of the last beq.
AssembleArchJump(i.InputRpo(1));
}
@@ -3541,9 +3470,9 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
int count = base::bits::CountPopulation(saves_fpu);
DCHECK_EQ(kNumCalleeSavedFPU, count);
@@ -3551,7 +3480,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kDoubleSize / kPointerSize));
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
int count = base::bits::CountPopulation(saves);
DCHECK_EQ(kNumCalleeSaved, count + 1);
@@ -3560,15 +3489,15 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -3576,8 +3505,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3592,8 +3521,8 @@ void CodeGenerator::AssembleConstructFrame() {
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
const int returns = frame()->GetReturnSlotCount();
// Skip callee-saved and return slots, which are pushed below.
@@ -3623,7 +3552,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -3631,19 +3560,19 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore GP registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore FPU registers.
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
__ MultiPopFPU(saves_fpu);
}
MipsOperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
@@ -3660,7 +3589,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
AssembleDeconstructFrame();
}
}
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
if (pop->IsImmediate()) {
pop_count += g.ToConstant(pop).ToInt32();
} else {
@@ -3770,23 +3699,50 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
} else if (source->IsFPRegister()) {
- FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ MSARegister dst = g.ToSimd128Register(destination);
+ __ move_v(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ __ st_b(src, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- __ Sdc1(src, g.ToMemOperand(destination));
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ __ Sdc1(src, g.ToMemOperand(destination));
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsFPRegister()) {
- __ Ldc1(g.ToDoubleRegister(destination), src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ if (destination->IsSimd128Register()) {
+ __ ld_b(g.ToSimd128Register(destination), src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ MSARegister temp = kSimd128ScratchReg;
+ __ ld_b(temp, src);
+ __ st_b(temp, g.ToMemOperand(destination));
+ }
} else {
- FPURegister temp = kScratchDoubleReg;
- __ Ldc1(temp, src);
- __ Sdc1(temp, g.ToMemOperand(destination));
+ if (destination->IsFPRegister()) {
+ __ Ldc1(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ FPURegister temp = kScratchDoubleReg;
+ __ Ldc1(temp, src);
+ __ Sdc1(temp, g.ToMemOperand(destination));
+ }
}
} else {
UNREACHABLE();
@@ -3826,34 +3782,73 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Sd(temp_0, dst);
__ Sd(temp_1, src);
} else if (source->IsFPRegister()) {
- FPURegister temp = kScratchDoubleReg;
- FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- FPURegister dst = g.ToDoubleRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp = kSimd128ScratchReg;
+ MSARegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ MSARegister dst = g.ToSimd128Register(destination);
+ __ move_v(temp, src);
+ __ move_v(src, dst);
+ __ move_v(dst, temp);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ move_v(temp, src);
+ __ ld_b(src, dst);
+ __ st_b(temp, dst);
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ Ldc1(src, dst);
- __ Sdc1(temp, dst);
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ Ldc1(src, dst);
+ __ Sdc1(temp, dst);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
- FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
- __ Ldc1(temp_1, dst0); // Save destination in temp_1.
- __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ Sw(temp_0, dst0);
- __ Lw(temp_0, src1);
- __ Sw(temp_0, dst1);
- __ Sdc1(temp_1, src0);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kSimd128) {
+ MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
+ MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
+ MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
+ MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ MSARegister temp_1 = kSimd128ScratchReg;
+ __ ld_b(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ Lw(temp_0, src1);
+ __ Sw(temp_0, dst1);
+ __ Lw(temp_0, src2);
+ __ Sw(temp_0, dst2);
+ __ Lw(temp_0, src3);
+ __ Sw(temp_0, dst3);
+ __ st_b(temp_1, src0);
+ } else {
+ FPURegister temp_1 = kScratchDoubleReg;
+ __ Ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ Lw(temp_0, src1);
+ __ Sw(temp_0, dst1);
+ __ Sdc1(temp_1, src0);
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -3866,7 +3861,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
UNREACHABLE();
}
-
#undef __
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 3058812bec..a50d294013 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
-#define V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#ifndef V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+#define V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
namespace v8 {
namespace internal {
@@ -331,4 +331,4 @@ namespace compiler {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#endif // V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 38f077c4e6..f7c8cab67b 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -379,7 +379,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kMips64Lwc1;
@@ -410,10 +410,16 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
EmitLoad(this, node, opcode);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1382,6 +1388,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
break;
}
}
+ break;
}
default:
break;
@@ -1650,14 +1657,14 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
Mips64OperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1668,10 +1675,17 @@ void InstructionSelector::EmitPrepareArguments(
++slot;
}
} else {
- int push_count = static_cast<int>(descriptor->StackParameterCount());
+ int push_count = static_cast<int>(call_descriptor->StackParameterCount());
if (push_count > 0) {
+      // Calculate the stack space needed for the arguments.
+ int stack_size = 0;
+ for (PushParameter input : (*arguments)) {
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
Emit(kMips64StackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
+ g.TempImmediate(stack_size << kPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
@@ -1683,9 +1697,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
Mips64OperandGenerator g(this);
int reverse_slot = 0;
@@ -1693,14 +1707,14 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
if (output.node != nullptr) {
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
MarkAsFloat64(output.node);
}
- InstructionOperand result = g.DefineAsRegister(output.node);
- Emit(kMips64Peek | MiscField::encode(reverse_slot), result);
+ Emit(kMips64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
@@ -2057,12 +2071,13 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
}
}
+} // namespace
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (selector->CanCover(user, value)) {
+ while (CanCover(user, value)) {
if (value->opcode() == IrOpcode::kWord32Equal) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -2080,56 +2095,56 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -2141,23 +2156,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64Dadd, cont);
+ return VisitBinop(this, node, kMips64Dadd, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64Dsub, cont);
+ return VisitBinop(this, node, kMips64Dsub, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64MulOvf, cont);
+ return VisitBinop(this, node, kMips64MulOvf, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64DaddOvf, cont);
+ return VisitBinop(this, node, kMips64DaddOvf, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kMips64DsubOvf, cont);
+ return VisitBinop(this, node, kMips64DsubOvf, cont);
default:
break;
}
@@ -2166,49 +2181,14 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
break;
case IrOpcode::kWord32And:
case IrOpcode::kWord64And:
- return VisitWordCompare(selector, value, kMips64Tst, cont, true);
+ return VisitWordCompare(this, value, kMips64Tst, cont, true);
default:
break;
}
}
  // Continuation could not be combined with a compare; emit a compare against 0.
- EmitWordCompareZero(selector, value, cont);
-}
-
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+ EmitWordCompareZero(this, value, cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -2216,24 +2196,26 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 10 + 2 * sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 2 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kMips64Sub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 10 + 2 * sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMips64Sub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
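
For clarity, the table-vs-lookup decision above weights space against time at one to three in favor of time. A self-contained sketch of the predicate (a paraphrase of the heuristic, not selector API):

#include <cstddef>
#include <cstdint>
#include <limits>

bool PreferTableSwitch(size_t case_count, size_t value_range,
                       int32_t min_value) {
  const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_cost = (10 + 2 * value_range) + 3 * 3;          // space + 3 * time
  size_t lookup_cost = (2 + 2 * case_count) + 3 * case_count;  // space + 3 * time
  return case_count > 0 && table_cost <= lookup_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}
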
@@ -2245,7 +2227,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWord32Compare(this, node, &cont);
@@ -2330,7 +2312,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWord64Compare(this, node, &cont);
@@ -2431,7 +2413,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2439,13 +2421,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2464,7 +2448,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
}
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2473,13 +2457,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2500,7 +2484,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
}
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2508,15 +2492,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2538,7 +2522,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2547,15 +2531,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2619,11 +2603,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2648,12 +2633,6 @@ void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
V(I16x8) \
V(I8x16)
-// TODO(mostynb@opera.com): this is never used, remove it?
-#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
@@ -2921,6 +2900,32 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
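+  // On MIPS64, 32-bit shifts (sll) always sign-extend their result to 64
+  // bits, so a shift by zero performs the 32->64 sign extension directly.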
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2957,10 +2962,12 @@ InstructionSelector::AlignmentRequirements() {
}
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
+
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
-#undef SIMD_FORMAT_LIST
#undef SIMD_TYPE_LIST
#undef TRACE_UNIMPL
#undef TRACE
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
index 7c132ab153..c78da1e517 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/move-optimizer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_MOVE_OPTIMIZER_
-#define V8_COMPILER_MOVE_OPTIMIZER_
+#ifndef V8_COMPILER_MOVE_OPTIMIZER_H_
+#define V8_COMPILER_MOVE_OPTIMIZER_H_
#include "src/compiler/instruction.h"
#include "src/globals.h"
@@ -65,4 +65,4 @@ class V8_EXPORT_PRIVATE MoveOptimizer final {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_MOVE_OPTIMIZER_
+#endif // V8_COMPILER_MOVE_OPTIMIZER_H_
diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h
index 277ff18034..c395475109 100644
--- a/deps/v8/src/compiler/node-aux-data.h
+++ b/deps/v8/src/compiler/node-aux-data.h
@@ -15,15 +15,27 @@ namespace compiler {
// Forward declarations.
class Node;
-template <class T, T def()>
+template <class T>
+T DefaultConstruct() {
+ return T();
+}
+
+template <class T, T def() = DefaultConstruct<T>>
class NodeAuxData {
public:
explicit NodeAuxData(Zone* zone) : aux_data_(zone) {}
+ explicit NodeAuxData(size_t initial_size, Zone* zone)
+ : aux_data_(initial_size, zone) {}
- void Set(Node* node, T const& data) {
+ // Update entry. Returns true iff entry was changed.
+ bool Set(Node* node, T const& data) {
size_t const id = node->id();
if (id >= aux_data_.size()) aux_data_.resize(id + 1, def());
- aux_data_[id] = data;
+ if (aux_data_[id] != data) {
+ aux_data_[id] = data;
+ return true;
+ }
+ return false;
}
T Get(Node* node) const {
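
Set now reports whether the stored value actually changed, which lets analyses drive worklist fixed points without a separate comparison. A hedged usage sketch (the worklist and transfer function are illustrative, not V8 API):

NodeAuxData<int> state(zone);
while (!worklist.empty()) {
  Node* node = worklist.back();
  worklist.pop_back();
  int next = Recompute(node, state);  // illustrative transfer function
  if (state.Set(node, next)) {        // true iff the entry changed
    for (Node* use : node->uses()) worklist.push_back(use);
  }
}
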
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 22004337eb..5fe6e5d420 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -516,6 +516,27 @@ MaybeHandle<Map> NodeProperties::GetMapWitness(Node* node) {
}
// static
+bool NodeProperties::HasInstanceTypeWitness(Node* receiver, Node* effect,
+ InstanceType instance_type) {
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ switch (result) {
+ case NodeProperties::kUnreliableReceiverMaps:
+ case NodeProperties::kReliableReceiverMaps:
+ DCHECK_NE(0, receiver_maps.size());
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ if (receiver_maps[i]->instance_type() != instance_type) return false;
+ }
+ return true;
+
+ case NodeProperties::kNoReceiverMaps:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+// static
bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
Node* dominator) {
while (effect != dominator) {
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index abc6622c83..7388bf94dd 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -155,6 +155,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
static MaybeHandle<Map> GetMapWitness(Node* node);
+ static bool HasInstanceTypeWitness(Node* receiver, Node* effect,
+ InstanceType instance_type);
// Walks up the {effect} chain to check that there's no observable side-effect
  // between the {effect} and its {dominator}. Aborts the walk if there's a join
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index ededcc4806..f988b954fb 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -60,8 +60,8 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
// Verify that none of the inputs are {nullptr}.
for (int i = 0; i < input_count; i++) {
if (inputs[i] == nullptr) {
- V8_Fatal(__FILE__, __LINE__, "Node::New() Error: #%d:%s[%d] is nullptr",
- static_cast<int>(id), op->mnemonic(), i);
+ FATAL("Node::New() Error: #%d:%s[%d] is nullptr", static_cast<int>(id),
+ op->mnemonic(), i);
}
}
#endif
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 8a4685114b..26fc03fb13 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -65,12 +65,12 @@ class V8_EXPORT_PRIVATE Node final {
#ifdef DEBUG
void Verify();
-#define BOUNDS_CHECK(index) \
- do { \
- if (index < 0 || index >= InputCount()) { \
- V8_Fatal(__FILE__, __LINE__, "Node #%d:%s->InputAt(%d) out of bounds", \
- id(), op()->mnemonic(), index); \
- } \
+#define BOUNDS_CHECK(index) \
+ do { \
+ if (index < 0 || index >= InputCount()) { \
+ FATAL("Node #%d:%s->InputAt(%d) out of bounds", id(), op()->mnemonic(), \
+ index); \
+ } \
} while (false)
#else
// No bounds checks or verification in release mode.
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index ec6c720af2..9a8f1e1df8 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -140,7 +140,9 @@
V(JSCreateClosure) \
V(JSCreateGeneratorObject) \
V(JSCreateIterResultObject) \
+ V(JSCreateStringIterator) \
V(JSCreateKeyValueArray) \
+ V(JSCreatePromise) \
V(JSCreateLiteralArray) \
V(JSCreateEmptyLiteralArray) \
V(JSCreateLiteralObject) \
@@ -191,8 +193,14 @@
V(JSStoreModule) \
V(JSGeneratorStore) \
V(JSGeneratorRestoreContinuation) \
+ V(JSGeneratorRestoreContext) \
V(JSGeneratorRestoreRegister) \
V(JSGeneratorRestoreInputOrDebugPos) \
+ V(JSFulfillPromise) \
+ V(JSPerformPromiseThen) \
+ V(JSPromiseResolve) \
+ V(JSRejectPromise) \
+ V(JSResolvePromise) \
V(JSStackCheck) \
V(JSDebugger)
@@ -342,6 +350,7 @@
V(StringLength) \
V(StringToLowerCaseIntl) \
V(StringToUpperCaseIntl) \
+ V(StringSubstring) \
V(CheckBounds) \
V(CheckIf) \
V(CheckMaps) \
@@ -361,7 +370,6 @@
V(ConvertReceiver) \
V(ConvertTaggedHoleToUndefined) \
V(TypeOf) \
- V(ClassOf) \
V(Allocate) \
V(AllocateRaw) \
V(LoadFieldByIndex) \
@@ -538,88 +546,95 @@
V(Float64Mod) \
V(Float64Pow)
-#define MACHINE_OP_LIST(V) \
- MACHINE_UNOP_32_LIST(V) \
- MACHINE_BINOP_32_LIST(V) \
- MACHINE_BINOP_64_LIST(V) \
- MACHINE_COMPARE_BINOP_LIST(V) \
- MACHINE_FLOAT32_BINOP_LIST(V) \
- MACHINE_FLOAT32_UNOP_LIST(V) \
- MACHINE_FLOAT64_BINOP_LIST(V) \
- MACHINE_FLOAT64_UNOP_LIST(V) \
- V(DebugAbort) \
- V(DebugBreak) \
- V(Comment) \
- V(Load) \
- V(Store) \
- V(StackSlot) \
- V(Word32Popcnt) \
- V(Word64Popcnt) \
- V(Word64Clz) \
- V(Word64Ctz) \
- V(Word64ReverseBits) \
- V(Word64ReverseBytes) \
- V(Int64AbsWithOverflow) \
- V(BitcastTaggedToWord) \
- V(BitcastWordToTagged) \
- V(BitcastWordToTaggedSigned) \
- V(TruncateFloat64ToWord32) \
- V(ChangeFloat32ToFloat64) \
- V(ChangeFloat64ToInt32) \
- V(ChangeFloat64ToUint32) \
- V(ChangeFloat64ToUint64) \
- V(Float64SilenceNaN) \
- V(TruncateFloat64ToUint32) \
- V(TruncateFloat32ToInt32) \
- V(TruncateFloat32ToUint32) \
- V(TryTruncateFloat32ToInt64) \
- V(TryTruncateFloat64ToInt64) \
- V(TryTruncateFloat32ToUint64) \
- V(TryTruncateFloat64ToUint64) \
- V(ChangeInt32ToFloat64) \
- V(ChangeInt32ToInt64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeUint32ToUint64) \
- V(TruncateFloat64ToFloat32) \
- V(TruncateInt64ToInt32) \
- V(RoundFloat64ToInt32) \
- V(RoundInt32ToFloat32) \
- V(RoundInt64ToFloat32) \
- V(RoundInt64ToFloat64) \
- V(RoundUint32ToFloat32) \
- V(RoundUint64ToFloat32) \
- V(RoundUint64ToFloat64) \
- V(BitcastFloat32ToInt32) \
- V(BitcastFloat64ToInt64) \
- V(BitcastInt32ToFloat32) \
- V(BitcastInt64ToFloat64) \
- V(Float64ExtractLowWord32) \
- V(Float64ExtractHighWord32) \
- V(Float64InsertLowWord32) \
- V(Float64InsertHighWord32) \
- V(LoadStackPointer) \
- V(LoadFramePointer) \
- V(LoadParentFramePointer) \
- V(UnalignedLoad) \
- V(UnalignedStore) \
- V(Int32PairAdd) \
- V(Int32PairSub) \
- V(Int32PairMul) \
- V(Word32PairShl) \
- V(Word32PairShr) \
- V(Word32PairSar) \
- V(ProtectedLoad) \
- V(ProtectedStore) \
- V(AtomicLoad) \
- V(AtomicStore) \
- V(AtomicExchange) \
- V(AtomicCompareExchange) \
- V(AtomicAdd) \
- V(AtomicSub) \
- V(AtomicAnd) \
- V(AtomicOr) \
- V(AtomicXor) \
- V(SpeculationFence) \
+#define MACHINE_OP_LIST(V) \
+ MACHINE_UNOP_32_LIST(V) \
+ MACHINE_BINOP_32_LIST(V) \
+ MACHINE_BINOP_64_LIST(V) \
+ MACHINE_COMPARE_BINOP_LIST(V) \
+ MACHINE_FLOAT32_BINOP_LIST(V) \
+ MACHINE_FLOAT32_UNOP_LIST(V) \
+ MACHINE_FLOAT64_BINOP_LIST(V) \
+ MACHINE_FLOAT64_UNOP_LIST(V) \
+ V(DebugAbort) \
+ V(DebugBreak) \
+ V(Comment) \
+ V(Load) \
+ V(PoisonedLoad) \
+ V(Store) \
+ V(StackSlot) \
+ V(Word32Popcnt) \
+ V(Word64Popcnt) \
+ V(Word64Clz) \
+ V(Word64Ctz) \
+ V(Word64ReverseBits) \
+ V(Word64ReverseBytes) \
+ V(Int64AbsWithOverflow) \
+ V(BitcastTaggedToWord) \
+ V(BitcastWordToTagged) \
+ V(BitcastWordToTaggedSigned) \
+ V(TruncateFloat64ToWord32) \
+ V(ChangeFloat32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeFloat64ToUint64) \
+ V(Float64SilenceNaN) \
+ V(TruncateFloat64ToUint32) \
+ V(TruncateFloat32ToInt32) \
+ V(TruncateFloat32ToUint32) \
+ V(TryTruncateFloat32ToInt64) \
+ V(TryTruncateFloat64ToInt64) \
+ V(TryTruncateFloat32ToUint64) \
+ V(TryTruncateFloat64ToUint64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(TruncateFloat64ToFloat32) \
+ V(TruncateInt64ToInt32) \
+ V(RoundFloat64ToInt32) \
+ V(RoundInt32ToFloat32) \
+ V(RoundInt64ToFloat32) \
+ V(RoundInt64ToFloat64) \
+ V(RoundUint32ToFloat32) \
+ V(RoundUint64ToFloat32) \
+ V(RoundUint64ToFloat64) \
+ V(BitcastFloat32ToInt32) \
+ V(BitcastFloat64ToInt64) \
+ V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(SpeculationPoison) \
+ V(LoadStackPointer) \
+ V(LoadFramePointer) \
+ V(LoadParentFramePointer) \
+ V(UnalignedLoad) \
+ V(UnalignedStore) \
+ V(Int32PairAdd) \
+ V(Int32PairSub) \
+ V(Int32PairMul) \
+ V(Word32PairShl) \
+ V(Word32PairShr) \
+ V(Word32PairSar) \
+ V(ProtectedLoad) \
+ V(ProtectedStore) \
+ V(Word32AtomicLoad) \
+ V(Word32AtomicStore) \
+ V(Word32AtomicExchange) \
+ V(Word32AtomicCompareExchange) \
+ V(Word32AtomicAdd) \
+ V(Word32AtomicSub) \
+ V(Word32AtomicAnd) \
+ V(Word32AtomicOr) \
+ V(Word32AtomicXor) \
+ V(SpeculationFence) \
+ V(SignExtendWord8ToInt32) \
+ V(SignExtendWord16ToInt32) \
+ V(SignExtendWord8ToInt64) \
+ V(SignExtendWord16ToInt64) \
+ V(SignExtendWord32ToInt64) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 226faeaa82..fc774f8706 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -510,7 +510,7 @@ Type* OperationTyper::NumberToString(Type* type) {
if (type->IsNone()) return type;
if (type->Is(Type::NaN())) return singleton_NaN_string_;
if (type->Is(cache_.kZeroOrMinusZero)) return singleton_zero_string_;
- return Type::SeqString();
+ return Type::String();
}
Type* OperationTyper::NumberToUint32(Type* type) {
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index d786bb3ee5..bd715df25e 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -115,6 +115,9 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSDecrement:
case IrOpcode::kJSIncrement:
case IrOpcode::kJSNegate:
+ case IrOpcode::kJSPromiseResolve:
+ case IrOpcode::kJSRejectPromise:
+ case IrOpcode::kJSResolvePromise:
return true;
default:
diff --git a/deps/v8/src/compiler/persistent-map.h b/deps/v8/src/compiler/persistent-map.h
index ab0eebbbdd..e94165b36a 100644
--- a/deps/v8/src/compiler/persistent-map.h
+++ b/deps/v8/src/compiler/persistent-map.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_PERSISTENT_H_
-#define V8_COMPILER_PERSISTENT_H_
+#ifndef V8_COMPILER_PERSISTENT_MAP_H_
+#define V8_COMPILER_PERSISTENT_MAP_H_
#include <array>
-#include <bitset>
#include <tuple>
#include "src/base/functional.h"
@@ -74,8 +73,7 @@ class PersistentMap {
}
// Add or overwrite an existing key-value pair.
- PersistentMap Add(Key key, Value value) const;
- void Set(Key key, Value value) { *this = Add(key, value); }
+ void Set(Key key, Value value);
bool operator==(const PersistentMap& other) const {
if (tree_ == other.tree_) return true;
@@ -202,17 +200,16 @@ struct PersistentMap<Key, Value, Hasher>::FocusedTree {
template <class Key, class Value, class Hasher>
class PersistentMap<Key, Value, Hasher>::HashValue {
public:
- explicit HashValue(size_t hash) : bits_(hash) {}
- explicit HashValue(std::bitset<kHashBits> hash) : bits_(hash) {}
+ explicit HashValue(size_t hash) : bits_(static_cast<uint32_t>(hash)) {}
Bit operator[](int pos) const {
- return bits_[kHashBits - pos - 1] ? kRight : kLeft;
+ DCHECK_LT(pos, kHashBits);
+ return bits_ & (static_cast<decltype(bits_)>(1) << (kHashBits - pos - 1))
+ ? kRight
+ : kLeft;
}
- bool operator<(HashValue other) const {
- static_assert(sizeof(*this) <= sizeof(unsigned long), ""); // NOLINT
- return bits_.to_ulong() < other.bits_.to_ulong();
- }
+ bool operator<(HashValue other) const { return bits_ < other.bits_; }
bool operator==(HashValue other) const { return bits_ == other.bits_; }
bool operator!=(HashValue other) const { return bits_ != other.bits_; }
HashValue operator^(HashValue other) const {
@@ -220,7 +217,8 @@ class PersistentMap<Key, Value, Hasher>::HashValue {
}
private:
- std::bitset<kHashBits> bits_;
+ static_assert(sizeof(uint32_t) * 8 == kHashBits, "wrong type for bits_");
+ uint32_t bits_;
};
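
With std::bitset gone, operator[] indexes bits from the most significant end by hand. The equivalent standalone computation (function name is illustrative):

#include <cstdint>

// Bit `pos` of a 32-bit hash, counted from the most significant bit,
// matching HashValue::operator[] with kHashBits == 32.
bool BitFromMsb(uint32_t bits, int pos) {
  return (bits & (uint32_t{1} << (32 - pos - 1))) != 0;
}
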
template <class Key, class Value, class Hasher>
@@ -263,7 +261,7 @@ class PersistentMap<Key, Value, Hasher>::iterator {
if (current_->more) {
more_iter_ = current_->more->begin();
}
- } while ((**this).second == def_value());
+ } while (!((**this).second != def_value()));
return *this;
}
@@ -281,12 +279,10 @@ class PersistentMap<Key, Value, Hasher>::iterator {
bool operator<(const iterator& other) const {
if (is_end()) return false;
if (other.is_end()) return true;
- if (current_->key_hash < other.current_->key_hash) {
- return true;
- } else if (current_->key_hash == other.current_->key_hash) {
+ if (current_->key_hash == other.current_->key_hash) {
return (**this).first < (*other).first;
} else {
- return false;
+ return current_->key_hash < other.current_->key_hash;
}
}
@@ -300,6 +296,9 @@ class PersistentMap<Key, Value, Hasher>::iterator {
if (i.current_->more) {
i.more_iter_ = i.current_->more->begin();
}
+ // Skip entries with default value. PersistentMap iterators must never point
+ // to a default value.
+ while (!i.is_end() && !((*i).second != def_value)) ++i;
return i;
}
@@ -333,8 +332,18 @@ class PersistentMap<Key, Value, Hasher>::double_iterator {
}
double_iterator& operator++() {
- if (first_current_) ++first_;
- if (second_current_) ++second_;
+#ifdef DEBUG
+ iterator old_first = first_;
+ iterator old_second = second_;
+#endif
+ if (first_current_) {
+ ++first_;
+ DCHECK(old_first < first_);
+ }
+ if (second_current_) {
+ ++second_;
+ DCHECK(old_second < second_);
+ }
return *this = double_iterator(first_, second_);
}
@@ -346,6 +355,7 @@ class PersistentMap<Key, Value, Hasher>::double_iterator {
first_current_ = true;
second_current_ = false;
} else {
+ DCHECK(second_ < first_);
first_current_ = false;
second_current_ = true;
}
@@ -365,14 +375,13 @@ class PersistentMap<Key, Value, Hasher>::double_iterator {
};
template <class Key, class Value, class Hasher>
-PersistentMap<Key, Value, Hasher> PersistentMap<Key, Value, Hasher>::Add(
- Key key, Value value) const {
+void PersistentMap<Key, Value, Hasher>::Set(Key key, Value value) {
HashValue key_hash = HashValue(Hasher()(key));
std::array<const FocusedTree*, kHashBits> path;
int length = 0;
const FocusedTree* old = FindHash(key_hash, &path, &length);
ZoneMap<Key, Value>* more = nullptr;
- if (GetFocusedValue(old, key) == value) return *this;
+ if (!(GetFocusedValue(old, key) != value)) return;
if (old && !(old->more == nullptr && old->key_value.key() == key)) {
more = new (zone_->New(sizeof(*more))) ZoneMap<Key, Value>(zone_);
if (old->more) {
@@ -393,7 +402,7 @@ PersistentMap<Key, Value, Hasher> PersistentMap<Key, Value, Hasher>::Add(
for (int i = 0; i < length; ++i) {
tree->path(i) = path[i];
}
- return PersistentMap(tree, zone_, def_value_);
+ *this = PersistentMap(tree, zone_, def_value_);
}
template <class Key, class Value, class Hasher>
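
Set rebuilds the focused tree and reassigns *this, so previously taken copies keep observing the old version; the map stays persistent. A hedged sketch of the semantics (construction details are illustrative):

PersistentMap<int, int> a(zone);  // illustrative construction
a.Set(1, 10);
PersistentMap<int, int> b = a;    // copy shares the underlying tree
a.Set(1, 20);                     // a now points at a freshly built tree
// b still sees the old version: b.Get(1) == 10, a.Get(1) == 20.
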
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index b2bf3ac76a..900cd1cd8f 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -98,4 +98,4 @@ class PhaseScope {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_PIPELINE_STATISTICS_H_
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index b4567ab04f..fe29917e61 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -83,6 +83,11 @@ struct ProtectedInstructionData;
namespace compiler {
+// TurboFan can only handle 2^16 control inputs. Since each control flow split
+// requires at least two bytes (jump and offset), we limit the bytecode size
+// to 128K bytes.
+const int kMaxBytecodeSizeForTurbofan = 128 * 1024;
+
class PipelineData {
public:
// For main entry point.
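The arithmetic behind the constant, as a quick sanity check (a sketch, not part of the patch): 2^16 control-flow splits at a minimum of two bytecode bytes each gives 2 * 2^16 = 2^17 bytes = 128 KB.

    static_assert(2 * (1 << 16) == 128 * 1024,
                  "2^16 splits at 2 bytes each is 128 KB");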
@@ -292,32 +297,32 @@ class PipelineData {
register_allocation_data_ = nullptr;
}
- void InitializeInstructionSequence(const CallDescriptor* descriptor) {
+ void InitializeInstructionSequence(const CallDescriptor* call_descriptor) {
DCHECK_NULL(sequence_);
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(instruction_zone(),
schedule());
sequence_ = new (instruction_zone())
InstructionSequence(isolate(), instruction_zone(), instruction_blocks);
- if (descriptor && descriptor->RequiresFrameAsIncoming()) {
+ if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
sequence_->instruction_blocks()[0]->mark_needs_frame();
} else {
- DCHECK_EQ(0u, descriptor->CalleeSavedFPRegisters());
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedFPRegisters());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters());
}
}
- void InitializeFrameData(CallDescriptor* descriptor) {
+ void InitializeFrameData(CallDescriptor* call_descriptor) {
DCHECK_NULL(frame_);
int fixed_frame_size = 0;
- if (descriptor != nullptr) {
- fixed_frame_size = descriptor->CalculateFixedFrameSize();
+ if (call_descriptor != nullptr) {
+ fixed_frame_size = call_descriptor->CalculateFixedFrameSize();
}
frame_ = new (codegen_zone()) Frame(fixed_frame_size);
}
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
- CallDescriptor* descriptor) {
+ CallDescriptor* call_descriptor) {
DCHECK_NULL(register_allocation_data_);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
@@ -336,10 +341,12 @@ class PipelineData {
void InitializeCodeGenerator(Linkage* linkage) {
DCHECK_NULL(code_generator_);
- code_generator_ =
- new CodeGenerator(codegen_zone(), frame(), linkage, sequence(), info(),
- isolate(), osr_helper_, start_source_position_,
- jump_optimization_info_, protected_instructions_);
+ code_generator_ = new CodeGenerator(
+ codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
+ osr_helper_, start_source_position_, jump_optimization_info_,
+ protected_instructions_,
+ info()->is_poison_loads() ? LoadPoisoning::kDoPoison
+ : LoadPoisoning::kDontPoison);
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -451,7 +458,7 @@ class PipelineImpl final {
void RunPrintAndVerify(const char* phase, bool untyped = false);
Handle<Code> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
- CallDescriptor* descriptor, bool run_verifier);
+ CallDescriptor* call_descriptor, bool run_verifier);
CompilationInfo* info() const;
Isolate* isolate() const;
@@ -778,6 +785,11 @@ class PipelineCompilationJob final : public CompilationJob {
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
Isolate* isolate) {
+ if (compilation_info()->shared_info()->bytecode_array()->length() >
+ kMaxBytecodeSizeForTurbofan) {
+ return AbortOptimization(BailoutReason::kFunctionTooBig);
+ }
+
if (!FLAG_always_opt) {
compilation_info()->MarkAsBailoutOnUninitialized();
}
@@ -790,7 +802,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_inline_accessors) {
compilation_info()->MarkAsAccessorInliningEnabled();
}
- if (compilation_info()->closure()->feedback_vector_cell()->map() ==
+ if (FLAG_branch_load_poisoning) {
+ compilation_info()->MarkAsPoisonLoads();
+ }
+ if (compilation_info()->closure()->feedback_cell()->map() ==
isolate->heap()->one_closure_cell_map()) {
compilation_info()->MarkAsFunctionContextSpecializing();
}
@@ -895,7 +910,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
public:
explicit PipelineWasmCompilationJob(
CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
- CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
std::vector<trap_handler::ProtectedInstructionData>* protected_insts,
bool asmjs_origin)
: CompilationJob(isolate->stack_guard()->real_climit(), nullptr, info,
@@ -906,7 +921,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
data_(&zone_stats_, isolate, info, jsgraph, pipeline_statistics_.get(),
source_positions, protected_insts),
pipeline_(&data_),
- linkage_(descriptor),
+ linkage_(call_descriptor),
asmjs_origin_(asmjs_origin) {}
protected:
@@ -955,7 +970,8 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph(), asmjs_origin_);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ scope.zone());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -986,11 +1002,12 @@ PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
code_generator->tasm()->GetCode(isolate, &wasm_code_desc->code_desc);
wasm_code_desc->safepoint_table_offset =
code_generator->GetSafepointTableOffset();
+ wasm_code_desc->handler_table_offset =
+ code_generator->GetHandlerTableOffset();
wasm_code_desc->frame_slot_count =
code_generator->frame()->GetTotalFrameSlotCount();
wasm_code_desc->source_positions_table =
code_generator->GetSourcePositionTable();
- wasm_code_desc->handler_table = code_generator->GetHandlerTable();
}
return SUCCEEDED;
}
@@ -1113,7 +1130,8 @@ struct InliningPhase {
data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
data->info()->is_bailout_on_uninitialized()
? JSCallReducer::kBailoutOnUninitialized
@@ -1217,7 +1235,8 @@ struct TypedLoweringPhase {
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &builtin_reducer);
AddReducer(data, &graph_reducer, &create_lowering);
@@ -1324,7 +1343,8 @@ struct EarlyOptimizationPhase {
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &redundancy_elimination);
@@ -1391,7 +1411,8 @@ struct EffectControlLinearizationPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
@@ -1427,7 +1448,8 @@ struct LoadEliminationPhase {
CheckpointElimination checkpoint_elimination(&graph_reducer);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &redundancy_elimination);
@@ -1450,7 +1472,10 @@ struct MemoryOptimizationPhase {
trimmer.TrimGraph(roots.begin(), roots.end());
// Optimize allocations and load/store operations.
- MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
+ MemoryOptimizer optimizer(data->jsgraph(), temp_zone,
+ data->info()->is_poison_loads()
+ ? LoadPoisoning::kDoPoison
+ : LoadPoisoning::kDontPoison);
optimizer.Optimize();
}
};
@@ -1467,7 +1492,8 @@ struct LateOptimizationPhase {
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
+ data->common(), data->machine(),
+ temp_zone);
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
@@ -1525,6 +1551,12 @@ struct InstructionSelectionPhase {
InstructionSelector selector(
temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
data->schedule(), data->source_positions(), data->frame(),
+ data->info()->switch_jump_table_enabled()
+ ? InstructionSelector::kEnableSwitchJumpTable
+ : InstructionSelector::kDisableSwitchJumpTable,
+ data->info()->is_generating_speculation_poison_on_entry()
+ ? InstructionSelector::kEnableSpeculationPoison
+ : InstructionSelector::kDisableSpeculationPoison,
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
@@ -1534,7 +1566,9 @@ struct InstructionSelectionPhase {
: InstructionSelector::kDisableScheduling,
data->isolate()->serializer_enabled()
? InstructionSelector::kEnableSerialization
- : InstructionSelector::kDisableSerialization);
+ : InstructionSelector::kDisableSerialization,
+ data->info()->is_poison_loads() ? LoadPoisoning::kDoPoison
+ : LoadPoisoning::kDontPoison);
if (!selector.SelectInstructions()) {
data->set_compilation_failed();
}
@@ -2012,8 +2046,7 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Isolate* isolate, Graph* graph,
Schedule* schedule) {
- CallDescriptor* call_descriptor =
- Linkage::ComputeIncoming(info->zone(), info);
+ auto call_descriptor = Linkage::ComputeIncoming(info->zone(), info);
return GenerateCodeForTesting(info, isolate, call_descriptor, graph,
schedule);
}
@@ -2071,10 +2104,10 @@ CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function,
// static
CompilationJob* Pipeline::NewWasmCompilationJob(
CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
- CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
std::vector<trap_handler::ProtectedInstructionData>* protected_instructions,
wasm::ModuleOrigin asmjs_origin) {
- return new PipelineWasmCompilationJob(info, isolate, jsgraph, descriptor,
+ return new PipelineWasmCompilationJob(info, isolate, jsgraph, call_descriptor,
source_positions,
protected_instructions, asmjs_origin);
}
@@ -2105,7 +2138,7 @@ void PipelineImpl::ComputeScheduledGraph() {
}
bool PipelineImpl::SelectInstructions(Linkage* linkage) {
- CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
+ auto call_descriptor = linkage->GetIncomingDescriptor();
PipelineData* data = this->data_;
// We should have a scheduled graph.
@@ -2186,6 +2219,10 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
std::unique_ptr<const RegisterConfiguration> config;
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegisters(config.get(), call_descriptor, run_verifier);
+ } else if (data->info()->is_poison_loads()) {
+ CHECK(InstructionSelector::SupportsSpeculationPoisoning());
+ AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
+ run_verifier);
} else {
AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
run_verifier);
@@ -2276,7 +2313,7 @@ Handle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
}
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
- CallDescriptor* descriptor,
+ CallDescriptor* call_descriptor,
bool run_verifier) {
PipelineData* data = this->data_;
// Don't track usage for this zone in compiler stats.
@@ -2294,7 +2331,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
data_->sequence()->ValidateDeferredBlockExitPaths();
#endif
- data->InitializeRegisterAllocationData(config, descriptor);
+ data->InitializeRegisterAllocationData(config, call_descriptor);
if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
Run<MeetRegisterConstraintsPhase>();
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index b5b6b5f142..92b128c357 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -46,7 +46,7 @@ class Pipeline : public AllStatic {
// Returns a new compilation job for the WebAssembly compilation info.
static CompilationJob* NewWasmCompilationJob(
CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
- CallDescriptor* descriptor, SourcePositionTable* source_positions,
+ CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
std::vector<trap_handler::ProtectedInstructionData>*
protected_instructions,
wasm::ModuleOrigin wasm_origin);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 7fc537784c..6bdf8fa974 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -34,7 +34,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
+ case kFlags_branch_and_poison:
case kFlags_deoptimize:
+ case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
return SetRC;
@@ -270,6 +272,16 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
+ PPCOperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
+ }
+}
+
} // namespace
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
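This helper is the code-generation half of load poisoning: the instruction selector tags a load's opcode with kMemoryAccessPoisoned (see the instruction-selector-ppc.cc hunk below), and the generator then ANDs the freshly loaded value with kSpeculationPoisonRegister. Under correct speculation the register holds all ones and the AND is a no-op; under misspeculation it holds zero and the value is squashed. In effect:

    // value = i.OutputRegister() after the load; poison is ~0 or 0.
    value = value & poison;  // unchanged on the architectural path, zeroed otherwise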
@@ -779,21 +791,40 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ Register scratch = kScratchReg;
+
+ Label current_pc;
+ __ mov_label_addr(scratch, &current_pc);
+
+ __ bind(&current_pc);
+ __ subi(scratch, scratch, Operand(__ pc_offset()));
+ __ cmp(scratch, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains the deoptimization bit, found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero, jump to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- __ mov_label_addr(r11, &current);
- int pc_offset = __ pc_offset();
- __ bind(&current);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset);
- __ LoadP(r11, MemOperand(r11, offset));
+ if (FLAG_debug_code) {
+ // Check that {kJavaScriptCallCodeStartRegister} is correct.
+ Label current_pc;
+ __ mov_label_addr(ip, &current_pc);
+
+ __ bind(&current_pc);
+ __ subi(ip, ip, Operand(__ pc_offset()));
+ __ cmp(ip, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+ }
+
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ LoadP(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadWordArith(
r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
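The simplified offset works because the new calling convention guarantees kJavaScriptCallCodeStartRegister already holds the code start, i.e. the Code object address plus Code::kHeaderSize; no PC needs to be materialized. A hedged sketch of the addressing:

    // container = *(code_start + Code::kCodeDataContainerOffset - Code::kHeaderSize)
    // where code_start == code_object_address + Code::kHeaderSize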
@@ -802,6 +833,37 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne, cr0);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ Register scratch = kScratchReg;
+
+ Label current_pc;
+ __ mov_label_addr(scratch, &current_pc);
+
+ __ bind(&current_pc);
+ __ subi(scratch, scratch, Operand(__ pc_offset()));
+
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerPointer - 1))
+ __ mr(kSpeculationPoisonRegister, scratch);
+ __ sub(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ sub(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ scratch);
+ __ orx(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ ShiftRightArithImm(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kBitsPerPointer - 1);
+ __ notx(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ and_(sp, sp, kSpeculationPoisonRegister);
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
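Why the mask formula in GenerateSpeculationPoison works: when the two pointers are equal, both differences are zero, their OR is zero, the arithmetic shift keeps zero, and the final NOT gives all ones. When they differ, at least one of the two differences is negative, so the OR has its sign bit set, the arithmetic right shift smears that bit across the word, and the NOT gives zero. A hedged C++ sketch of the same computation (the patch emits it as PPC sub/orx/shift/not instructions):

    #include <cstdint>
    inline intptr_t PoisonMask(intptr_t current, intptr_t expected) {
      intptr_t diff = (current - expected) | (expected - current);
      return ~(diff >> (sizeof(intptr_t) * 8 - 1));  // ~0 if equal, 0 otherwise
    }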
@@ -909,9 +971,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
- __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
+ __ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();
@@ -1808,26 +1871,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
ASSEMBLE_LOAD_INTEGER(ld, ldx);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#endif
case kPPC_LoadFloat32:
@@ -1856,47 +1926,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
break;
- case kAtomicExchangeInt8:
+ case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint8:
+ case kWord32AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
break;
- case kAtomicExchangeInt16:
+ case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
__ extsh(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kAtomicExchangeUint16:
+ case kWord32AtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
break;
- case kAtomicExchangeWord32:
+ case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
break;
default:
@@ -1931,6 +2001,20 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(John): Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ ArchOpcode op = instr->arch_opcode();
+ condition = NegateFlagsCondition(condition);
+ __ li(kScratchReg, Operand::Zero());
+ __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
+ kScratchReg, kSpeculationPoisonRegister, cr0);
+}
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
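A hedged reading of AssembleBranchPoisoning above: the condition is negated, and isel then selects kScratchReg (zero) into kSpeculationPoisonRegister exactly when that negated condition holds, i.e. on the path the guarding branch should have excluded; unordered float conditions are deferred, as the TODO notes. Schematically:

    // poison = negated_condition ? 0 : poison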
@@ -1978,8 +2062,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -2109,8 +2194,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
// Save callee-saved Double registers.
if (double_saves != 0) {
@@ -2121,10 +2206,10 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
- const RegList saves =
- FLAG_enable_embedded_constant_pool
- ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() &
+ ~kConstantPoolRegister.bit()
+ : call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Register save area does not include the fp or constant pool pointer.
const int num_saves =
@@ -2135,9 +2220,9 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ function_descriptor();
__ mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
@@ -2148,9 +2233,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ Push(r0, fp);
__ mr(fp, sp);
}
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(ip);
- if (descriptor->PushArgumentCount()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
+ __ Prologue();
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -2161,8 +2246,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -2174,9 +2259,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ InitializePoisonForLoadsIfNeeded();
}
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
__ Add(sp, sp, -shrink_slots * kPointerSize, r0);
}
@@ -2189,10 +2275,10 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Save callee-saved registers.
- const RegList saves =
- FLAG_enable_embedded_constant_pool
- ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() &
+ ~kConstantPoolRegister.bit()
+ : call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPush(saves);
// Register save area does not include the fp or constant pool pointer.
@@ -2200,26 +2286,26 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
// Restore registers.
- const RegList saves =
- FLAG_enable_embedded_constant_pool
- ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? call_descriptor->CalleeSavedRegisters() &
+ ~kConstantPoolRegister.bit()
+ : call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore double registers.
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
if (double_saves != 0) {
__ MultiPopDoubles(double_saves);
}
PPCOperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index fced5565df..70a6c9ee69 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -194,7 +194,7 @@ void InstructionSelector::VisitLoad(Node* node) {
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
ImmediateMode mode = kInt16Imm;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
@@ -234,6 +234,12 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
+
+ if (node->opcode() == IrOpcode::kPoisonedLoad &&
+ load_poisoning_ == LoadPoisoning::kDoPoison) {
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
if (g.CanBeImmediate(offset, mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
@@ -246,6 +252,8 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
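VisitLoad now builds an InstructionCode rather than a bare ArchOpcode so the poisoned-access flag can travel in the MiscField bits, and EmitWordLoadPoisoningIfNeeded in the code generator decodes the same field. The round trip, as it appears in the patch:

    // selection:  opcode |= MiscField::encode(kMemoryAccessPoisoned);
    // generation: MiscField::decode(instr->opcode()) == kMemoryAccessPoisoned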
@@ -1118,6 +1126,16 @@ void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
VisitRR(this, kPPC_DoubleToUint32, node);
}
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord8, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord16, node);
+}
+
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
@@ -1144,6 +1162,20 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
VisitRR(this, kPPC_ExtendSignWord32, node);
}
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord8, node);
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord16, node);
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord32, node);
+}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
@@ -1533,14 +1565,13 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
g.UseRegister(right), cont);
}
+} // namespace
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, InstructionCode opcode,
- FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1549,58 +1580,58 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kWord64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
#endif
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1612,28 +1643,27 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(
- selector, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
+ this, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(selector, node,
- kPPC_SubWithOverflow32,
- kInt16Imm_Negate, cont);
+ return VisitBinop<Int32BinopMatcher>(
+ this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kNotEqual);
- return EmitInt32MulWithOverflow(selector, node, cont);
+ return EmitInt32MulWithOverflow(this, node, cont);
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add64,
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64,
kInt16Imm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Sub,
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub,
kInt16Imm_Negate, cont);
#endif
default:
@@ -1643,10 +1673,10 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
// TODO(mbrandy): opportunity for rlwinm?
- return VisitWordCompare(selector, value, kPPC_Tst32, cont, true,
+ return VisitWordCompare(this, value, kPPC_Tst32, cont, true,
kInt16Imm_Unsigned);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt32Add:
@@ -1658,10 +1688,10 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// case IrOpcode::kWord32Ror:
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kWord64And:
// TODO(mbrandy): opportunity for rldic?
- return VisitWordCompare(selector, value, kPPC_Tst64, cont, true,
+ return VisitWordCompare(this, value, kPPC_Tst64, cont, true,
kInt16Imm_Unsigned);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt64Add:
@@ -1678,84 +1708,36 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
// Branch could not be combined with a compare, emit compare against 0.
- PPCOperandGenerator g(selector);
- VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+ PPCOperandGenerator g(this);
+ VisitCompare(this, kPPC_Cmp32, g.UseRegister(value), g.TempImmediate(0),
cont);
}
-
-void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont);
-}
-
-
-#if V8_TARGET_ARCH_PPC64
-void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont);
-}
-#endif
-
-} // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
PPCOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kPPC_Sub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kPPC_Sub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
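Plugging the costs into the guard gives a concrete threshold: a jump table is chosen when 4 + value_range + 3*3 <= 3 + 2*case_count + 3*case_count, i.e. when value_range <= 5*case_count - 10. For example, ten dense cases spanning a range of ten take the table (10 <= 40), while four cases scattered over a range of one hundred fall back to conditional jumps (100 > 10).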
@@ -1765,10 +1747,6 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
- }
VisitWord32Compare(this, node, &cont);
}
@@ -1802,10 +1780,6 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
- Int64BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
- }
VisitWord64Compare(this, node, &cont);
}
@@ -1883,16 +1857,15 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
PPCOperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1962,7 +1935,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1970,13 +1943,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -1986,7 +1961,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -1995,13 +1970,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2017,7 +1992,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
0, nullptr, input_count, inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2025,15 +2000,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2051,19 +2026,19 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicAdd(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicSub(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicAnd(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicOr(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicXor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
@@ -2241,9 +2216,9 @@ void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
// TODO(John): Port.
}
@@ -2257,6 +2232,12 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2279,6 +2260,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index ed67c06cc7..778752e50f 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -212,28 +212,29 @@ void RawMachineAssembler::Comment(const char* msg) {
AddNode(machine()->Comment(msg));
}
-Node* RawMachineAssembler::CallN(CallDescriptor* desc, int input_count,
- Node* const* inputs) {
- DCHECK(!desc->NeedsFrameState());
+Node* RawMachineAssembler::CallN(CallDescriptor* call_descriptor,
+ int input_count, Node* const* inputs) {
+ DCHECK(!call_descriptor->NeedsFrameState());
// +1 is for target.
- DCHECK_EQ(input_count, desc->ParameterCount() + 1);
- return AddNode(common()->Call(desc), input_count, inputs);
+ DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 1);
+ return AddNode(common()->Call(call_descriptor), input_count, inputs);
}
-Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
+Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* call_descriptor,
int input_count,
Node* const* inputs) {
- DCHECK(desc->NeedsFrameState());
+ DCHECK(call_descriptor->NeedsFrameState());
// +2 is for target and frame state.
- DCHECK_EQ(input_count, desc->ParameterCount() + 2);
- return AddNode(common()->Call(desc), input_count, inputs);
+ DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 2);
+ return AddNode(common()->Call(call_descriptor), input_count, inputs);
}
-Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, int input_count,
- Node* const* inputs) {
+Node* RawMachineAssembler::TailCallN(CallDescriptor* call_descriptor,
+ int input_count, Node* const* inputs) {
// +1 is for target.
- DCHECK_EQ(input_count, desc->ParameterCount() + 1);
- Node* tail_call = MakeNode(common()->TailCall(desc), input_count, inputs);
+ DCHECK_EQ(input_count, call_descriptor->ParameterCount() + 1);
+ Node* tail_call =
+ MakeNode(common()->TailCall(call_descriptor), input_count, inputs);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -243,10 +244,10 @@ Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
Node* function) {
MachineSignature::Builder builder(zone(), 1, 0);
builder.AddReturn(return_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function);
+ return AddNode(common()->Call(call_descriptor), function);
}
@@ -256,10 +257,10 @@ Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
MachineSignature::Builder builder(zone(), 1, 1);
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0);
+ return AddNode(common()->Call(call_descriptor), function, arg0);
}
Node* RawMachineAssembler::CallCFunction1WithCallerSavedRegisters(
@@ -268,13 +269,13 @@ Node* RawMachineAssembler::CallCFunction1WithCallerSavedRegisters(
MachineSignature::Builder builder(zone(), 1, 1);
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
- CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- descriptor->set_save_fp_mode(mode);
+ call_descriptor->set_save_fp_mode(mode);
- return AddNode(common()->CallWithCallerSavedRegisters(descriptor), function,
- arg0);
+ return AddNode(common()->CallWithCallerSavedRegisters(call_descriptor),
+ function, arg0);
}
Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
@@ -285,10 +286,10 @@ Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
builder.AddReturn(return_type);
builder.AddParam(arg0_type);
builder.AddParam(arg1_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1);
}
Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
@@ -301,10 +302,10 @@ Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
builder.AddParam(arg0_type);
builder.AddParam(arg1_type);
builder.AddParam(arg2_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1, arg2);
}
Node* RawMachineAssembler::CallCFunction3WithCallerSavedRegisters(
@@ -316,13 +317,13 @@ Node* RawMachineAssembler::CallCFunction3WithCallerSavedRegisters(
builder.AddParam(arg0_type);
builder.AddParam(arg1_type);
builder.AddParam(arg2_type);
- CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- descriptor->set_save_fp_mode(mode);
+ call_descriptor->set_save_fp_mode(mode);
- return AddNode(common()->CallWithCallerSavedRegisters(descriptor), function,
- arg0, arg1, arg2);
+ return AddNode(common()->CallWithCallerSavedRegisters(call_descriptor),
+ function, arg0, arg1, arg2);
}
Node* RawMachineAssembler::CallCFunction4(
@@ -335,10 +336,11 @@ Node* RawMachineAssembler::CallCFunction4(
builder.AddParam(arg1_type);
builder.AddParam(arg2_type);
builder.AddParam(arg3_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1, arg2,
+ arg3);
}
Node* RawMachineAssembler::CallCFunction5(
@@ -353,11 +355,11 @@ Node* RawMachineAssembler::CallCFunction5(
builder.AddParam(arg2_type);
builder.AddParam(arg3_type);
builder.AddParam(arg4_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3,
- arg4);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1, arg2,
+ arg3, arg4);
}
Node* RawMachineAssembler::CallCFunction6(
@@ -373,11 +375,11 @@ Node* RawMachineAssembler::CallCFunction6(
builder.AddParam(arg3_type);
builder.AddParam(arg4_type);
builder.AddParam(arg5_type);
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3,
- arg4, arg5);
+ return AddNode(common()->Call(call_descriptor), function, arg0, arg1, arg2,
+ arg3, arg4, arg5);
}
Node* RawMachineAssembler::CallCFunction8(
@@ -397,9 +399,9 @@ Node* RawMachineAssembler::CallCFunction8(
builder.AddParam(arg6_type);
builder.AddParam(arg7_type);
Node* args[] = {function, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), arraysize(args), args);
+ return AddNode(common()->Call(call_descriptor), arraysize(args), args);
}
Node* RawMachineAssembler::CallCFunction9(
@@ -421,9 +423,9 @@ Node* RawMachineAssembler::CallCFunction9(
builder.AddParam(arg8_type);
Node* args[] = {function, arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7, arg8};
- const CallDescriptor* descriptor =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), arraysize(args), args);
+ return AddNode(common()->Call(call_descriptor), arraysize(args), args);
}
BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
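All CallCFunctionN helpers above follow one recipe: build a MachineSignature, fetch a simplified C call descriptor from Linkage, and add a Call node whose first input is the target. A hedged usage sketch for the two-argument variant, assuming an int32(int32, int32) C function whose address is already available as a node:

    Node* fn = /* node holding the C function's address */;
    Node* sum = m.CallCFunction2(MachineType::Int32(),  // return type
                                 MachineType::Int32(),  // arg0 type
                                 MachineType::Int32(),  // arg1 type
                                 fn, m.Int32Constant(1), m.Int32Constant(2));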
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 1cc56b3379..3d689a089c 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -162,15 +162,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Atomic memory operations.
Node* AtomicLoad(MachineType type, Node* base, Node* index) {
- return AddNode(machine()->AtomicLoad(type), base, index);
+ return AddNode(machine()->Word32AtomicLoad(type), base, index);
}
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
Node* value) {
- return AddNode(machine()->AtomicStore(rep), base, index, value);
+ return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
}
#define ATOMIC_FUNCTION(name) \
Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value) { \
- return AddNode(machine()->Atomic##name(rep), base, index, value); \
+ return AddNode(machine()->Word32Atomic##name(rep), base, index, value); \
}
ATOMIC_FUNCTION(Exchange);
ATOMIC_FUNCTION(Add);
@@ -182,7 +182,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
Node* old_value, Node* new_value) {
- return AddNode(machine()->AtomicCompareExchange(rep), base, index,
+ return AddNode(machine()->Word32AtomicCompareExchange(rep), base, index,
old_value, new_value);
}
@@ -744,19 +744,24 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
+ Node* SpeculationPoison() {
+ return AddNode(machine()->SpeculationPoison(), graph()->start());
+ }
// Call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
- Node* CallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+ Node* CallN(CallDescriptor* call_descriptor, int input_count,
+ Node* const* inputs);
// Call a given call descriptor and the given arguments and frame-state.
// The call target and frame state are passed as part of the {inputs} array.
- Node* CallNWithFrameState(CallDescriptor* desc, int input_count,
+ Node* CallNWithFrameState(CallDescriptor* call_descriptor, int input_count,
Node* const* inputs);
// Tail call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
- Node* TailCallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+ Node* TailCallN(CallDescriptor* call_descriptor, int input_count,
+ Node* const* inputs);
// Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
@@ -903,6 +908,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder common_;
CallDescriptor* call_descriptor_;
NodeVector parameters_;
+ Node* speculation_poison_;
BasicBlock* current_block_;
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index a961f0012f..7e22ab22ad 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
-#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
+#ifndef V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
+#define V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
#include "src/compiler/instruction.h"
#include "src/zone/zone-containers.h"
@@ -270,4 +270,4 @@ class RegisterAllocatorVerifier final : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_REGISTER_ALLOCATOR_VERIFIER_H_
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 7f65695ee2..43eb408f1e 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -713,7 +713,7 @@ void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
break;
case UsePositionType::kRequiresRegister:
DCHECK(op.IsRegister() || op.IsFPRegister());
- // Fall through.
+ V8_FALLTHROUGH;
case UsePositionType::kAny:
InstructionOperand::ReplaceWith(pos->operand(), &op);
break;
@@ -1897,10 +1897,10 @@ int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kSimd128:
result -= config()->num_float_registers();
- // Fall through.
+ V8_FALLTHROUGH;
case MachineRepresentation::kFloat32:
result -= config()->num_double_registers();
- // Fall through.
+ V8_FALLTHROUGH;
case MachineRepresentation::kFloat64:
result -= config()->num_general_registers();
break;
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 63e94fbdc8..4f6002874c 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGISTER_ALLOCATOR_H_
-#define V8_REGISTER_ALLOCATOR_H_
+#ifndef V8_COMPILER_REGISTER_ALLOCATOR_H_
+#define V8_COMPILER_REGISTER_ALLOCATOR_H_
#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
@@ -1213,4 +1213,4 @@ class LiveRangeConnector final : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_REGISTER_ALLOCATOR_H_
+#endif // V8_COMPILER_REGISTER_ALLOCATOR_H_
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index f8a5a9c504..739fb421ab 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -588,10 +588,11 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
} else if (use_info.type_check() == TypeCheckKind::kNumber ||
(use_info.type_check() == TypeCheckKind::kNumberOrOddball &&
!output_type->Maybe(Type::BooleanOrNullOrNumber()))) {
- op = simplified()->CheckedTaggedToFloat64(CheckTaggedInputMode::kNumber);
+ op = simplified()->CheckedTaggedToFloat64(CheckTaggedInputMode::kNumber,
+ use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = simplified()->CheckedTaggedToFloat64(
- CheckTaggedInputMode::kNumberOrOddball);
+ CheckTaggedInputMode::kNumberOrOddball, use_info.feedback());
}
} else if (output_rep == MachineRepresentation::kFloat32) {
op = machine()->ChangeFloat32ToFloat64();
@@ -767,6 +768,7 @@ Node* RepresentationChanger::GetBitRepresentationFor(
} else if (m.Is(factory()->true_value())) {
return jsgraph()->Int32Constant(1);
}
+ break;
}
default:
break;
@@ -1062,11 +1064,11 @@ Node* RepresentationChanger::TypeError(Node* node,
std::ostringstream use_str;
use_str << use;
- V8_Fatal(__FILE__, __LINE__,
- "RepresentationChangerError: node #%d:%s of "
- "%s cannot be changed to %s",
- node->id(), node->op()->mnemonic(), out_str.str().c_str(),
- use_str.str().c_str());
+ FATAL(
+ "RepresentationChangerError: node #%d:%s of "
+ "%s cannot be changed to %s",
+ node->id(), node->op()->mnemonic(), out_str.str().c_str(),
+ use_str.str().c_str());
}
return node;
}
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index b23a3dac5b..571f13cd7d 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -203,25 +203,29 @@ class UseInfo {
Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall,
feedback);
}
- static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros) {
+ static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros,
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32,
- Truncation::Any(identify_zeros), TypeCheckKind::kSigned32);
+ Truncation::Any(identify_zeros), TypeCheckKind::kSigned32,
+ feedback);
}
- static UseInfo CheckedNumberAsFloat64() {
+ static UseInfo CheckedNumberAsFloat64(const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
- TypeCheckKind::kNumber);
+ TypeCheckKind::kNumber, feedback);
}
- static UseInfo CheckedNumberAsWord32() {
+ static UseInfo CheckedNumberAsWord32(const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
- TypeCheckKind::kNumber);
+ TypeCheckKind::kNumber, feedback);
}
- static UseInfo CheckedNumberOrOddballAsFloat64() {
+ static UseInfo CheckedNumberOrOddballAsFloat64(
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
- TypeCheckKind::kNumberOrOddball);
+ TypeCheckKind::kNumberOrOddball, feedback);
}
- static UseInfo CheckedNumberOrOddballAsWord32() {
+ static UseInfo CheckedNumberOrOddballAsWord32(
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
- TypeCheckKind::kNumberOrOddball);
+ TypeCheckKind::kNumberOrOddball, feedback);
}
// Undetermined representation.
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index c0d3146be1..8327528c71 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -260,28 +260,28 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
// unsigned number never less than 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_NOP;
- // fall through
+ V8_FALLTHROUGH;
case kSignedLessThan:
return lt;
case kUnsignedGreaterThanOrEqual:
// unsigned number always greater than or equal 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_ALWAYS;
- // fall through
+ V8_FALLTHROUGH;
case kSignedGreaterThanOrEqual:
return ge;
case kUnsignedLessThanOrEqual:
// unsigned number never less than 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return CC_EQ;
- // fall through
+ V8_FALLTHROUGH;
case kSignedLessThanOrEqual:
return le;
case kUnsignedGreaterThan:
// unsigned number always greater than or equal 0
if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
return ne;
- // fall through
+ V8_FALLTHROUGH;
case kSignedGreaterThan:
return gt;
case kOverflow:
@@ -984,6 +984,16 @@ void AdjustStackPointerForTailCall(
}
}
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
+ S390OperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
+ }
+}
+
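// [Editor's sketch, not part of the patch] What EmitWordLoadPoisoningIfNeeded
// above buys us, modeled in plain C++: the poison register holds ~0 on the
// architecturally correct path and 0 under misspeculation, so AND-ing it into
// a freshly loaded value neutralizes the value before it can feed a
// speculative, cache-observable dependent access. Function name and
// parameters below are hypothetical.
#include <cstdint>
inline uint64_t PoisonLoadedValue(uint64_t loaded, uint64_t poison_register) {
  // Correct path: poison_register == ~0, the value passes through unchanged.
  // Misspeculated path: poison_register == 0, the result is 0.
  return loaded & poison_register;
}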
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -1028,21 +1038,33 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ Register scratch = r1;
+ int pc_offset = __ pc_offset();
+ __ larl(scratch, Operand(-pc_offset/2));
+ __ CmpP(scratch, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+}
+
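// [Editor's note, not part of the patch] Why larl with -pc_offset/2
// reconstructs the code start in the check above: on s390, larl computes
// PC + 2 * immediate (the immediate is counted in halfwords), and
// pc_offset() is the byte offset of this larl from the start of the
// generated code, so:
//   computed = current_pc + 2 * (-pc_offset / 2)
//            = (code_start + pc_offset) - pc_offset
//            = code_start
// which is then compared against kJavaScriptCallCodeStartRegister.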
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// 1. read from memory the word that contains the deopt bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- __ larl(r1, &current);
- int pc_offset = __ pc_offset();
- __ bind(&current);
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset);
- __ LoadP(ip, MemOperand(r1, offset));
+ if (FLAG_debug_code) {
+ // Check that {kJavaScriptCallCodeStartRegister} is correct.
+ int pc_offset = __ pc_offset();
+ __ larl(ip, Operand(-pc_offset/2));
+ __ CmpP(ip, kJavaScriptCallCodeStartRegister);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+ }
+
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ LoadP(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadW(ip,
FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(ip, Code::kMarkedForDeoptimizationBit);
@@ -1051,6 +1073,37 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne);
}
+void CodeGenerator::GenerateSpeculationPoison() {
+ Register scratch = r1;
+
+ Label current_pc;
+ __ larl(scratch, &current_pc);
+
+ __ bind(&current_pc);
+ __ SubP(scratch, Operand(__ pc_offset()));
+
+ // Calculate a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ // difference = (current - expected) | (expected - current)
+ // poison = ~(difference >> (kBitsPerPointer - 1))
+ __ LoadRR(kSpeculationPoisonRegister, scratch);
+ __ SubP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ SubP(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
+ scratch);
+ __ OrP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ kJavaScriptCallCodeStartRegister);
+ __ ShiftRightArithP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
+ Operand(kBitsPerPointer - 1));
+ __ NotP(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
+}
+
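// [Editor's sketch, not part of the patch] The mask computation above,
// modeled as portable C++ for a 64-bit target (kBitsPerPointer == 64),
// assuming the usual arithmetic right shift of signed values:
#include <cstdint>
inline uint64_t SpeculationPoisonMask(uint64_t current, uint64_t expected) {
  uint64_t diff = (current - expected) | (expected - current);
  // current == expected: diff == 0, the shift yields 0, the complement is
  // all ones. current != expected: one subtraction wraps negative, so the
  // sign bit of diff is set; the arithmetic shift smears it across the word
  // and the complement is 0.
  return ~static_cast<uint64_t>(static_cast<int64_t>(diff) >> 63);
}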
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
+ __ AndP(sp, sp, kSpeculationPoisonRegister);
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1155,9 +1208,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CmpP(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
- __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(ip);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ __ LoadP(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(r4);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1925,15 +1979,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kS390_ExtendSignWord8:
+ case kS390_SignExtendWord8ToInt32:
__ lbr(i.OutputRegister(), i.InputRegister(0));
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
- case kS390_ExtendSignWord16:
+ case kS390_SignExtendWord16ToInt32:
__ lhr(i.OutputRegister(), i.InputRegister(0));
CHECK_AND_ZERO_EXT_OUTPUT(1);
break;
- case kS390_ExtendSignWord32:
+ case kS390_SignExtendWord8ToInt64:
+ __ lgbr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_SignExtendWord16ToInt64:
+ __ lghr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_SignExtendWord32ToInt64:
__ lgfr(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_Uint32ToUint64:
@@ -2106,6 +2166,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(LoadB);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_BitcastFloat32ToInt32:
ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadlW), nullInstr);
@@ -2123,27 +2184,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kS390_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(LoadlB);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(LoadlW);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadW);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16:
ASSEMBLE_LOAD_INTEGER(lrvh);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse32:
ASSEMBLE_LOAD_INTEGER(lrv);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse64:
ASSEMBLE_LOAD_INTEGER(lrvg);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16RR:
__ lrvr(i.OutputRegister(), i.InputRegister(0));
@@ -2157,6 +2226,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
+ EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
@@ -2204,28 +2274,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Lay:
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadInt8:
+ case kWord32AtomicLoadInt8:
__ LoadB(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadUint8:
+ case kWord32AtomicLoadUint8:
__ LoadlB(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadInt16:
+ case kWord32AtomicLoadInt16:
__ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadUint16:
+ case kWord32AtomicLoadUint16:
__ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicLoadWord32:
+ case kWord32AtomicLoadWord32:
__ LoadlW(i.OutputRegister(), i.MemoryOperand());
break;
- case kAtomicStoreWord8:
+ case kWord32AtomicStoreWord8:
__ StoreByte(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
- case kAtomicStoreWord16:
+ case kWord32AtomicStoreWord16:
__ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
- case kAtomicStoreWord32:
+ case kWord32AtomicStoreWord32:
__ StoreW(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
// 0x aa bb cc dd
@@ -2281,8 +2351,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#endif
- case kAtomicExchangeInt8:
- case kAtomicExchangeUint8: {
+ case kWord32AtomicExchangeInt8:
+ case kWord32AtomicExchangeUint8: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2313,15 +2383,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_EXCHANGE_BYTE(3);
__ bind(&done);
- if (opcode == kAtomicExchangeInt8) {
+ if (opcode == kWord32AtomicExchangeInt8) {
__ lbr(output, output);
} else {
__ llcr(output, output);
}
break;
}
- case kAtomicExchangeInt16:
- case kAtomicExchangeUint16: {
+ case kWord32AtomicExchangeInt16:
+ case kWord32AtomicExchangeUint16: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2340,14 +2410,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_EXCHANGE_HALFWORD(1);
__ bind(&done);
- if (opcode == kAtomicExchangeInt8) {
+ if (opcode == kWord32AtomicExchangeInt8) {
__ lhr(output, output);
} else {
__ llhr(output, output);
}
break;
}
- case kAtomicExchangeWord32: {
+ case kWord32AtomicExchangeWord32: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2389,6 +2459,19 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+  // TODO(John): Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ XorP(r0, r0);
+ __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
+ kSpeculationPoisonRegister, r0);
+}
+
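// [Editor's note, not part of the patch] The idea behind the branch
// poisoning above, sketched in pseudo-C++: right after a conditional branch
// the flags still encode the branch's real outcome, so a conditional move
// keyed on the *negated* condition zeroes the poison register exactly when
// this block is being executed down the mispredicted path:
//   if (!condition_that_guards_this_block) kSpeculationPoisonRegister = 0;
// On the correct path the negated condition is false and the register keeps
// its all-ones value.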
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -2436,8 +2519,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
__ Drop(pop_count);
__ Ret();
} else {
@@ -2537,8 +2621,8 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
// Save callee-saved Double registers.
if (double_saves != 0) {
@@ -2549,7 +2633,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
// register save area does not include the fp or constant pool pointer.
const int num_saves = kNumCalleeSaved - 1;
@@ -2559,15 +2643,15 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ Push(r14, fp);
__ LoadRR(fp, sp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue(ip);
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ Push(kJavaScriptCallArgCountRegister);
}
} else {
@@ -2578,8 +2662,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -2591,9 +2675,10 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ InitializePoisonForLoadsIfNeeded();
}
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
__ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
}
@@ -2606,7 +2691,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Save callee-saved registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
@@ -2614,23 +2699,23 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
// Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ MultiPop(saves);
}
// Restore double registers.
- const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
if (double_saves != 0) {
__ MultiPopDoubles(double_saves);
}
S390OperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index cb94da2ec7..b5296f63d0 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -99,9 +99,11 @@ namespace compiler {
V(S390_PushFrame) \
V(S390_StackClaim) \
V(S390_StoreToStackSlot) \
- V(S390_ExtendSignWord8) \
- V(S390_ExtendSignWord16) \
- V(S390_ExtendSignWord32) \
+ V(S390_SignExtendWord8ToInt32) \
+ V(S390_SignExtendWord16ToInt32) \
+ V(S390_SignExtendWord8ToInt64) \
+ V(S390_SignExtendWord16ToInt64) \
+ V(S390_SignExtendWord32ToInt64) \
V(S390_Uint32ToUint64) \
V(S390_Int64ToInt32) \
V(S390_Int64ToFloat32) \
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 1850830f6e..fd388a219a 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -96,9 +96,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_CmpDouble:
case kS390_Tst32:
case kS390_Tst64:
- case kS390_ExtendSignWord8:
- case kS390_ExtendSignWord16:
- case kS390_ExtendSignWord32:
+ case kS390_SignExtendWord8ToInt32:
+ case kS390_SignExtendWord16ToInt32:
+ case kS390_SignExtendWord8ToInt64:
+ case kS390_SignExtendWord16ToInt64:
+ case kS390_SignExtendWord32ToInt64:
case kS390_Uint32ToUint64:
case kS390_Int64ToInt32:
case kS390_Int64ToFloat32:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 457c5a1d82..61a335d46e 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -329,6 +329,8 @@ ArchOpcode SelectLoadOpcode(Node* node) {
V(Word32Popcnt) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32) \
+ V(SignExtendWord16ToInt32) \
/* Word32 bin op */ \
V(Int32Add) \
V(Int32Sub) \
@@ -717,17 +719,24 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
void InstructionSelector::VisitLoad(Node* node) {
S390OperandGenerator g(this);
- ArchOpcode opcode = SelectLoadOpcode(node);
+ InstructionCode opcode = SelectLoadOpcode(node);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- Emit(code, 1, outputs, input_count, inputs);
+ opcode |= AddressingModeField::encode(mode);
+ if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+
+ Emit(opcode, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1019,7 +1028,7 @@ static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
- selector->Emit(kS390_ExtendSignWord16,
+ selector->Emit(kS390_SignExtendWord16ToInt32,
canEliminateZeroExt ? g.DefineSameAsFirst(node)
: g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -1027,7 +1036,7 @@ static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
return true;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
- selector->Emit(kS390_ExtendSignWord8,
+ selector->Emit(kS390_SignExtendWord8ToInt32,
canEliminateZeroExt ? g.DefineSameAsFirst(node)
: g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -1415,6 +1424,10 @@ static inline bool TryMatchDoubleConstructFromInsert(
null) \
V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
null) \
+ V(Word32, SignExtendWord8ToInt32, kS390_SignExtendWord8ToInt32, \
+ OperandMode::kNone, null) \
+ V(Word32, SignExtendWord16ToInt32, kS390_SignExtendWord16ToInt32, \
+ OperandMode::kNone, null) \
V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32, \
OperandMode::kNone, null)
@@ -1427,8 +1440,14 @@ static inline bool TryMatchDoubleConstructFromInsert(
OperandMode::kNone, null)
#define WORD32_UNARY_OP_LIST(V) \
WORD32_UNARY_OP_LIST_32(V) \
- V(Word32, ChangeInt32ToInt64, kS390_ExtendSignWord32, OperandMode::kNone, \
- null) \
+ V(Word32, ChangeInt32ToInt64, kS390_SignExtendWord32ToInt64, \
+ OperandMode::kNone, null) \
+ V(Word32, SignExtendWord8ToInt64, kS390_SignExtendWord8ToInt64, \
+ OperandMode::kNone, null) \
+ V(Word32, SignExtendWord16ToInt64, kS390_SignExtendWord16ToInt64, \
+ OperandMode::kNone, null) \
+ V(Word32, SignExtendWord32ToInt64, kS390_SignExtendWord32ToInt64, \
+ OperandMode::kNone, null) \
V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone, \
[&]() -> bool { \
if (ProduceWord32Result(node->InputAt(0))) { \
@@ -1650,10 +1669,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
}
}
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, InstructionCode opcode,
- FlagsContinuation* cont);
-
void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
Node* node, Node* value, FlagsContinuation* cont,
bool discard_output = false);
@@ -1841,13 +1856,13 @@ void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
}
}
+} // namespace
+
// Shared routine for word comparisons against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, InstructionCode opcode,
- FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1857,7 +1872,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
FlagsCondition fc = cont->condition();
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
@@ -1866,31 +1881,31 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Try to combine the branch with a comparison.
Node* const user = m.node();
Node* const value = m.left().node();
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt32Sub:
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
- return VisitTestUnderMask(selector, value, cont);
+ return VisitTestUnderMask(this, value, cont);
default:
break;
}
}
}
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
}
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
@@ -1899,50 +1914,50 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Try to combine the branch with a comparison.
Node* const user = m.node();
Node* const value = m.left().node();
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kWord64And:
- return VisitTestUnderMask(selector, value, cont);
+ return VisitTestUnderMask(this, value, cont);
default:
break;
}
}
}
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
#endif
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1954,46 +1969,46 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord32BinOp(selector, node, kS390_Add32,
- AddOperandMode, cont);
+ return VisitWord32BinOp(this, node, kS390_Add32, AddOperandMode,
+ cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord32BinOp(selector, node, kS390_Sub32,
- SubOperandMode, cont);
+ return VisitWord32BinOp(this, node, kS390_Sub32, SubOperandMode,
+ cont);
case IrOpcode::kInt32MulWithOverflow:
if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitWord32BinOp(
- selector, node, kS390_Mul32,
+ this, node, kS390_Mul32,
OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);
} else {
cont->OverwriteAndNegateIfEqual(kNotEqual);
return VisitWord32BinOp(
- selector, node, kS390_Mul32WithOverflow,
+ this, node, kS390_Mul32WithOverflow,
OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
cont);
}
case IrOpcode::kInt32AbsWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord32UnaryOp(selector, node, kS390_Abs32,
+ return VisitWord32UnaryOp(this, node, kS390_Abs32,
OperandMode::kNone, cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64AbsWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord64UnaryOp(selector, node, kS390_Abs64,
+ return VisitWord64UnaryOp(this, node, kS390_Abs64,
OperandMode::kNone, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord64BinOp(selector, node, kS390_Add64,
- AddOperandMode, cont);
+ return VisitWord64BinOp(this, node, kS390_Add64, AddOperandMode,
+ cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitWord64BinOp(selector, node, kS390_Sub64,
- SubOperandMode, cont);
+ return VisitWord64BinOp(this, node, kS390_Sub64, SubOperandMode,
+ cont);
#endif
default:
break;
@@ -2003,17 +2018,16 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
break;
case IrOpcode::kInt32Sub:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord32Compare(selector, value, cont);
+ return VisitWord32Compare(this, value, cont);
break;
case IrOpcode::kWord32And:
- return VisitTestUnderMask(selector, value, cont);
+ return VisitTestUnderMask(this, value, cont);
case IrOpcode::kLoad: {
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
switch (load_rep.representation()) {
case MachineRepresentation::kWord32:
- if (opcode == kS390_LoadAndTestWord32) {
- return VisitLoadAndTest(selector, opcode, user, value, cont);
- }
+ return VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value,
+ cont);
default:
break;
}
@@ -2024,13 +2038,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
break;
case IrOpcode::kWord32Or:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord32BinOp(selector, value, kS390_Or32, Or32OperandMode,
+ return VisitWord32BinOp(this, value, kS390_Or32, Or32OperandMode,
cont);
break;
case IrOpcode::kWord32Xor:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord32BinOp(selector, value, kS390_Xor32,
- Xor32OperandMode, cont);
+ return VisitWord32BinOp(this, value, kS390_Xor32, Xor32OperandMode,
+ cont);
break;
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Shl:
@@ -2041,22 +2055,22 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64Sub:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
break;
case IrOpcode::kWord64And:
- return VisitTestUnderMask(selector, value, cont);
+ return VisitTestUnderMask(this, value, cont);
case IrOpcode::kInt64Add:
// can't handle overflow case.
break;
case IrOpcode::kWord64Or:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord64BinOp(selector, value, kS390_Or64, Or64OperandMode,
+ return VisitWord64BinOp(this, value, kS390_Or64, Or64OperandMode,
cont);
break;
case IrOpcode::kWord64Xor:
if (fc == kNotEqual || fc == kEqual)
- return VisitWord64BinOp(selector, value, kS390_Xor64,
- Xor64OperandMode, cont);
+ return VisitWord64BinOp(this, value, kS390_Xor64, Xor64OperandMode,
+ cont);
break;
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Shl:
@@ -2071,54 +2085,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
// Branch could not be combined with a compare, emit LoadAndTest
- VisitLoadAndTest(selector, opcode, user, value, cont, true);
-}
-
-void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord32, cont);
-}
-
-#if V8_TARGET_ARCH_S390X
-void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord64, cont);
-}
-#endif
-
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+ VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -2126,22 +2093,23 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
- }
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ }
#if V8_TARGET_ARCH_S390X
InstructionOperand index_operand_zero_ext = g.TempRegister();
Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
@@ -2150,6 +2118,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
}
+ }
// Generate a sequence of conditional jumps.
return EmitLookupSwitch(sw, value_operand);
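// [Editor's note, not part of the patch] A worked instance of the cost
// heuristic above: for case_count = 4 and value_range = 8,
//   table:  space = 4 + 8 = 12,     time = 3  ->  12 + 3 * 3 = 21
//   lookup: space = 3 + 2 * 4 = 11, time = 4  ->  11 + 3 * 4 = 23
// 21 <= 23, so a jump table is chosen, provided jump tables are enabled and
// the value range stays below kMaxTableSwitchValueRange.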
@@ -2159,7 +2128,8 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitLoadAndTest(this, kS390_LoadAndTestWord32, m.node(),
+ m.left().node(), &cont, true);
}
VisitWord32Compare(this, node, &cont);
}
@@ -2191,7 +2161,8 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int64BinopMatcher m(node);
if (m.right().Is(0)) {
- return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitLoadAndTest(this, kS390_LoadAndTestWord64, m.node(),
+ m.left().node(), &cont, true);
}
VisitWord64Compare(this, node, &cont);
}
@@ -2252,14 +2223,14 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
S390OperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -2301,7 +2272,7 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2309,13 +2280,15 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ opcode =
+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
+ : kWord32AtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicLoadWord32;
+ opcode = kWord32AtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2325,7 +2298,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2334,13 +2307,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicStoreWord8;
+ opcode = kWord32AtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicStoreWord16;
+ opcode = kWord32AtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicStoreWord32;
+ opcode = kWord32AtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -2356,7 +2329,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2364,15 +2337,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpRepresentationOf(node->op());
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2390,19 +2363,19 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitAtomicAdd(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicAdd(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicSub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicSub(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicAnd(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicAnd(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicOr(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord32AtomicXor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
@@ -2572,9 +2545,9 @@ void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
// TODO(John): Port.
}
@@ -2588,6 +2561,12 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2613,6 +2592,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 423d757a4f..980c88a6e6 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -85,6 +85,7 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -116,6 +117,7 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Min) \
@@ -138,6 +140,7 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8ShrS) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
@@ -354,7 +357,6 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
int num_lanes = NumLanes(rep_type);
Node** indices = zone()->NewArray<Node*>(num_lanes);
GetIndexNodes(index, indices, rep_type);
- DCHECK_LT(2, node->InputCount());
Node* value = node->InputAt(2);
DCHECK(HasReplacement(1, value));
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
@@ -387,14 +389,30 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
}
void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
- const Operator* op) {
+ const Operator* op,
+ bool not_horizontal) {
DCHECK_EQ(2, node->InputCount());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ if (not_horizontal) {
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ }
+ } else {
+ for (int i = 0; i < num_lanes / 2; ++i) {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ rep_node[i] =
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
+ rep_node[i + num_lanes / 2] =
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
+#else
+ rep_node[i] = graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
+ rep_node[i + num_lanes / 2] =
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
+#endif
+ }
}
ReplaceNode(node, rep_node, num_lanes);
}
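// [Editor's sketch, not part of the patch] A scalar model of the horizontal
// lowering above for I32x4AddHoriz on a little-endian target; the function
// name and signature are hypothetical.
#include <cstdint>
void I32x4AddHoriz(const int32_t a[4], const int32_t b[4], int32_t out[4]) {
  // Adjacent lanes of each input are summed pairwise; the low half of the
  // result comes from the left operand and the high half from the right one
  // (the halves are swapped on big-endian targets, as in the code above).
  out[0] = a[0] + a[1];
  out[1] = a[2] + a[3];
  out[2] = b[0] + b[1];
  out[3] = b[2] + b[3];
}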
@@ -436,7 +454,8 @@ Node* SimdScalarLowering::FixUpperBits(Node* input, int32_t shift) {
void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
SimdType input_rep_type,
- const Operator* op) {
+ const Operator* op,
+ bool not_horizontal) {
DCHECK_EQ(2, node->InputCount());
DCHECK(input_rep_type == SimdType::kInt16x8 ||
input_rep_type == SimdType::kInt8x16);
@@ -446,9 +465,29 @@ void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
int32_t shift_val =
(input_rep_type == SimdType::kInt16x8) ? kShift16 : kShift8;
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = FixUpperBits(graph()->NewNode(op, rep_left[i], rep_right[i]),
- shift_val);
+ if (not_horizontal) {
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i], rep_right[i]), shift_val);
+ }
+ } else {
+ for (int i = 0; i < num_lanes / 2; ++i) {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
+ shift_val);
+ rep_node[i + num_lanes / 2] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
+ shift_val);
+#else
+ rep_node[i] = FixUpperBits(
+ graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
+ shift_val);
+ rep_node[i + num_lanes / 2] = FixUpperBits(
+ graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
+ shift_val);
+#endif
+ }
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -578,9 +617,9 @@ Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
args[3] = graph()->start();
Signature<MachineType>::Builder sig_builder(zone(), 0, 1);
sig_builder.AddParam(MachineType::Pointer());
- CallDescriptor* desc =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), sig_builder.Build());
- Node* call = graph()->NewNode(common()->Call(desc), 4, args);
+ Node* call = graph()->NewNode(common()->Call(call_descriptor), 4, args);
return graph()->NewNode(machine()->Load(LoadRepresentation::Float64()),
stack_slot, jsgraph_->Int32Constant(0), call,
graph()->start());
@@ -636,7 +675,8 @@ void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
break;
case IrOpcode::kI16x8ShrU:
- rep_node[i] = Mask(rep_node[i], kMask16); // Fall through.
+ rep_node[i] = Mask(rep_node[i], kMask16);
+ V8_FALLTHROUGH;
case IrOpcode::kI32x4ShrU:
rep_node[i] =
graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
@@ -751,22 +791,35 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kStore: {
+      // For store operations, use the replacement type of the stored input
+      // instead of that of the affected node.
+ DCHECK_LT(2, node->InputCount());
+ SimdType input_rep_type = ReplacementType(node->InputAt(2));
+ if (input_rep_type != rep_type)
+ replacements_[node->id()].type = input_rep_type;
MachineRepresentation rep =
StoreRepresentationOf(node->op()).representation();
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
- store_op = machine()->Store(StoreRepresentation(
- MachineTypeFrom(rep_type).representation(), write_barrier_kind));
- LowerStoreOp(rep, node, store_op, rep_type);
+ store_op = machine()->Store(
+ StoreRepresentation(MachineTypeFrom(input_rep_type).representation(),
+ write_barrier_kind));
+ LowerStoreOp(rep, node, store_op, input_rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
+ // For stores, use the replacement type of the value input rather than
+ // the replacement type recorded for the store node itself.
+ DCHECK_LT(2, node->InputCount());
+ SimdType input_rep_type = ReplacementType(node->InputAt(2));
+ if (input_rep_type != rep_type)
+ replacements_[node->id()].type = input_rep_type;
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
- store_op =
- machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
- LowerStoreOp(rep, node, store_op, rep_type);
+ store_op = machine()->UnalignedStore(
+ MachineTypeFrom(input_rep_type).representation());
+ LowerStoreOp(rep, node, store_op, input_rep_type);
break;
}
case IrOpcode::kReturn: {
@@ -779,18 +832,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
case IrOpcode::kCall: {
// TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
- CallDescriptor* descriptor =
+ auto call_descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
- (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Simd128())) {
+ (call_descriptor->ReturnCount() == 1 &&
+ call_descriptor->GetReturnType(0) == MachineType::Simd128())) {
// We have to adjust the call descriptor.
- const Operator* op =
- common()->Call(GetI32WasmCallDescriptorForSimd(zone(), descriptor));
+ const Operator* op = common()->Call(
+ GetI32WasmCallDescriptorForSimd(zone(), call_descriptor));
NodeProperties::ChangeOp(node, op);
}
- if (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Simd128()) {
+ if (call_descriptor->ReturnCount() == 1 &&
+ call_descriptor->GetReturnType(0) == MachineType::Simd128()) {
// We access the additional return values through projections.
Node* rep_node[kNumLanes32];
for (int i = 0; i < kNumLanes32; ++i) {
@@ -831,6 +884,14 @@ void SimdScalarLowering::LowerNode(Node* node) {
I32X4_BINOP_CASE(kS128Or, Word32Or)
I32X4_BINOP_CASE(kS128Xor, Word32Xor)
#undef I32X4_BINOP_CASE
+ case IrOpcode::kI32x4AddHoriz: {
+ LowerBinaryOp(node, rep_type, machine()->Int32Add(), false);
+ break;
+ }
+ case IrOpcode::kI16x8AddHoriz: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add(), false);
+ break;
+ }
case IrOpcode::kI16x8Add:
case IrOpcode::kI8x16Add: {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add());
@@ -940,6 +1001,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerShiftOp(node, rep_type);
break;
}
+ case IrOpcode::kF32x4AddHoriz: {
+ LowerBinaryOp(node, rep_type, machine()->Float32Add(), false);
+ break;
+ }
#define F32X4_BINOP_CASE(name) \
case IrOpcode::kF32x4##name: { \
LowerBinaryOp(node, rep_type, machine()->Float32##name()); \
@@ -1002,7 +1067,11 @@ void SimdScalarLowering::LowerNode(Node* node) {
DCHECK_EQ(2, node->InputCount());
Node* repNode = node->InputAt(1);
int32_t lane = OpParameter<int32_t>(node);
- Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** old_rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = old_rep_node[i];
+ }
if (HasReplacement(0, repNode)) {
rep_node[lane] = GetReplacements(repNode)[0];
} else {
@@ -1075,11 +1144,12 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
- jsgraph_->Int32Constant(0)));
- rep_node[i] = d.Phi(MachineTypeFrom(rep_type).representation(),
- rep_right[1], rep_left[0]);
+ Node* tmp1 =
+ graph()->NewNode(machine()->Word32Xor(), rep_left[i], rep_right[i]);
+ Node* tmp2 =
+ graph()->NewNode(machine()->Word32And(), boolean_input[i], tmp1);
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Xor(), rep_right[i], tmp2);
}
ReplaceNode(node, rep_node, num_lanes);
break;
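
The new select lowering drops the branch diamond in favour of the branchless-select identity, and also fixes the old code's use of rep_right[1] and rep_left[0] instead of lane i. For a mask m that is all ones or all zeros, r = b ^ (m & (a ^ b)) yields a when m is all ones and b when m is zero:

  #include <cstdint>

  // Sketch: branchless per-lane select, as built above.
  uint32_t Select(uint32_t mask, uint32_t a, uint32_t b) {
    return b ^ (mask & (a ^ b));
  }
  // Select(0xFFFFFFFFu, a, b) == a;  Select(0u, a, b) == b.
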
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index f7f276cd5e..ad329877e2 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -73,12 +73,13 @@ class SimdScalarLowering {
const Operator* load_op, SimdType type);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
- void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op);
+ void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
+ bool not_horizontal = true);
void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
bool invert_inputs = false);
Node* FixUpperBits(Node* input, int32_t shift);
void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
- const Operator* op);
+ const Operator* op, bool not_horizontal = true);
Node* Mask(Node* input, int32_t mask);
void LowerSaturateBinaryOp(Node* node, SimdType input_rep_type,
const Operator* op, bool is_signed);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 6e6c011fc1..bde73f4e59 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -82,29 +82,32 @@ MachineRepresentation MachineRepresentationFromArrayType(
return MachineRepresentation::kFloat32;
case kExternalFloat64Array:
return MachineRepresentation::kFloat64;
+ case kExternalBigInt64Array:
+ case kExternalBigUint64Array:
+ UNIMPLEMENTED();
}
UNREACHABLE();
}
UseInfo CheckedUseInfoAsWord32FromHint(
- NumberOperationHint hint,
+ NumberOperationHint hint, const VectorSlotPair& feedback = VectorSlotPair(),
IdentifyZeros identify_zeros = kDistinguishZeros) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
- return UseInfo::CheckedSignedSmallAsWord32(identify_zeros,
- VectorSlotPair());
+ return UseInfo::CheckedSignedSmallAsWord32(identify_zeros, feedback);
case NumberOperationHint::kSigned32:
- return UseInfo::CheckedSigned32AsWord32(identify_zeros);
+ return UseInfo::CheckedSigned32AsWord32(identify_zeros, feedback);
case NumberOperationHint::kNumber:
- return UseInfo::CheckedNumberAsWord32();
+ return UseInfo::CheckedNumberAsWord32(feedback);
case NumberOperationHint::kNumberOrOddball:
- return UseInfo::CheckedNumberOrOddballAsWord32();
+ return UseInfo::CheckedNumberOrOddballAsWord32(feedback);
}
UNREACHABLE();
}
-UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
+UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint,
+ const VectorSlotPair& feedback) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
@@ -113,9 +116,9 @@ UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
UNREACHABLE();
break;
case NumberOperationHint::kNumber:
- return UseInfo::CheckedNumberAsFloat64();
+ return UseInfo::CheckedNumberAsFloat64(feedback);
case NumberOperationHint::kNumberOrOddball:
- return UseInfo::CheckedNumberOrOddballAsFloat64();
+ return UseInfo::CheckedNumberOrOddballAsFloat64(feedback);
}
UNREACHABLE();
}
@@ -1022,8 +1025,8 @@ class RepresentationSelector {
}
void VisitCall(Node* node, SimplifiedLowering* lowering) {
- const CallDescriptor* desc = CallDescriptorOf(node->op());
- int params = static_cast<int>(desc->ParameterCount());
+ auto call_descriptor = CallDescriptorOf(node->op());
+ int params = static_cast<int>(call_descriptor->ParameterCount());
int value_input_count = node->op()->ValueInputCount();
// Propagate representation information from call descriptor.
for (int i = 0; i < value_input_count; i++) {
@@ -1033,15 +1036,15 @@ class RepresentationSelector {
} else if ((i - 1) < params) {
ProcessInput(node, i,
TruncatingUseInfoFromRepresentation(
- desc->GetInputType(i).representation()));
+ call_descriptor->GetInputType(i).representation()));
} else {
ProcessInput(node, i, UseInfo::AnyTagged());
}
}
ProcessRemainingInputs(node, value_input_count);
- if (desc->ReturnCount() > 0) {
- SetOutput(node, desc->GetReturnType(0).representation());
+ if (call_descriptor->ReturnCount() > 0) {
+ SetOutput(node, call_descriptor->GetReturnType(0).representation());
} else {
SetOutput(node, MachineRepresentation::kTagged);
}
@@ -1234,12 +1237,16 @@ class RepresentationSelector {
MachineRepresentation field_representation, int field_offset,
Type* field_type, MachineRepresentation value_representation,
Node* value) {
- if (base_taggedness == kTaggedBase &&
- field_offset == HeapObject::kMapOffset) {
- return kMapWriteBarrier;
+ WriteBarrierKind write_barrier_kind =
+ WriteBarrierKindFor(base_taggedness, field_representation, field_type,
+ value_representation, value);
+ if (write_barrier_kind != kNoWriteBarrier) {
+ if (base_taggedness == kTaggedBase &&
+ field_offset == HeapObject::kMapOffset) {
+ write_barrier_kind = kMapWriteBarrier;
+ }
}
- return WriteBarrierKindFor(base_taggedness, field_representation,
- field_type, value_representation, value);
+ return write_barrier_kind;
}
Graph* graph() const { return jsgraph_->graph(); }
@@ -1321,13 +1328,14 @@ class RepresentationSelector {
!right_feedback_type->Maybe(Type::MinusZero())) {
left_identify_zeros = kIdentifyZeros;
}
- UseInfo left_use =
- CheckedUseInfoAsWord32FromHint(hint, left_identify_zeros);
+ UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(),
+ left_identify_zeros);
// For CheckedInt32Add and CheckedInt32Sub, we don't need to do
// a minus zero check for the right hand side, since we already
// know that the left hand side is a proper Signed32 value,
// potentially guarded by a check.
- UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
+ UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, VectorSlotPair(),
+ kIdentifyZeros);
VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
Type::Signed32());
}
@@ -1357,7 +1365,7 @@ class RepresentationSelector {
}
// default case => Float64Add/Sub
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) {
ChangeToPureOp(node, Float64Op(node));
@@ -1456,7 +1464,7 @@ class RepresentationSelector {
return;
}
// default case => Float64Mod
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -1717,9 +1725,10 @@ class RepresentationSelector {
// on Oddballs, so make sure we don't accidentally sneak in a
// hint with Oddball feedback here.
DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
- // Fallthrough
+ V8_FALLTHROUGH;
case NumberOperationHint::kNumber:
- VisitBinop(node, CheckedUseInfoAsFloat64FromHint(hint),
+ VisitBinop(node,
+ CheckedUseInfoAsFloat64FromHint(hint, VectorSlotPair()),
MachineRepresentation::kBit);
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -1792,7 +1801,8 @@ class RepresentationSelector {
}
// Checked float64 x float64 => float64
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ VisitBinop(node,
+ UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -1886,7 +1896,8 @@ class RepresentationSelector {
}
// default case => Float64Div
- VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ VisitBinop(node,
+ UseInfo::CheckedNumberOrOddballAsFloat64(VectorSlotPair()),
MachineRepresentation::kFloat64, Type::Number());
if (lower()) ChangeToPureOp(node, Float64Op(node));
return;
@@ -2320,7 +2331,6 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
return;
}
- case IrOpcode::kClassOf:
case IrOpcode::kTypeOf: {
return VisitUnop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
@@ -2352,16 +2362,25 @@ class RepresentationSelector {
NodeProperties::ChangeOp(node, simplified()->SeqStringCharCodeAt());
}
} else {
- // TODO(turbofan): Allow builtins to return untagged values.
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedSigned);
+ MachineRepresentation::kWord32);
}
return;
}
case IrOpcode::kStringCodePointAt: {
- // TODO(turbofan): Allow builtins to return untagged values.
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedSigned);
+ Type* string_type = TypeOf(node->InputAt(0));
+ if (string_type->Is(Type::SeqString())) {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ UnicodeEncoding encoding = UnicodeEncodingOf(node->op());
+ NodeProperties::ChangeOp(
+ node, simplified()->SeqStringCodePointAt(encoding));
+ }
+ } else {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedSigned);
+ }
return;
}
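
StringCodePointAt now gets the same sequential-string fast path as StringCharCodeAt, with the UnicodeEncoding parameter deciding whether a surrogate pair is combined. A hypothetical scalar model of the UTF32 behaviour on a two-byte sequential string (illustrative helper, not V8 code):

  #include <cstddef>
  #include <cstdint>

  // Sketch: code point at index i, combining a valid surrogate pair (UTF32).
  uint32_t CodePointAt(const uint16_t* chars, size_t length, size_t i) {
    uint32_t first = chars[i];
    if (first >= 0xD800 && first <= 0xDBFF && i + 1 < length) {
      uint32_t second = chars[i + 1];
      if (second >= 0xDC00 && second <= 0xDFFF) {
        return ((first - 0xD800) << 10) + (second - 0xDC00) + 0x10000;
      }
    }
    return first;  // BMP character or unpaired surrogate
  }
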
case IrOpcode::kStringFromCharCode: {
@@ -2389,6 +2408,14 @@ class RepresentationSelector {
MachineRepresentation::kTaggedSigned);
return;
}
+ case IrOpcode::kStringSubstring: {
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ ProcessInput(node, 1, UseInfo::TruncatingWord32());
+ ProcessInput(node, 2, UseInfo::TruncatingWord32());
+ ProcessRemainingInputs(node, 3);
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl: {
VisitUnop(node, UseInfo::AnyTagged(),
@@ -2396,6 +2423,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckBounds: {
+ const CheckParameters& p = CheckParametersOf(node->op());
Type* index_type = TypeOf(node->InputAt(0));
Type* length_type = TypeOf(node->InputAt(1));
if (index_type->Is(Type::Integral32OrMinusZero())) {
@@ -2414,9 +2442,10 @@ class RepresentationSelector {
}
}
} else {
- VisitBinop(node, UseInfo::CheckedSigned32AsWord32(kIdentifyZeros),
- UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
+ VisitBinop(
+ node,
+ UseInfo::CheckedSigned32AsWord32(kIdentifyZeros, p.feedback()),
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
}
return;
}
@@ -2698,17 +2727,20 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeToNumber: {
- NumberOperationHint const hint = NumberOperationHintOf(node->op());
- switch (hint) {
+ NumberOperationParameters const& p =
+ NumberOperationParametersOf(node->op());
+ switch (p.hint()) {
case NumberOperationHint::kSigned32:
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
- VisitUnop(node, CheckedUseInfoAsWord32FromHint(hint),
+ VisitUnop(node,
+ CheckedUseInfoAsWord32FromHint(p.hint(), p.feedback()),
MachineRepresentation::kWord32, Type::Signed32());
break;
case NumberOperationHint::kNumber:
case NumberOperationHint::kNumberOrOddball:
- VisitUnop(node, CheckedUseInfoAsFloat64FromHint(hint),
+ VisitUnop(node,
+ CheckedUseInfoAsFloat64FromHint(p.hint(), p.feedback()),
MachineRepresentation::kFloat64);
break;
}
@@ -3040,10 +3072,10 @@ class RepresentationSelector {
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
case IrOpcode::kDeadValue:
+ ProcessInput(node, 0, UseInfo::Any());
return SetOutput(node, MachineRepresentation::kNone);
default:
- V8_Fatal(
- __FILE__, __LINE__,
+ FATAL(
"Representation inference: unsupported opcode %i (%s), node #%i\n.",
node->opcode(), node->op()->mnemonic(), node->id());
break;
@@ -3621,14 +3653,14 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
// General case for unsigned integer modulus, with optimization for (unknown)
// power of 2 right hand side.
//
- // if rhs then
+ // if rhs == 0 then
+ // zero
+ // else
// msk = rhs - 1
// if rhs & msk != 0 then
// lhs % rhs
// else
// lhs & msk
- // else
- // zero
//
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
@@ -3636,16 +3668,20 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
const Operator* const phi_op =
common()->Phi(MachineRepresentation::kWord32, 2);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), rhs,
+ Node* check0 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse), check0,
graph()->start());
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* true0;
+ Node* true0 = zero;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* false0;
{
Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* true1 = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_true1);
@@ -3653,13 +3689,10 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
Node* false1 = graph()->NewNode(machine()->Word32And(), lhs, msk);
- if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
- true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+ if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+ false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* false0 = zero;
-
Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
return graph()->NewNode(phi_op, true0, false0, merge0);
}
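
The rewritten Uint32Mod comment and graph test rhs == 0 up front (branch-hinted as unlikely) and reduce the power-of-two case to a single mask. A scalar model of the value the lowered diamond computes, under the asm.js-style convention that x % 0 is 0:

  #include <cstdint>

  // Sketch: what the lowered Uint32Mod graph evaluates to.
  uint32_t Uint32ModModel(uint32_t lhs, uint32_t rhs) {
    if (rhs == 0) return 0;                  // unlikely (BranchHint::kFalse)
    uint32_t msk = rhs - 1;
    if ((rhs & msk) != 0) return lhs % rhs;  // general case
    return lhs & msk;                        // power of two: single AND
  }
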
@@ -3809,10 +3842,10 @@ Operator const* SimplifiedLowering::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
Operator::kNoProperties);
- to_number_operator_.set(common()->Call(desc));
+ to_number_operator_.set(common()->Call(call_descriptor));
}
return to_number_operator_.get();
}
@@ -3821,10 +3854,10 @@ Operator const* SimplifiedLowering::ToNumericOperator() {
if (!to_numeric_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
Operator::kNoProperties);
- to_numeric_operator_.set(common()->Call(desc));
+ to_numeric_operator_.set(common()->Call(call_descriptor));
}
return to_numeric_operator_.get();
}
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9978bae122..f4802a96d0 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -258,11 +258,6 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
UNREACHABLE();
}
-CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64);
- return OpParameter<CheckTaggedInputMode>(op);
-}
-
std::ostream& operator<<(std::ostream& os, GrowFastElementsMode mode) {
switch (mode) {
case GrowFastElementsMode::kDoubleElements:
@@ -487,8 +482,7 @@ size_t hash_value(NumberOperationHint hint) {
}
NumberOperationHint NumberOperationHintOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kSpeculativeToNumber ||
- op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+ DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
op->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
op->opcode() == IrOpcode::kSpeculativeNumberMultiply ||
op->opcode() == IrOpcode::kSpeculativeNumberDivide ||
@@ -507,6 +501,25 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
return OpParameter<NumberOperationHint>(op);
}
+bool operator==(NumberOperationParameters const& lhs,
+ NumberOperationParameters const& rhs) {
+ return lhs.hint() == rhs.hint() && lhs.feedback() == rhs.feedback();
+}
+
+size_t hash_value(NumberOperationParameters const& p) {
+ return base::hash_combine(p.hint(), p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, NumberOperationParameters const& p) {
+ return os << p.hint() << " " << p.feedback();
+}
+
+NumberOperationParameters const& NumberOperationParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kSpeculativeToNumber, op->opcode());
+ return OpParameter<NumberOperationParameters>(op);
+}
+
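
NumberOperationParameters bundles the speculation hint with its feedback slot so that SpeculativeToNumber can carry both; the ==, hash_value and << overloads above are the standard boilerplate for parameterized operators. A hypothetical usage sketch (the builder call mirrors the new signature introduced later in this patch):

  // Sketch: round-tripping the parameters through an operator.
  NumberOperationParameters p(NumberOperationHint::kNumber, VectorSlotPair());
  const Operator* op = simplified()->SpeculativeToNumber(p.hint(), p.feedback());
  DCHECK_EQ(IrOpcode::kSpeculativeToNumber, op->opcode());
  DCHECK(NumberOperationParametersOf(op) == p);  // compares hint and feedback
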
size_t hash_value(AllocateParameters info) {
return base::hash_combine(info.type(), info.pretenure());
}
@@ -537,7 +550,9 @@ Type* AllocateTypeOf(const Operator* op) {
}
UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kStringFromCodePoint, op->opcode());
+ DCHECK(op->opcode() == IrOpcode::kStringFromCodePoint ||
+ op->opcode() == IrOpcode::kStringCodePointAt ||
+ op->opcode() == IrOpcode::kSeqStringCodePointAt);
return OpParameter<UnicodeEncoding>(op);
}
@@ -553,7 +568,8 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
const CheckTaggedInputParameters& CheckTaggedInputParametersOf(
const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
+ DCHECK(op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32 ||
+ op->opcode() == IrOpcode::kCheckedTaggedToFloat64);
return OpParameter<CheckTaggedInputParameters>(op);
}
@@ -655,18 +671,12 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
V(StringToNumber, Operator::kNoProperties, 1, 0) \
- V(StringCharAt, Operator::kNoProperties, 2, 1) \
- V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
- V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
- V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
- V(SeqStringCodePointAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
V(TypeOf, Operator::kNoProperties, 1, 1) \
- V(ClassOf, Operator::kNoProperties, 1, 1) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
@@ -710,6 +720,12 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NewConsString, Operator::kNoProperties, 3, 0) \
V(MaskIndexWithBound, Operator::kNoProperties, 2, 0)
+#define EFFECT_DEPENDENT_OP_LIST(V) \
+ V(StringCharAt, Operator::kNoProperties, 2, 1) \
+ V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringSubstring, Operator::kNoProperties, 3, 1)
+
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
V(SpeculativeNumberEqual) \
@@ -755,6 +771,20 @@ struct SimplifiedOperatorGlobalCache final {
PURE_OP_LIST(PURE)
#undef PURE
+#define EFFECT_DEPENDENT(Name, properties, value_input_count, \
+ control_input_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, \
+ Operator::kNoDeopt | Operator::kNoWrite | \
+ Operator::kNoThrow | properties, \
+ #Name, value_input_count, 1, control_input_count, 1, 1, \
+ 0) {} \
+ }; \
+ Name##Operator k##Name;
+ EFFECT_DEPENDENT_OP_LIST(EFFECT_DEPENDENT)
+#undef EFFECT_DEPENDENT
+
#define CHECKED(Name, value_input_count, value_output_count) \
struct Name##Operator final : public Operator { \
Name##Operator() \
@@ -791,6 +821,33 @@ struct SimplifiedOperatorGlobalCache final {
#undef CHECK_IF
template <UnicodeEncoding kEncoding>
+ struct StringCodePointAtOperator final : public Operator1<UnicodeEncoding> {
+ StringCodePointAtOperator()
+ : Operator1<UnicodeEncoding>(IrOpcode::kStringCodePointAt,
+ Operator::kFoldable | Operator::kNoThrow,
+ "StringCodePointAt", 2, 1, 1, 1, 1, 0,
+ kEncoding) {}
+ };
+ StringCodePointAtOperator<UnicodeEncoding::UTF16>
+ kStringCodePointAtOperatorUTF16;
+ StringCodePointAtOperator<UnicodeEncoding::UTF32>
+ kStringCodePointAtOperatorUTF32;
+
+ template <UnicodeEncoding kEncoding>
+ struct SeqStringCodePointAtOperator final
+ : public Operator1<UnicodeEncoding> {
+ SeqStringCodePointAtOperator()
+ : Operator1<UnicodeEncoding>(IrOpcode::kSeqStringCodePointAt,
+ Operator::kFoldable | Operator::kNoThrow,
+ "SeqStringCodePointAt", 2, 1, 1, 1, 1, 0,
+ kEncoding) {}
+ };
+ SeqStringCodePointAtOperator<UnicodeEncoding::UTF16>
+ kSeqStringCodePointAtOperatorUTF16;
+ SeqStringCodePointAtOperator<UnicodeEncoding::UTF32>
+ kSeqStringCodePointAtOperatorUTF32;
+
+ template <UnicodeEncoding kEncoding>
struct StringFromCodePointOperator final : public Operator1<UnicodeEncoding> {
StringFromCodePointOperator()
: Operator1<UnicodeEncoding>(IrOpcode::kStringFromCodePoint,
@@ -891,12 +948,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckTaggedInputMode kMode>
struct CheckedTaggedToFloat64Operator final
- : public Operator1<CheckTaggedInputMode> {
+ : public Operator1<CheckTaggedInputParameters> {
CheckedTaggedToFloat64Operator()
- : Operator1<CheckTaggedInputMode>(
+ : Operator1<CheckTaggedInputParameters>(
IrOpcode::kCheckedTaggedToFloat64,
Operator::kFoldable | Operator::kNoThrow,
- "CheckedTaggedToFloat64", 1, 1, 1, 1, 1, 0, kMode) {}
+ "CheckedTaggedToFloat64", 1, 1, 1, 1, 1, 0,
+ CheckTaggedInputParameters(kMode, VectorSlotPair())) {}
};
CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumber>
kCheckedTaggedToFloat64NumberOperator;
@@ -1004,14 +1062,13 @@ struct SimplifiedOperatorGlobalCache final {
template <NumberOperationHint kHint>
struct SpeculativeToNumberOperator final
- : public Operator1<NumberOperationHint> {
+ : public Operator1<NumberOperationParameters> {
SpeculativeToNumberOperator()
- : Operator1<NumberOperationHint>(
- IrOpcode::kSpeculativeToNumber, // opcode
- Operator::kFoldable | Operator::kNoThrow, // flags
- "SpeculativeToNumber", // name
- 1, 1, 1, 1, 1, 0, // counts
- kHint) {} // parameter
+ : Operator1<NumberOperationParameters>(
+ IrOpcode::kSpeculativeToNumber,
+ Operator::kFoldable | Operator::kNoThrow, "SpeculativeToNumber",
+ 1, 1, 1, 1, 1, 0,
+ NumberOperationParameters(kHint, VectorSlotPair())) {}
};
SpeculativeToNumberOperator<NumberOperationHint::kSignedSmall>
kSpeculativeToNumberSignedSmallOperator;
@@ -1032,6 +1089,7 @@ SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
#define GET_FROM_CACHE(Name, ...) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
PURE_OP_LIST(GET_FROM_CACHE)
+EFFECT_DEPENDENT_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
GET_FROM_CACHE(ArrayBufferWasNeutered)
GET_FROM_CACHE(ArgumentsFrame)
@@ -1140,14 +1198,19 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
- CheckTaggedInputMode mode) {
- switch (mode) {
- case CheckTaggedInputMode::kNumber:
- return &cache_.kCheckedTaggedToFloat64NumberOperator;
- case CheckTaggedInputMode::kNumberOrOddball:
- return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
+ CheckTaggedInputMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return &cache_.kCheckedTaggedToFloat64NumberOperator;
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckTaggedInputParameters>(
+ IrOpcode::kCheckedTaggedToFloat64,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToFloat64", 1, 1,
+ 1, 1, 1, 0, CheckTaggedInputParameters(mode, feedback));
}
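
CheckedTaggedToFloat64 now follows the same pattern as the other feedback-carrying operators: hand out a statically cached operator when the feedback slot is invalid (the common case), and zone-allocate a parameterized one otherwise, so the fast path stays allocation-free. A self-contained sketch of the idiom with hypothetical stand-in types:

  #include <memory>
  #include <vector>

  struct Op { int mode; int feedback; };  // stand-in for Operator1<>
  static const Op kCachedNumber{0, -1};
  static const Op kCachedNumberOrOddball{1, -1};

  // Sketch: shared singleton without feedback, fresh allocation with it.
  const Op* GetCheckedTaggedToFloat64(int mode, int feedback,
                                      std::vector<std::unique_ptr<Op>>* zone) {
    if (feedback < 0)  // models "!feedback.IsValid()"
      return mode == 0 ? &kCachedNumber : &kCachedNumberOrOddball;
    zone->push_back(std::make_unique<Op>(Op{mode, feedback}));
    return zone->back().get();
  }
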
const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
@@ -1222,20 +1285,25 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
}
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
- NumberOperationHint hint) {
- switch (hint) {
- case NumberOperationHint::kSignedSmall:
- return &cache_.kSpeculativeToNumberSignedSmallOperator;
- case NumberOperationHint::kSignedSmallInputs:
- break;
- case NumberOperationHint::kSigned32:
- return &cache_.kSpeculativeToNumberSigned32Operator;
- case NumberOperationHint::kNumber:
- return &cache_.kSpeculativeToNumberNumberOperator;
- case NumberOperationHint::kNumberOrOddball:
- return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
+ NumberOperationHint hint, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ return &cache_.kSpeculativeToNumberSignedSmallOperator;
+ case NumberOperationHint::kSignedSmallInputs:
+ break;
+ case NumberOperationHint::kSigned32:
+ return &cache_.kSpeculativeToNumberSigned32Operator;
+ case NumberOperationHint::kNumber:
+ return &cache_.kSpeculativeToNumberNumberOperator;
+ case NumberOperationHint::kNumberOrOddball:
+ return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<NumberOperationParameters>(
+ IrOpcode::kSpeculativeToNumber, Operator::kFoldable | Operator::kNoThrow,
+ "SpeculativeToNumber", 1, 1, 1, 1, 1, 0,
+ NumberOperationParameters(hint, feedback));
}
const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
@@ -1378,6 +1446,28 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw(
"AllocateRaw", 1, 1, 1, 1, 1, 1, AllocateParameters(type, pretenure));
}
+const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
+ UnicodeEncoding encoding) {
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ return &cache_.kStringCodePointAtOperatorUTF16;
+ case UnicodeEncoding::UTF32:
+ return &cache_.kStringCodePointAtOperatorUTF32;
+ }
+ UNREACHABLE();
+}
+
+const Operator* SimplifiedOperatorBuilder::SeqStringCodePointAt(
+ UnicodeEncoding encoding) {
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ return &cache_.kSeqStringCodePointAtOperatorUTF16;
+ case UnicodeEncoding::UTF32:
+ return &cache_.kSeqStringCodePointAtOperatorUTF32;
+ }
+ UNREACHABLE();
+}
+
const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
UnicodeEncoding encoding) {
switch (encoding) {
@@ -1463,6 +1553,7 @@ const Operator* SimplifiedOperatorBuilder::TransitionAndStoreNonNumberElement(
}
#undef PURE_OP_LIST
+#undef EFFECT_DEPENDENT_OP_LIST
#undef SPECULATIVE_NUMBER_BINOP_LIST
#undef CHECKED_WITH_FEEDBACK_OP_LIST
#undef CHECKED_OP_LIST
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 10961cf452..09a1fed476 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -134,8 +134,6 @@ size_t hash_value(CheckTaggedInputMode);
std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
-CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*);
-
class CheckTaggedInputParameters {
public:
CheckTaggedInputParameters(CheckTaggedInputMode mode,
@@ -353,6 +351,28 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
NumberOperationHint NumberOperationHintOf(const Operator* op)
WARN_UNUSED_RESULT;
+class NumberOperationParameters {
+ public:
+ NumberOperationParameters(NumberOperationHint hint,
+ const VectorSlotPair& feedback)
+ : hint_(hint), feedback_(feedback) {}
+
+ NumberOperationHint hint() const { return hint_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ NumberOperationHint hint_;
+ VectorSlotPair feedback_;
+};
+
+size_t hash_value(NumberOperationParameters const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ const NumberOperationParameters&);
+bool operator==(NumberOperationParameters const&,
+ NumberOperationParameters const&);
+const NumberOperationParameters& NumberOperationParametersOf(const Operator* op)
+ WARN_UNUSED_RESULT;
+
int FormalParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
bool IsRestLengthOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -494,7 +514,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SameValue();
const Operator* TypeOf();
- const Operator* ClassOf();
const Operator* ToBoolean();
@@ -504,19 +523,21 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringCharAt();
const Operator* StringCharCodeAt();
const Operator* SeqStringCharCodeAt();
- const Operator* StringCodePointAt();
- const Operator* SeqStringCodePointAt();
+ const Operator* StringCodePointAt(UnicodeEncoding encoding);
+ const Operator* SeqStringCodePointAt(UnicodeEncoding encoding);
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
const Operator* StringToUpperCaseIntl();
+ const Operator* StringSubstring();
const Operator* FindOrderedHashMapEntry();
const Operator* FindOrderedHashMapEntryForInt32Key();
- const Operator* SpeculativeToNumber(NumberOperationHint hint);
+ const Operator* SpeculativeToNumber(NumberOperationHint hint,
+ const VectorSlotPair& feedback);
const Operator* StringToNumber();
const Operator* PlainPrimitiveToNumber();
@@ -570,7 +591,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedInt32Sub();
const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* CheckedTaggedSignedToInt32(const VectorSlotPair& feedback);
- const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
+ const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode,
+ const VectorSlotPair& feedback);
const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode,
const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 672acb203d..fac466c36a 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -27,12 +27,11 @@ namespace compiler {
// expression will be evaluated at runtime. If it evaluates to false, then an
// error message will be shown containing the condition, as well as the extra
// info formatted like with printf.
-#define CHECK_EXTRA(condition, fmt, ...) \
- do { \
- if (V8_UNLIKELY(!(condition))) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s. Extra info: " fmt, \
- #condition, ##__VA_ARGS__); \
- } \
+#define CHECK_EXTRA(condition, fmt, ...) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ FATAL("Check failed: %s. Extra info: " fmt, #condition, ##__VA_ARGS__); \
+ } \
} while (0)
#ifdef DEBUG
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 428688abde..ba82536d3d 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -36,6 +36,8 @@ class TypeCache final {
Type* const kUint32 = Type::Unsigned32();
Type* const kFloat32 = Type::Number();
Type* const kFloat64 = Type::Number();
+ Type* const kBigInt64 = Type::BigInt();
+ Type* const kBigUint64 = Type::BigInt();
Type* const kHoleySmi =
Type::Union(Type::SignedSmall(), Type::Hole(), zone());
@@ -95,8 +97,8 @@ class TypeCache final {
// [0, kMaxUInt32].
Type* const kJSArrayLengthType = Type::Unsigned32();
- // The JSTyped::length property always contains a tagged number in the range
- // [0, kMaxSmiValue].
+ // The JSTypedArray::length property always contains a tagged number in the
+ // range [0, kMaxSmiValue].
Type* const kJSTypedArrayLengthType = Type::UnsignedSmall();
// The String::length property always contains a smi in the range
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 12c9a194b8..418fc17859 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -1150,10 +1150,6 @@ Type* Typer::Visitor::TypeJSNegate(Node* node) {
return TypeUnaryOp(node, Negate);
}
-Type* Typer::Visitor::TypeClassOf(Node* node) {
- return Type::InternalizedStringOrNull();
-}
-
Type* Typer::Visitor::TypeTypeOf(Node* node) {
return Type::InternalizedString();
}
@@ -1229,10 +1225,18 @@ Type* Typer::Visitor::TypeJSCreateIterResultObject(Node* node) {
return Type::OtherObject();
}
+Type* Typer::Visitor::TypeJSCreateStringIterator(Node* node) {
+ return Type::OtherObject();
+}
+
Type* Typer::Visitor::TypeJSCreateKeyValueArray(Node* node) {
return Type::OtherObject();
}
+Type* Typer::Visitor::TypeJSCreatePromise(Node* node) {
+ return Type::OtherObject();
+}
+
Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
return Type::Array();
}
@@ -1576,8 +1580,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kStringToString:
case kStringToUpperCase:
case kStringTrim:
- case kStringTrimLeft:
- case kStringTrimRight:
+ case kStringTrimEnd:
+ case kStringTrimStart:
case kStringValueOf:
return Type::String();
@@ -1768,8 +1772,6 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return TypeUnaryOp(node, ToObject);
case Runtime::kInlineToString:
return TypeUnaryOp(node, ToString);
- case Runtime::kInlineClassOf:
- return Type::InternalizedStringOrNull();
case Runtime::kHasInPrototypeChain:
return Type::Boolean();
default:
@@ -1822,6 +1824,10 @@ Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
return Type::SignedSmall();
}
+Type* Typer::Visitor::TypeJSGeneratorRestoreContext(Node* node) {
+ return Type::Any();
+}
+
Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
return Type::Any();
}
@@ -1834,6 +1840,26 @@ Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeJSFulfillPromise(Node* node) {
+ return Type::Undefined();
+}
+
+Type* Typer::Visitor::TypeJSPerformPromiseThen(Node* node) {
+ return Type::Receiver();
+}
+
+Type* Typer::Visitor::TypeJSPromiseResolve(Node* node) {
+ return Type::Receiver();
+}
+
+Type* Typer::Visitor::TypeJSRejectPromise(Node* node) {
+ return Type::Undefined();
+}
+
+Type* Typer::Visitor::TypeJSResolvePromise(Node* node) {
+ return Type::Undefined();
+}
+
// Simplified operators.
Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
@@ -1936,9 +1962,13 @@ Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
-Type* Typer::Visitor::TypeStringToLowerCaseIntl(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeStringToLowerCaseIntl(Node* node) {
+ return Type::String();
+}
-Type* Typer::Visitor::TypeStringToUpperCaseIntl(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeStringToUpperCaseIntl(Node* node) {
+ return Type::String();
+}
Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
@@ -1972,6 +2002,8 @@ Type* Typer::Visitor::TypeStringLength(Node* node) {
return typer_->cache_.kStringLengthType;
}
+Type* Typer::Visitor::TypeStringSubstring(Node* node) { return Type::String(); }
+
Type* Typer::Visitor::TypeMaskIndexWithBound(Node* node) {
return Type::Union(Operand(node, 0), typer_->cache_.kSingletonZero, zone());
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index a3e90d579a..3e3dbbe769 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -273,9 +273,11 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case BYTECODE_ARRAY_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
+ case FEEDBACK_CELL_TYPE:
case FEEDBACK_VECTOR_TYPE:
case PROPERTY_ARRAY_TYPE:
case FOREIGN_TYPE:
+ case SCOPE_INFO_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
@@ -299,8 +301,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case OBJECT_TEMPLATE_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
- case PROMISE_REACTION_JOB_INFO_TYPE:
+ case PROMISE_CAPABILITY_TYPE:
+ case PROMISE_REACTION_TYPE:
case DEBUG_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
case WEAK_CELL_TYPE:
@@ -314,6 +316,11 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case CONTEXT_EXTENSION_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
case CODE_DATA_CONTAINER_TYPE:
+ case CALLBACK_TASK_TYPE:
+ case CALLABLE_TASK_TYPE:
+ case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE:
+ case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
+ case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
UNREACHABLE();
}
UNREACHABLE();
@@ -610,11 +617,6 @@ bool UnionType::Wellformed() {
// -----------------------------------------------------------------------------
// Union and intersection
-static bool AddIsSafe(int x, int y) {
- return x >= 0 ? y <= std::numeric_limits<int>::max() - x
- : y >= std::numeric_limits<int>::min() - x;
-}
-
Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
@@ -642,10 +644,9 @@ Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
bitset bits = type1->BitsetGlb() & type2->BitsetGlb();
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
- if (!AddIsSafe(size1, size2)) return Any();
- int size = size1 + size2;
- if (!AddIsSafe(size, 2)) return Any();
- size += 2;
+ int size;
+ if (base::bits::SignedAddOverflow32(size1, size2, &size)) return Any();
+ if (base::bits::SignedAddOverflow32(size, 2, &size)) return Any();
Type* result_type = UnionType::New(size, zone);
UnionType* result = result_type->AsUnion();
size = 0;
@@ -844,10 +845,9 @@ Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
// Slow case: create union.
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
- if (!AddIsSafe(size1, size2)) return Any();
- int size = size1 + size2;
- if (!AddIsSafe(size, 2)) return Any();
- size += 2;
+ int size;
+ if (base::bits::SignedAddOverflow32(size1, size2, &size)) return Any();
+ if (base::bits::SignedAddOverflow32(size, 2, &size)) return Any();
Type* result_type = UnionType::New(size, zone);
UnionType* result = result_type->AsUnion();
size = 0;
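
AddIsSafe pre-checked the bounds before adding; base::bits::SignedAddOverflow32 instead performs the addition and reports overflow in one step, which is both shorter and harder to get wrong. A sketch of the equivalent using the compiler builtin such helpers typically wrap (base::bits is the real API here; the rest is illustrative):

  #include <cstdint>

  bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
    return __builtin_add_overflow(lhs, rhs, val);  // GCC/Clang builtin
  }

  // Mirrors the union-size computation above: bail out to Any() on overflow.
  int32_t UnionSizeOrMinusOne(int32_t size1, int32_t size2) {
    int32_t size;
    if (SignedAddOverflow32(size1, size2, &size)) return -1;
    if (SignedAddOverflow32(size, 2, &size)) return -1;
    return size;
  }
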
diff --git a/deps/v8/src/compiler/unwinding-info-writer.h b/deps/v8/src/compiler/unwinding-info-writer.h
index 86f5e9e800..723b6f9ec2 100644
--- a/deps/v8/src/compiler/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/unwinding-info-writer.h
@@ -52,4 +52,4 @@ class UnwindingInfoWriter {
#endif
-#endif
+#endif // V8_COMPILER_UNWINDING_INFO_WRITER_H_
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index a66a73f5d3..e9a5be6f65 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -191,20 +191,18 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
}
if (discovered_if_success && !discovered_if_exception) {
- V8_Fatal(__FILE__, __LINE__,
- "#%d:%s should be followed by IfSuccess/IfException, but is "
- "only followed by single #%d:%s",
- node->id(), node->op()->mnemonic(),
- discovered_if_success->id(),
- discovered_if_success->op()->mnemonic());
+ FATAL(
+ "#%d:%s should be followed by IfSuccess/IfException, but is "
+ "only followed by single #%d:%s",
+ node->id(), node->op()->mnemonic(), discovered_if_success->id(),
+ discovered_if_success->op()->mnemonic());
}
if (discovered_if_exception && !discovered_if_success) {
- V8_Fatal(__FILE__, __LINE__,
- "#%d:%s should be followed by IfSuccess/IfException, but is "
- "only followed by single #%d:%s",
- node->id(), node->op()->mnemonic(),
- discovered_if_exception->id(),
- discovered_if_exception->op()->mnemonic());
+ FATAL(
+ "#%d:%s should be followed by IfSuccess/IfException, but is "
+ "only followed by single #%d:%s",
+ node->id(), node->op()->mnemonic(), discovered_if_exception->id(),
+ discovered_if_exception->op()->mnemonic());
}
if (discovered_if_success || discovered_if_exception) {
CHECK_EQ(2, total_number_of_control_uses);
@@ -268,12 +266,13 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
}
case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- CHECK_EQ(IrOpcode::kBranch,
- NodeProperties::GetControlInput(node, 0)->opcode());
+ case IrOpcode::kIfFalse: {
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ CHECK_EQ(IrOpcode::kBranch, control->opcode());
// Type is empty.
CheckNotTyped(node);
break;
+ }
case IrOpcode::kIfSuccess: {
// IfSuccess and IfException continuation only on throwing nodes.
Node* input = NodeProperties::GetControlInput(node, 0);
@@ -311,8 +310,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
}
default: {
- V8_Fatal(__FILE__, __LINE__, "Switch #%d illegally used by #%d:%s",
- node->id(), use->id(), use->op()->mnemonic());
+ FATAL("Switch #%d illegally used by #%d:%s", node->id(), use->id(),
+ use->op()->mnemonic());
break;
}
}
@@ -572,6 +571,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
case IrOpcode::kObjectId:
CheckTypeIs(node, Type::Object());
+ break;
case IrOpcode::kStateValues:
case IrOpcode::kTypedStateValues:
case IrOpcode::kArgumentsElementsState:
@@ -677,10 +677,18 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreateStringIterator:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateKeyValueArray:
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreatePromise:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateLiteralArray:
// Type is Array.
CheckTypeIs(node, Type::Array());
@@ -742,10 +750,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is Boolean.
CheckTypeIs(node, Type::Boolean());
break;
- case IrOpcode::kClassOf:
- // Type is InternaliedString \/ Null.
- CheckTypeIs(node, Type::InternalizedStringOrNull());
- break;
case IrOpcode::kTypeOf:
// Type is InternalizedString.
CheckTypeIs(node, Type::InternalizedString());
@@ -831,6 +835,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::SignedSmall());
break;
+ case IrOpcode::kJSGeneratorRestoreContext:
+ CheckTypeIs(node, Type::Any());
+ break;
+
case IrOpcode::kJSGeneratorRestoreRegister:
CheckTypeIs(node, Type::Any());
break;
@@ -845,6 +853,35 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckNotTyped(node);
break;
+ case IrOpcode::kJSFulfillPromise:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::Undefined());
+ break;
+ case IrOpcode::kJSPerformPromiseThen:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckValueInputIs(node, 2, Type::Any());
+ CheckValueInputIs(node, 3, Type::Any());
+ CheckTypeIs(node, Type::Receiver());
+ break;
+ case IrOpcode::kJSPromiseResolve:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::Receiver());
+ break;
+ case IrOpcode::kJSRejectPromise:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckValueInputIs(node, 2, Type::Any());
+ CheckTypeIs(node, Type::Undefined());
+ break;
+ case IrOpcode::kJSResolvePromise:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Any());
+ CheckTypeIs(node, Type::Undefined());
+ break;
+
case IrOpcode::kComment:
case IrOpcode::kDebugAbort:
case IrOpcode::kDebugBreak:
@@ -1097,7 +1134,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::String());
CheckTypeIs(node, Type::String());
break;
-
+ case IrOpcode::kStringSubstring:
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::SignedSmall());
+ CheckValueInputIs(node, 2, Type::SignedSmall());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kReferenceEqual:
// (Unique, Any) -> Boolean and
// (Any, Unique) -> Boolean
@@ -1471,6 +1513,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Machine operators
// -----------------------
case IrOpcode::kLoad:
+ case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
@@ -1630,21 +1673,27 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
+ case IrOpcode::kSpeculationPoison:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
- case IrOpcode::kAtomicLoad:
- case IrOpcode::kAtomicStore:
- case IrOpcode::kAtomicExchange:
- case IrOpcode::kAtomicCompareExchange:
- case IrOpcode::kAtomicAdd:
- case IrOpcode::kAtomicSub:
- case IrOpcode::kAtomicAnd:
- case IrOpcode::kAtomicOr:
- case IrOpcode::kAtomicXor:
+ case IrOpcode::kWord32AtomicLoad:
+ case IrOpcode::kWord32AtomicStore:
+ case IrOpcode::kWord32AtomicExchange:
+ case IrOpcode::kWord32AtomicCompareExchange:
+ case IrOpcode::kWord32AtomicAdd:
+ case IrOpcode::kWord32AtomicSub:
+ case IrOpcode::kWord32AtomicAnd:
+ case IrOpcode::kWord32AtomicOr:
+ case IrOpcode::kWord32AtomicXor:
case IrOpcode::kSpeculationFence:
+ case IrOpcode::kSignExtendWord8ToInt32:
+ case IrOpcode::kSignExtendWord16ToInt32:
+ case IrOpcode::kSignExtendWord8ToInt64:
+ case IrOpcode::kSignExtendWord16ToInt64:
+ case IrOpcode::kSignExtendWord32ToInt64:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
@@ -1673,9 +1722,8 @@ void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs,
other->opcode() == IrOpcode::kProjection &&
other->InputAt(0) == node &&
ProjectionIndexOf(other->op()) == ProjectionIndexOf(proj->op())) {
- V8_Fatal(__FILE__, __LINE__,
- "Node #%d:%s has duplicate projections #%d and #%d",
- node->id(), node->op()->mnemonic(), proj->id(), other->id());
+ FATAL("Node #%d:%s has duplicate projections #%d and #%d", node->id(),
+ node->op()->mnemonic(), proj->id(), other->id());
}
}
}
@@ -1726,10 +1774,9 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
Node* input = node->InputAt(j);
if (!HasDominatingDef(schedule, node->InputAt(j), block, use_block,
use_pos)) {
- V8_Fatal(__FILE__, __LINE__,
- "Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
- node->id(), node->op()->mnemonic(), block->rpo_number(), j,
- input->id(), input->op()->mnemonic());
+ FATAL("Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
+ node->id(), node->op()->mnemonic(), block->rpo_number(), j,
+ input->id(), input->op()->mnemonic());
}
}
// Ensure that nodes are dominated by their control inputs;
@@ -1739,10 +1786,9 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
node->opcode() != IrOpcode::kEnd) {
Node* ctl = NodeProperties::GetControlInput(node);
if (!Dominates(schedule, ctl, node)) {
- V8_Fatal(__FILE__, __LINE__,
- "Node #%d:%s in B%d is not dominated by control input #%d:%s",
- node->id(), node->op()->mnemonic(), block->rpo_number(),
- ctl->id(), ctl->op()->mnemonic());
+ FATAL("Node #%d:%s in B%d is not dominated by control input #%d:%s",
+ node->id(), node->op()->mnemonic(), block->rpo_number(), ctl->id(),
+ ctl->op()->mnemonic());
}
}
}
@@ -1835,8 +1881,8 @@ void ScheduleVerifier::Run(Schedule* schedule) {
BitVector* block_doms = dominators[block->id().ToSize()];
BasicBlock* idom = block->dominator();
if (idom != nullptr && !block_doms->Contains(idom->id().ToInt())) {
- V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
- block->rpo_number(), idom->rpo_number());
+ FATAL("Block B%d is not dominated by B%d", block->rpo_number(),
+ idom->rpo_number());
}
for (size_t s = 0; s < block->SuccessorCount(); s++) {
BasicBlock* succ = block->SuccessorAt(s);
@@ -1872,9 +1918,8 @@ void ScheduleVerifier::Run(Schedule* schedule) {
schedule->GetBlockById(BasicBlock::Id::FromInt(it.Current()));
if (dom != idom &&
!dominators[idom->id().ToSize()]->Contains(dom->id().ToInt())) {
- V8_Fatal(__FILE__, __LINE__,
- "Block B%d is not immediately dominated by B%d",
- block->rpo_number(), idom->rpo_number());
+ FATAL("Block B%d is not immediately dominated by B%d",
+ block->rpo_number(), idom->rpo_number());
}
}
}
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 9bbf5f3a3f..080479a010 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -10,6 +10,7 @@
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
+#include "src/base/v8-fallthrough.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
@@ -53,9 +54,9 @@ namespace compiler {
#define WASM_64 0
#endif
-#define FATAL_UNSUPPORTED_OPCODE(opcode) \
- V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", (opcode), \
- wasm::WasmOpcodes::OpcodeName(opcode));
+#define FATAL_UNSUPPORTED_OPCODE(opcode) \
+ FATAL("Unsupported opcode 0x%x:%s", (opcode), \
+ wasm::WasmOpcodes::OpcodeName(opcode));
namespace {
@@ -227,14 +228,14 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Handle<Code> code = BUILTIN_CODE(jsgraph()->isolate(), WasmStackGuard);
CallInterfaceDescriptor idesc =
WasmRuntimeCallDescriptor(jsgraph()->isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), idesc, 0,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), 1, Linkage::kNoContext);
Node* stub_code = jsgraph()->HeapConstant(code);
- Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- *effect, stack_check.if_false);
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(call_descriptor),
+ stub_code, *effect, stack_check.if_false);
SetSourcePosition(call, position);
@@ -563,16 +564,15 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
case wasm::kExprF64Sqrt:
op = m->Float64Sqrt();
break;
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32UConvertF32:
case wasm::kExprI32SConvertF64:
- return BuildI32SConvertF64(input, position, NumericImplementation::kTrap);
- case wasm::kExprI32SConvertSatF64:
- return BuildI32SConvertF64(input, position,
- NumericImplementation::kSaturate);
case wasm::kExprI32UConvertF64:
- return BuildI32UConvertF64(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32SConvertSatF64:
case wasm::kExprI32UConvertSatF64:
- return BuildI32UConvertF64(input, position,
- NumericImplementation::kSaturate);
+ case wasm::kExprI32SConvertSatF32:
+ case wasm::kExprI32UConvertSatF32:
+ return BuildIntConvertFloat(input, position, opcode);
case wasm::kExprI32AsmjsSConvertF64:
return BuildI32AsmjsSConvertF64(input);
case wasm::kExprI32AsmjsUConvertF64:
@@ -592,16 +592,6 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
case wasm::kExprF32UConvertI32:
op = m->RoundUint32ToFloat32();
break;
- case wasm::kExprI32SConvertF32:
- return BuildI32SConvertF32(input, position, NumericImplementation::kTrap);
- case wasm::kExprI32SConvertSatF32:
- return BuildI32SConvertF32(input, position,
- NumericImplementation::kSaturate);
- case wasm::kExprI32UConvertF32:
- return BuildI32UConvertF32(input, position, NumericImplementation::kTrap);
- case wasm::kExprI32UConvertSatF32:
- return BuildI32UConvertF32(input, position,
- NumericImplementation::kSaturate);
case wasm::kExprI32AsmjsSConvertF32:
return BuildI32AsmjsSConvertF32(input);
case wasm::kExprI32AsmjsUConvertF32:
@@ -780,14 +770,32 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
}
op = m->RoundUint64ToFloat64();
break;
+ case wasm::kExprI32SExtendI8:
+ op = m->SignExtendWord8ToInt32();
+ break;
+ case wasm::kExprI32SExtendI16:
+ op = m->SignExtendWord16ToInt32();
+ break;
+ case wasm::kExprI64SExtendI8:
+ op = m->SignExtendWord8ToInt64();
+ break;
+ case wasm::kExprI64SExtendI16:
+ op = m->SignExtendWord16ToInt64();
+ break;
+ case wasm::kExprI64SExtendI32:
+ op = m->SignExtendWord32ToInt64();
+ break;
case wasm::kExprI64SConvertF32:
- return BuildI64SConvertF32(input, position);
- case wasm::kExprI64SConvertF64:
- return BuildI64SConvertF64(input, position);
case wasm::kExprI64UConvertF32:
- return BuildI64UConvertF32(input, position);
+ case wasm::kExprI64SConvertF64:
case wasm::kExprI64UConvertF64:
- return BuildI64UConvertF64(input, position);
+ case wasm::kExprI64SConvertSatF32:
+ case wasm::kExprI64UConvertSatF32:
+ case wasm::kExprI64SConvertSatF64:
+ case wasm::kExprI64UConvertSatF64:
+ return jsgraph()->machine()->Is32()
+ ? BuildCcallConvertFloat(input, position, opcode)
+ : BuildIntConvertFloat(input, position, opcode);
case wasm::kExprI32AsmjsLoadMem8S:
return BuildAsmjsLoadMem(MachineType::Int8(), input);
case wasm::kExprI32AsmjsLoadMem8U:
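
The new kExprI32SExtendI8 family above maps the wasm sign-extension operators directly onto machine ops. Their scalar semantics, as a plain C++ sketch (not V8 code): the value is reinterpreted as a signed 8-, 16-, or 32-bit integer and widened back.

#include <cstdint>

// Reference semantics for the five sign-extension operators. The casts to
// the narrow signed type assume two's complement wrapping, which is
// guaranteed in C++20 and universal in practice.
inline int32_t I32SExtendI8(int32_t x)  { return static_cast<int8_t>(x); }
inline int32_t I32SExtendI16(int32_t x) { return static_cast<int16_t>(x); }
inline int64_t I64SExtendI8(int64_t x)  { return static_cast<int8_t>(x); }
inline int64_t I64SExtendI16(int64_t x) { return static_cast<int16_t>(x); }
inline int64_t I64SExtendI32(int64_t x) { return static_cast<int32_t>(x); }

// Example: I32SExtendI8(0x80) == -128, I32SExtendI8(0x7F) == 127.
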
@@ -1037,12 +1045,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
case wasm::kWasmF64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
isFloat = true;
+ V8_FALLTHROUGH;
case wasm::kWasmI64:
result = jsgraph()->Int64Constant(0);
break;
case wasm::kWasmF32:
value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
isFloat = true;
+ V8_FALLTHROUGH;
case wasm::kWasmI32:
result = jsgraph()->Int32Constant(0);
break;
@@ -1184,12 +1194,14 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
case MachineRepresentation::kFloat64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
isFloat = true;
+ V8_FALLTHROUGH;
case MachineRepresentation::kWord64:
result = jsgraph()->Int64Constant(0);
break;
case MachineRepresentation::kFloat32:
value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
isFloat = true;
+ V8_FALLTHROUGH;
case MachineRepresentation::kWord32:
case MachineRepresentation::kWord16:
result = jsgraph()->Int32Constant(0);
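
These V8_FALLTHROUGH annotations replace bare comments so that -Wimplicit-fallthrough stays quiet while the intentional case fallthrough is kept. A sketch of the usual shape of such a macro; the authoritative definition is in the newly included src/base/v8-fallthrough.h, so the block below is an assumption about its form, not a copy:

// Illustrative definition only; see src/base/v8-fallthrough.h for the
// real one used by this commit.
#if defined(__clang__)
#define V8_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define V8_FALLTHROUGH __attribute__((fallthrough))
#else
#define V8_FALLTHROUGH
#endif
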
@@ -1373,167 +1385,281 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
#endif
}
-// Helper classes for float to int conversions.
-struct WasmGraphBuilder::IntConvertOps {
- MachineRepresentation word_rep() const {
- return MachineRepresentation::kWord32;
- }
- Node* zero() const { return builder_->Int32Constant(0); }
- virtual Node* min() const = 0;
- virtual Node* max() const = 0;
- virtual ~IntConvertOps() = default;
-
- protected:
- explicit IntConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
- WasmGraphBuilder* builder_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(IntConvertOps);
-};
-
-struct I32SConvertOps final : public WasmGraphBuilder::IntConvertOps {
- explicit I32SConvertOps(WasmGraphBuilder* builder)
- : WasmGraphBuilder::IntConvertOps(builder) {}
- ~I32SConvertOps() = default;
- Node* min() const {
- return builder_->Int32Constant(std::numeric_limits<int32_t>::min());
- }
- Node* max() const {
- return builder_->Int32Constant(std::numeric_limits<int32_t>::max());
- }
- DISALLOW_IMPLICIT_CONSTRUCTORS(I32SConvertOps);
-};
-
-struct I32UConvertOps final : public WasmGraphBuilder::IntConvertOps {
- explicit I32UConvertOps(WasmGraphBuilder* builder)
- : WasmGraphBuilder::IntConvertOps(builder) {}
- ~I32UConvertOps() = default;
- Node* min() const {
- return builder_->Int32Constant(std::numeric_limits<uint32_t>::min());
- }
- Node* max() const {
- return builder_->Int32Constant(std::numeric_limits<uint32_t>::max());
- }
- DISALLOW_IMPLICIT_CONSTRUCTORS(I32UConvertOps);
-};
-
-struct WasmGraphBuilder::FloatConvertOps {
- virtual Node* zero() const = 0;
- virtual wasm::WasmOpcode trunc_op() const = 0;
- virtual wasm::WasmOpcode ne_op() const = 0;
- virtual wasm::WasmOpcode lt_op() const = 0;
- virtual ~FloatConvertOps() = default;
-
- protected:
- explicit FloatConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
- WasmGraphBuilder* builder_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FloatConvertOps);
-};
-
-struct F32ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
- explicit F32ConvertOps(WasmGraphBuilder* builder)
- : WasmGraphBuilder::FloatConvertOps(builder) {}
- ~F32ConvertOps() = default;
- Node* zero() const { return builder_->Float32Constant(0.0); }
- wasm::WasmOpcode trunc_op() const { return wasm::kExprF32Trunc; }
- wasm::WasmOpcode ne_op() const { return wasm::kExprF32Ne; }
- wasm::WasmOpcode lt_op() const { return wasm::kExprF32Lt; }
- DISALLOW_IMPLICIT_CONSTRUCTORS(F32ConvertOps);
-};
-
-struct F64ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
- explicit F64ConvertOps(WasmGraphBuilder* builder)
- : WasmGraphBuilder::FloatConvertOps(builder) {}
- ~F64ConvertOps() = default;
- Node* zero() const { return builder_->Float64Constant(0.0); }
- wasm::WasmOpcode trunc_op() const { return wasm::kExprF64Trunc; }
- wasm::WasmOpcode ne_op() const { return wasm::kExprF64Ne; }
- wasm::WasmOpcode lt_op() const { return wasm::kExprF64Lt; }
- DISALLOW_IMPLICIT_CONSTRUCTORS(F64ConvertOps);
-};
-
-Node* WasmGraphBuilder::BuildConvertCheck(Node* test, Node* result, Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl,
- const IntConvertOps* int_ops,
- const FloatConvertOps* float_ops) {
- switch (impl) {
- case NumericImplementation::kTrap:
+namespace {
+
+MachineType IntConvertType(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32SConvertSatF32:
+ case wasm::kExprI32SConvertSatF64:
+ return MachineType::Int32();
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI32UConvertSatF32:
+ case wasm::kExprI32UConvertSatF64:
+ return MachineType::Uint32();
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64SConvertSatF32:
+ case wasm::kExprI64SConvertSatF64:
+ return MachineType::Int64();
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI64UConvertF64:
+ case wasm::kExprI64UConvertSatF32:
+ case wasm::kExprI64UConvertSatF64:
+ return MachineType::Uint64();
+ default:
+ UNREACHABLE();
+ }
+}
+
+MachineType FloatConvertType(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32SConvertSatF32:
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI32UConvertSatF32:
+ case wasm::kExprI64SConvertSatF32:
+ case wasm::kExprI64UConvertSatF32:
+ return MachineType::Float32();
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64UConvertF64:
+ case wasm::kExprI32SConvertSatF64:
+ case wasm::kExprI32UConvertSatF64:
+ case wasm::kExprI64SConvertSatF64:
+ case wasm::kExprI64UConvertSatF64:
+ return MachineType::Float64();
+ default:
+ UNREACHABLE();
+ }
+}
+
+const Operator* ConvertOp(WasmGraphBuilder* builder, wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32SConvertSatF32:
+ return builder->jsgraph()->machine()->TruncateFloat32ToInt32();
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32UConvertSatF32:
+ return builder->jsgraph()->machine()->TruncateFloat32ToUint32();
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32SConvertSatF64:
+ return builder->jsgraph()->machine()->ChangeFloat64ToInt32();
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI32UConvertSatF64:
+ return builder->jsgraph()->machine()->TruncateFloat64ToUint32();
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64SConvertSatF32:
+ return builder->jsgraph()->machine()->TryTruncateFloat32ToInt64();
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI64UConvertSatF32:
+ return builder->jsgraph()->machine()->TryTruncateFloat32ToUint64();
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64SConvertSatF64:
+ return builder->jsgraph()->machine()->TryTruncateFloat64ToInt64();
+ case wasm::kExprI64UConvertF64:
+ case wasm::kExprI64UConvertSatF64:
+ return builder->jsgraph()->machine()->TryTruncateFloat64ToUint64();
+ default:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmOpcode ConvertBackOp(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32SConvertSatF32:
+ return wasm::kExprF32SConvertI32;
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32UConvertSatF32:
+ return wasm::kExprF32UConvertI32;
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32SConvertSatF64:
+ return wasm::kExprF64SConvertI32;
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI32UConvertSatF64:
+ return wasm::kExprF64UConvertI32;
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool IsTrappingConvertOp(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI32SConvertF32:
+ case wasm::kExprI32UConvertF32:
+ case wasm::kExprI32SConvertF64:
+ case wasm::kExprI32UConvertF64:
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64UConvertF64:
+ return true;
+ case wasm::kExprI32SConvertSatF64:
+ case wasm::kExprI32UConvertSatF64:
+ case wasm::kExprI32SConvertSatF32:
+ case wasm::kExprI32UConvertSatF32:
+ case wasm::kExprI64SConvertSatF32:
+ case wasm::kExprI64UConvertSatF32:
+ case wasm::kExprI64SConvertSatF64:
+ case wasm::kExprI64UConvertSatF64:
+ return false;
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* Zero(WasmGraphBuilder* builder, const MachineType& ty) {
+ switch (ty.representation()) {
+ case MachineRepresentation::kWord32:
+ return builder->Int32Constant(0);
+ case MachineRepresentation::kWord64:
+ return builder->Int64Constant(0);
+ case MachineRepresentation::kFloat32:
+ return builder->Float32Constant(0.0);
+ case MachineRepresentation::kFloat64:
+ return builder->Float64Constant(0.0);
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* Min(WasmGraphBuilder* builder, const MachineType& ty) {
+ switch (ty.semantic()) {
+ case MachineSemantic::kInt32:
+ return builder->Int32Constant(std::numeric_limits<int32_t>::min());
+ case MachineSemantic::kUint32:
+ return builder->Int32Constant(std::numeric_limits<uint32_t>::min());
+ case MachineSemantic::kInt64:
+ return builder->Int64Constant(std::numeric_limits<int64_t>::min());
+ case MachineSemantic::kUint64:
+ return builder->Int64Constant(std::numeric_limits<uint64_t>::min());
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* Max(WasmGraphBuilder* builder, const MachineType& ty) {
+ switch (ty.semantic()) {
+ case MachineSemantic::kInt32:
+ return builder->Int32Constant(std::numeric_limits<int32_t>::max());
+ case MachineSemantic::kUint32:
+ return builder->Int32Constant(std::numeric_limits<uint32_t>::max());
+ case MachineSemantic::kInt64:
+ return builder->Int64Constant(std::numeric_limits<int64_t>::max());
+ case MachineSemantic::kUint64:
+ return builder->Int64Constant(std::numeric_limits<uint64_t>::max());
+ default:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmOpcode TruncOp(const MachineType& ty) {
+ switch (ty.representation()) {
+ case MachineRepresentation::kFloat32:
+ return wasm::kExprF32Trunc;
+ case MachineRepresentation::kFloat64:
+ return wasm::kExprF64Trunc;
+ default:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmOpcode NeOp(const MachineType& ty) {
+ switch (ty.representation()) {
+ case MachineRepresentation::kFloat32:
+ return wasm::kExprF32Ne;
+ case MachineRepresentation::kFloat64:
+ return wasm::kExprF64Ne;
+ default:
+ UNREACHABLE();
+ }
+}
+
+wasm::WasmOpcode LtOp(const MachineType& ty) {
+ switch (ty.representation()) {
+ case MachineRepresentation::kFloat32:
+ return wasm::kExprF32Lt;
+ case MachineRepresentation::kFloat64:
+ return wasm::kExprF64Lt;
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* ConvertTrapTest(WasmGraphBuilder* builder, wasm::WasmOpcode opcode,
+ const MachineType& int_ty, const MachineType& float_ty,
+ Node* trunc, Node* converted_value) {
+ if (int_ty.representation() == MachineRepresentation::kWord32) {
+ Node* check = builder->Unop(ConvertBackOp(opcode), converted_value);
+ return builder->Binop(NeOp(float_ty), trunc, check);
+ }
+ return builder->graph()->NewNode(builder->jsgraph()->common()->Projection(1),
+ trunc, builder->graph()->start());
+}
+
+Node* ConvertSaturateTest(WasmGraphBuilder* builder, wasm::WasmOpcode opcode,
+ const MachineType& int_ty,
+ const MachineType& float_ty, Node* trunc,
+ Node* converted_value) {
+ Node* test = ConvertTrapTest(builder, opcode, int_ty, float_ty, trunc,
+ converted_value);
+ if (int_ty.representation() == MachineRepresentation::kWord64) {
+ test = builder->Binop(wasm::kExprI64Eq, test, builder->Int64Constant(0));
+ }
+ return test;
+}
+
+} // namespace
+
+Node* WasmGraphBuilder::BuildIntConvertFloat(Node* input,
+ wasm::WasmCodePosition position,
+ wasm::WasmOpcode opcode) {
+ const MachineType int_ty = IntConvertType(opcode);
+ const MachineType float_ty = FloatConvertType(opcode);
+ const Operator* conv_op = ConvertOp(this, opcode);
+ Node* trunc = nullptr;
+ Node* converted_value = nullptr;
+ const bool is_int32 =
+ int_ty.representation() == MachineRepresentation::kWord32;
+ if (is_int32) {
+ trunc = Unop(TruncOp(float_ty), input);
+ converted_value = graph()->NewNode(conv_op, trunc);
+ } else {
+ trunc = graph()->NewNode(conv_op, input);
+ converted_value = graph()->NewNode(jsgraph()->common()->Projection(0),
+ trunc, graph()->start());
+ }
+ if (IsTrappingConvertOp(opcode)) {
+ Node* test =
+ ConvertTrapTest(this, opcode, int_ty, float_ty, trunc, converted_value);
+ if (is_int32) {
TrapIfTrue(wasm::kTrapFloatUnrepresentable, test, position);
- return result;
- case NumericImplementation::kSaturate: {
- Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
- tl_d.Chain(*control_);
- Diamond nan_d(graph(), jsgraph()->common(),
- Binop(float_ops->ne_op(), input, input), // Checks if NaN.
- BranchHint::kFalse);
- nan_d.Nest(tl_d, true);
- Diamond sat_d(graph(), jsgraph()->common(),
- Binop(float_ops->lt_op(), input, float_ops->zero()),
- BranchHint::kNone);
- sat_d.Nest(nan_d, false);
- Node* sat_val =
- sat_d.Phi(int_ops->word_rep(), int_ops->min(), int_ops->max());
- Node* nan_val = nan_d.Phi(int_ops->word_rep(), int_ops->zero(), sat_val);
- return tl_d.Phi(int_ops->word_rep(), nan_val, result);
+ } else {
+ ZeroCheck64(wasm::kTrapFloatUnrepresentable, test, position);
}
- }
- UNREACHABLE();
-}
-
-Node* WasmGraphBuilder::BuildI32ConvertOp(
- Node* input, wasm::WasmCodePosition position, NumericImplementation impl,
- const Operator* op, wasm::WasmOpcode check_op, const IntConvertOps* int_ops,
- const FloatConvertOps* float_ops) {
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(float_ops->trunc_op(), input);
- Node* result = graph()->NewNode(op, trunc);
-
- // Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we
- // trap/saturate.
- Node* check = Unop(check_op, result);
- Node* overflow = Binop(float_ops->ne_op(), trunc, check);
- return BuildConvertCheck(overflow, result, input, position, impl, int_ops,
- float_ops);
-}
-
-Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl) {
- I32SConvertOps int_ops(this);
- F32ConvertOps float_ops(this);
- return BuildI32ConvertOp(input, position, impl,
- jsgraph()->machine()->TruncateFloat32ToInt32(),
- wasm::kExprF32SConvertI32, &int_ops, &float_ops);
-}
-
-Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl) {
- I32SConvertOps int_ops(this);
- F64ConvertOps float_ops(this);
- return BuildI32ConvertOp(input, position, impl,
- jsgraph()->machine()->ChangeFloat64ToInt32(),
- wasm::kExprF64SConvertI32, &int_ops, &float_ops);
-}
-
-Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl) {
- I32UConvertOps int_ops(this);
- F32ConvertOps float_ops(this);
- return BuildI32ConvertOp(input, position, impl,
- jsgraph()->machine()->TruncateFloat32ToUint32(),
- wasm::kExprF32UConvertI32, &int_ops, &float_ops);
-}
-
-Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl) {
- I32UConvertOps int_ops(this);
- F64ConvertOps float_ops(this);
- return BuildI32ConvertOp(input, position, impl,
- jsgraph()->machine()->TruncateFloat64ToUint32(),
- wasm::kExprF64UConvertI32, &int_ops, &float_ops);
+ return converted_value;
+ }
+ Node* test = ConvertSaturateTest(this, opcode, int_ty, float_ty, trunc,
+ converted_value);
+ Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
+ tl_d.Chain(Control());
+ Node* nan_test = Binop(NeOp(float_ty), input, input);
+ Diamond nan_d(graph(), jsgraph()->common(), nan_test, BranchHint::kFalse);
+ nan_d.Nest(tl_d, true);
+ Node* neg_test = Binop(LtOp(float_ty), input, Zero(this, float_ty));
+ Diamond sat_d(graph(), jsgraph()->common(), neg_test, BranchHint::kNone);
+ sat_d.Nest(nan_d, false);
+ Node* sat_val =
+ sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
+ Node* nan_val =
+ nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
+ return tl_d.Phi(int_ty.representation(), nan_val, converted_value);
}
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
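
The nested Diamonds at the end of BuildIntConvertFloat encode, in graph form, the saturating semantics that replaced the per-opcode builders: a failed truncation falls into the NaN check (NaN yields 0), then the sign check (negative overflow yields the type minimum, positive overflow the maximum). On 64-bit targets the TryTruncate* machine ops return a (value, success) pair consumed through Projection(0) and Projection(1). A scalar model of one of the sixteen conversions, for illustration only:

#include <cmath>
#include <cstdint>
#include <limits>

// Scalar model of i32.trunc_sat_f64_s; this mirrors the tl_d/nan_d/sat_d
// diamond structure above rather than any V8 source.
int32_t I32SConvertSatF64(double x) {
  if (std::isnan(x)) return 0;  // nan_d: NaN saturates to zero
  constexpr int32_t kMin = std::numeric_limits<int32_t>::min();
  constexpr int32_t kMax = std::numeric_limits<int32_t>::max();
  const double t = std::trunc(x);
  if (t < static_cast<double>(kMin)) return kMin;  // sat_d: negative overflow
  if (t > static_cast<double>(kMax)) return kMax;  // sat_d: positive overflow
  return static_cast<int32_t>(t);  // tl_d: representable, use the truncation
}
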
@@ -1797,106 +1923,81 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
return load;
}
-Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- if (jsgraph()->machine()->Is32()) {
- return BuildFloatToIntConversionInstruction(
- input, ExternalReference::wasm_float32_to_int64(jsgraph()->isolate()),
- MachineRepresentation::kFloat32, MachineType::Int64(), position);
- } else {
- Node* trunc = graph()->NewNode(
- jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
- graph()->start());
- Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
- graph()->start());
- ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
- return result;
- }
-}
-
-Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- if (jsgraph()->machine()->Is32()) {
- return BuildFloatToIntConversionInstruction(
- input, ExternalReference::wasm_float32_to_uint64(jsgraph()->isolate()),
- MachineRepresentation::kFloat32, MachineType::Int64(), position);
- } else {
- Node* trunc = graph()->NewNode(
- jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
- graph()->start());
- Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
- graph()->start());
- ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
- return result;
- }
-}
+namespace {
-Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- if (jsgraph()->machine()->Is32()) {
- return BuildFloatToIntConversionInstruction(
- input, ExternalReference::wasm_float64_to_int64(jsgraph()->isolate()),
- MachineRepresentation::kFloat64, MachineType::Int64(), position);
- } else {
- Node* trunc = graph()->NewNode(
- jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
- graph()->start());
- Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
- graph()->start());
- ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
- return result;
+ExternalReference convert_ccall_ref(WasmGraphBuilder* builder,
+ wasm::WasmOpcode opcode) {
+ switch (opcode) {
+ case wasm::kExprI64SConvertF32:
+ case wasm::kExprI64SConvertSatF32:
+ return ExternalReference::wasm_float32_to_int64(
+ builder->jsgraph()->isolate());
+ case wasm::kExprI64UConvertF32:
+ case wasm::kExprI64UConvertSatF32:
+ return ExternalReference::wasm_float32_to_uint64(
+ builder->jsgraph()->isolate());
+ case wasm::kExprI64SConvertF64:
+ case wasm::kExprI64SConvertSatF64:
+ return ExternalReference::wasm_float64_to_int64(
+ builder->jsgraph()->isolate());
+ case wasm::kExprI64UConvertF64:
+ case wasm::kExprI64UConvertSatF64:
+ return ExternalReference::wasm_float64_to_uint64(
+ builder->jsgraph()->isolate());
+ default:
+ UNREACHABLE();
}
}
-Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- if (jsgraph()->machine()->Is32()) {
- return BuildFloatToIntConversionInstruction(
- input, ExternalReference::wasm_float64_to_uint64(jsgraph()->isolate()),
- MachineRepresentation::kFloat64, MachineType::Int64(), position);
- } else {
- Node* trunc = graph()->NewNode(
- jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
- graph()->start());
- Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
- graph()->start());
- ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
- return result;
- }
-}
+} // namespace
-Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
- Node* input, ExternalReference ref,
- MachineRepresentation parameter_representation,
- const MachineType result_type, wasm::WasmCodePosition position) {
+Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
+ wasm::WasmCodePosition position,
+ wasm::WasmOpcode opcode) {
+ const MachineType int_ty = IntConvertType(opcode);
+ const MachineType float_ty = FloatConvertType(opcode);
+ ExternalReference call_ref = convert_ccall_ref(this, opcode);
Node* stack_slot_param = graph()->NewNode(
- jsgraph()->machine()->StackSlot(parameter_representation));
+ jsgraph()->machine()->StackSlot(float_ty.representation()));
Node* stack_slot_result = graph()->NewNode(
- jsgraph()->machine()->StackSlot(result_type.representation()));
+ jsgraph()->machine()->StackSlot(int_ty.representation()));
const Operator* store_op = jsgraph()->machine()->Store(
- StoreRepresentation(parameter_representation, kNoWriteBarrier));
- *effect_ =
- graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
- input, *effect_, *control_);
+ StoreRepresentation(float_ty.representation(), kNoWriteBarrier));
+ *effect_ = graph()->NewNode(store_op, stack_slot_param, Int32Constant(0),
+ input, *effect_, *control_);
MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
sig_builder.AddReturn(MachineType::Int32());
sig_builder.AddParam(MachineType::Pointer());
sig_builder.AddParam(MachineType::Pointer());
- Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
- ZeroCheck32(wasm::kTrapFloatUnrepresentable,
- BuildCCall(sig_builder.Build(), function, stack_slot_param,
- stack_slot_result),
- position);
- const Operator* load_op = jsgraph()->machine()->Load(result_type);
- Node* load =
- graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
- *effect_, *control_);
- *effect_ = load;
- return load;
+ Node* function =
+ graph()->NewNode(jsgraph()->common()->ExternalConstant(call_ref));
+ Node* overflow = BuildCCall(sig_builder.Build(), function, stack_slot_param,
+ stack_slot_result);
+ if (IsTrappingConvertOp(opcode)) {
+ ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
+ const Operator* load_op = jsgraph()->machine()->Load(int_ty);
+ Node* load = graph()->NewNode(load_op, stack_slot_result, Int32Constant(0),
+ *effect_, *control_);
+ *effect_ = load;
+ return load;
+ }
+ Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position);
+ Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
+ tl_d.Chain(Control());
+ Node* nan_test = Binop(NeOp(float_ty), input, input);
+ Diamond nan_d(graph(), jsgraph()->common(), nan_test, BranchHint::kFalse);
+ nan_d.Nest(tl_d, true);
+ Node* neg_test = Binop(LtOp(float_ty), input, Zero(this, float_ty));
+ Diamond sat_d(graph(), jsgraph()->common(), neg_test, BranchHint::kNone);
+ sat_d.Nest(nan_d, false);
+ Node* sat_val =
+ sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
+ const Operator* load_op = jsgraph()->machine()->Load(int_ty);
+ Node* load = graph()->NewNode(load_op, stack_slot_result, Int32Constant(0),
+ *effect_, *control_);
+ Node* nan_val =
+ nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
+ return tl_d.Phi(int_ty.representation(), nan_val, load);
}
Node* WasmGraphBuilder::GrowMemory(Node* input) {
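
BuildCcallConvertFloat is the 32-bit fallback: the float travels to an out-of-line helper through a stack slot, and the helper's int32 return value doubles as the success flag, so ZeroCheck32 traps (or the saturating diamond kicks in) when it is zero. An assumed shape for one such helper behind ExternalReference::wasm_float64_to_int64; the name, signature, and return convention here are inferred from the call site, not copied from V8:

#include <cmath>
#include <cstdint>

// Hypothetical helper: reads the input from one stack slot, writes the
// result to another, and returns nonzero iff the value was representable.
int32_t float64_to_int64_wrapper(const double* input, int64_t* output) {
  const double x = *input;
  // -2^63 is exactly representable as a double; 2^63 is the first double
  // that no longer fits in int64_t.
  if (std::isnan(x) || x < -9223372036854775808.0 ||
      x >= 9223372036854775808.0) {
    return 0;  // unrepresentable: the caller traps or saturates
  }
  *output = static_cast<int64_t>(x);
  return 1;
}
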
@@ -1956,13 +2057,13 @@ Node* WasmGraphBuilder::Throw(uint32_t tag,
switch (sig->GetParam(i)) {
case wasm::kWasmF32:
value = graph()->NewNode(m->BitcastFloat32ToInt32(), value);
- // Intentionally fall to next case.
+ V8_FALLTHROUGH;
case wasm::kWasmI32:
BuildEncodeException32BitValue(&index, value);
break;
case wasm::kWasmF64:
value = graph()->NewNode(m->BitcastFloat64ToInt64(), value);
- // Intentionally fall to next case.
+ V8_FALLTHROUGH;
case wasm::kWasmI64: {
Node* upper32 = graph()->NewNode(
m->TruncateInt64ToInt32(),
@@ -2417,10 +2518,10 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
DCHECK_EQ(sizeof...(args), sig->parameter_count());
Node* const call_args[] = {function, args..., *effect_, *control_};
- CallDescriptor* desc =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(jsgraph()->zone(), sig);
- const Operator* op = jsgraph()->common()->Call(desc);
+ const Operator* op = jsgraph()->common()->Call(call_descriptor);
Node* call = graph()->NewNode(op, arraysize(call_args), call_args);
*effect_ = call;
return call;
@@ -2428,8 +2529,12 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
- wasm::WasmCodePosition position) {
- DCHECK_NOT_NULL(wasm_context_);
+ wasm::WasmCodePosition position,
+ Node* wasm_context) {
+ if (wasm_context == nullptr) {
+ DCHECK_NOT_NULL(wasm_context_);
+ wasm_context = wasm_context_.get();
+ }
SetNeedsStackCheck();
const size_t params = sig->parameter_count();
const size_t extra = 3; // wasm_context, effect, and control.
@@ -2440,14 +2545,14 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
// Make room for the wasm_context parameter at index 1, just after code.
memmove(&args[2], &args[1], params * sizeof(Node*));
- args[1] = wasm_context_.get();
+ args[1] = wasm_context;
// Add effect and control inputs.
args[params + 2] = *effect_;
args[params + 3] = *control_;
- CallDescriptor* descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig);
- const Operator* op = jsgraph()->common()->Call(descriptor);
+ auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ const Operator* op = jsgraph()->common()->Call(call_descriptor);
Node* call = graph()->NewNode(op, static_cast<int>(count), args);
SetSourcePosition(call, position);
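
With the new optional wasm_context parameter, BuildWasmCall can target a callee in a different instance: CallIndirect passes the context loaded from the table entry, while every other caller defaults to the builder's own wasm_context_. The input layout produced by the memmove above, sketched as a comment:

// Call node inputs for a signature with P wasm parameters:
//   args[0]        target code
//   args[1]        wasm_context      (slot opened by the memmove)
//   args[2..P+1]   wasm parameters
//   args[P+2]      effect
//   args[P+3]      control
// Hence extra == 3 and the node has P + 4 inputs in total.
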
@@ -2501,37 +2606,89 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
uint32_t table_index = 0;
wasm::FunctionSig* sig = env_->module->signatures[sig_index];
- EnsureFunctionTableNodes();
+ Node* table = nullptr;
+ Node* table_size = nullptr;
+ GetFunctionTableNodes(table_index, &table, &table_size);
MachineOperatorBuilder* machine = jsgraph()->machine();
Node* key = args[0];
// Bounds check against the table size.
- Node* size = function_tables_[table_index].size;
- Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
+ Node* in_bounds =
+ graph()->NewNode(machine->Uint32LessThan(), key, table_size);
TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
- Node* table_address = function_tables_[table_index].table_addr;
- Node* table = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), table_address,
- jsgraph()->IntPtrConstant(0), *effect_, *control_);
+
+ // Mask the key to prevent SSCA.
+ if (untrusted_code_mitigations_) {
+ // mask = ((key - size) & ~key) >> 31
+ Node* neg_key =
+ graph()->NewNode(machine->Word32Xor(), key, Int32Constant(-1));
+ Node* masked_diff = graph()->NewNode(
+ machine->Word32And(),
+ graph()->NewNode(machine->Int32Sub(), key, table_size), neg_key);
+ Node* mask =
+ graph()->NewNode(machine->Word32Sar(), masked_diff, Int32Constant(31));
+ key = graph()->NewNode(machine->Word32And(), key, mask);
+ }
+
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
// [sig1, code1, sig2, code2, sig3, code3, ...]
static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
+
+ int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
+ if (WASM_CONTEXT_TABLES) {
+ // The table entries are {IndirectFunctionTableEntry} structs.
+ Node* scaled_key =
+ graph()->NewNode(machine->Int32Mul(), key,
+ Int32Constant(sizeof(IndirectFunctionTableEntry)));
+ const Operator* add = nullptr;
+ if (machine->Is64()) {
+ scaled_key = graph()->NewNode(machine->ChangeInt32ToInt64(), scaled_key);
+ add = machine->Int64Add();
+ } else {
+ add = machine->Int32Add();
+ }
+ Node* entry_address = graph()->NewNode(add, table, scaled_key);
+ Node* loaded_sig = graph()->NewNode(
+ machine->Load(MachineType::Int32()), entry_address,
+ Int32Constant(offsetof(IndirectFunctionTableEntry, sig_id)), *effect_,
+ *control_);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
+ Int32Constant(canonical_sig_num));
+
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+
+ Node* target = graph()->NewNode(
+ machine->Load(MachineType::Pointer()), entry_address,
+ Int32Constant(offsetof(IndirectFunctionTableEntry, target)), *effect_,
+ *control_);
+
+ Node* loaded_context = graph()->NewNode(
+ machine->Load(MachineType::Pointer()), entry_address,
+ Int32Constant(offsetof(IndirectFunctionTableEntry, context)), *effect_,
+ *control_);
+
+ args[0] = target;
+
+ return BuildWasmCall(sig, args, rets, position, loaded_context);
+ }
+
+ // The table entries are elements of a fixed array.
ElementAccess access = AccessBuilder::ForFixedArrayElement();
const int fixed_offset = access.header_size - access.tag();
Node* key_offset = graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2 + 1));
- Node* load_sig =
+ Node* loaded_sig =
graph()->NewNode(machine->Load(MachineType::AnyTagged()), table,
graph()->NewNode(machine->Int32Add(), key_offset,
Int32Constant(fixed_offset)),
*effect_, *control_);
- int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
- CHECK_GE(sig_index, 0);
- Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
+ CHECK_GE(canonical_sig_num, 0);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
jsgraph()->SmiConstant(canonical_sig_num));
+
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
// Load code object from the table. It is held by a Foreign.
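
The untrusted-code mitigation above clamps the call_indirect key with straight-line arithmetic so that a misspeculated out-of-bounds index cannot reach the signature and target loads. A scalar model of the mask, with the interesting cases worked out; this is a sketch of the arithmetic, not V8 code:

#include <cstdint>

// mask = ((key - size) & ~key) >> 31, with an arithmetic shift.
uint32_t MaskKey(uint32_t key, uint32_t size) {
  const uint32_t masked_diff = (key - size) & ~key;
  // Sign bit set:   key < size and key's own sign bit is clear, so
  //                 mask == 0xFFFFFFFF and key passes through unchanged.
  // Sign bit clear: key >= size (or key >= 2^31), so mask == 0 and the
  //                 masked key is forced to index 0.
  const uint32_t mask =
      static_cast<uint32_t>(static_cast<int32_t>(masked_diff) >> 31);
  return key & mask;
}
// MaskKey(3, 10) == 3; MaskKey(12, 10) == 0; MaskKey(0x80000000u, 10) == 0.
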
@@ -2540,15 +2697,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
graph()->NewNode(machine->Int32Add(), key_offset,
Uint32Constant(fixed_offset + kPointerSize)),
*effect_, *control_);
- if (FLAG_wasm_jit_to_native) {
- Node* address = graph()->NewNode(
- machine->Load(MachineType::Pointer()), entry,
- Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
- *effect_, *control_);
- args[0] = address;
- } else {
args[0] = entry;
- }
return BuildWasmCall(sig, args, rets, position);
}
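
In the WASM_CONTEXT_TABLES path the indirect table is no longer a FixedArray of (Smi signature, code) pairs but an array of {IndirectFunctionTableEntry} structs, addressed as key * sizeof(entry). An assumed layout consistent with the three offsetof() loads above; the field order and exact types are illustrative:

#include <cstdint>

struct IndirectFunctionTableEntry {
  int32_t sig_id;  // canonical signature id, compared before the call
  void* context;   // WasmContext of the instance that owns the function
  void* target;    // raw entry point of the callee
};

// Address of entry {key}, matching the scaled_key arithmetic above:
inline const IndirectFunctionTableEntry* Entry(
    const IndirectFunctionTableEntry* table_base, uint32_t key) {
  return table_base + key;  // pointer arithmetic scales by sizeof(entry)
}
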
@@ -2701,13 +2850,14 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* js_context) {
Callable callable =
Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kNoProperties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- node, js_context, *effect_, *control_);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Call(call_descriptor), stub_code,
+ node, js_context, *effect_, *control_);
SetSourcePosition(result, 1);
@@ -2853,10 +3003,10 @@ Node* WasmGraphBuilder::BuildAllocateHeapNumberWithValue(Node* value,
graph()->NewNode(common->BeginRegion(RegionObservability::kNotObservable),
graph()->start());
if (!allocate_heap_number_operator_.is_set()) {
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kNoThrow);
- allocate_heap_number_operator_.set(common->Call(descriptor));
+ allocate_heap_number_operator_.set(common->Call(call_descriptor));
}
Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
target, js_context, effect, control);
@@ -2932,9 +3082,10 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
// We only need a dummy call descriptor.
wasm::FunctionSig::Builder dummy_sig_builder(jsgraph()->zone(), 0, 0);
- CallDescriptor* desc =
+ auto call_descriptor =
GetWasmCallDescriptor(jsgraph()->zone(), dummy_sig_builder.Build());
- *effect_ = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ *effect_ =
+ graph()->NewNode(jsgraph()->common()->Call(call_descriptor), pos, args);
Return(jsgraph()->UndefinedConstant());
return;
}
@@ -2957,9 +3108,10 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
args[pos++] = *control_;
// Call the wasm code.
- CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
+ auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
- Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+ Node* call =
+ graph()->NewNode(jsgraph()->common()->Call(call_descriptor), count, args);
*effect_ = call;
// Clear the ThreadInWasmFlag
@@ -3017,7 +3169,7 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
// Build the start and the parameter nodes.
Isolate* isolate = jsgraph()->isolate();
- CallDescriptor* desc;
+ CallDescriptor* call_descriptor;
Node* start = Start(wasm_count + 3);
*effect_ = start;
*control_ = start;
@@ -3067,7 +3219,7 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
handle(isolate->heap()->undefined_value(), isolate));
}
- desc = Linkage::GetJSCallDescriptor(
+ call_descriptor = Linkage::GetJSCallDescriptor(
graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
// Convert wasm numbers to JS values.
@@ -3079,7 +3231,8 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
args[pos++] = *effect_;
args[pos++] = *control_;
- call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ call = graph()->NewNode(jsgraph()->common()->Call(call_descriptor), pos,
+ args);
}
}
@@ -3094,9 +3247,9 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
args[pos++] = jsgraph()->Constant(
handle(isolate->heap()->undefined_value(), isolate)); // receiver
- desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
- callable.descriptor(), wasm_count + 1,
- CallDescriptor::kNoFlags);
+ call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), wasm_count + 1,
+ CallDescriptor::kNoFlags);
// Convert wasm numbers to JS values.
pos = AddParameterNodes(args, pos, wasm_count, sig_);
@@ -3111,7 +3264,8 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
args[pos++] = *effect_;
args[pos++] = *control_;
- call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ call =
+ graph()->NewNode(jsgraph()->common()->Call(call_descriptor), pos, args);
}
*effect_ = call;
@@ -3173,9 +3327,9 @@ void WasmGraphBuilder::BuildWasmToWasmWrapper(WasmCodeWrapper wasm_code,
args[pos++] = *control_;
// Tail-call the wasm code.
- CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
- Node* tail_call =
- graph()->NewNode(jsgraph()->common()->TailCall(desc), count, args);
+ auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
+ Node* tail_call = graph()->NewNode(
+ jsgraph()->common()->TailCall(call_descriptor), count, args);
MergeControlToEnd(jsgraph(), tail_call);
}
@@ -3246,17 +3400,12 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(uint32_t func_index) {
if (HasInt64ParamOrReturn(sig_)) LowerInt64();
}
-void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
+void WasmGraphBuilder::BuildCWasmEntry() {
// Build the start and the JS parameter nodes.
Node* start = Start(CWasmEntryParameters::kNumParameters + 5);
*control_ = start;
*effect_ = start;
- // Create the wasm_context node to pass as parameter.
- DCHECK_NULL(wasm_context_);
- wasm_context_ = jsgraph()->IntPtrConstant(
- reinterpret_cast<uintptr_t>(wasm_context_address));
-
// Create parameter nodes (offset by 1 for the receiver parameter).
Node* code_obj = nullptr;
if (FLAG_wasm_jit_to_native) {
@@ -3269,6 +3418,7 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
} else {
code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
}
+ Node* wasm_context = Param(CWasmEntryParameters::kWasmContext + 1);
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
@@ -3277,7 +3427,7 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
int pos = 0;
args[pos++] = code_obj;
- args[pos++] = wasm_context_.get();
+ args[pos++] = wasm_context;
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -3294,10 +3444,10 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
DCHECK_EQ(arg_count, pos);
// Call the wasm code.
- CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
+ auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
- Node* call =
- graph()->NewNode(jsgraph()->common()->Call(desc), arg_count, args);
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(call_descriptor),
+ arg_count, args);
*effect_ = call;
// Store the return value.
@@ -3495,20 +3645,44 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
jsgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
}
-void WasmGraphBuilder::EnsureFunctionTableNodes() {
- if (function_tables_.size() > 0) return;
- size_t tables_size = env_->function_tables.size();
- for (size_t i = 0; i < tables_size; ++i) {
- wasm::GlobalHandleAddress function_handle_address =
- env_->function_tables[i];
- Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(function_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE);
- uint32_t table_size = env_->module->function_tables[i].initial_size;
- Node* size = jsgraph()->RelocatableInt32Constant(
- static_cast<uint32_t>(table_size),
- RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
- function_tables_.push_back({table_addr, size});
+void WasmGraphBuilder::GetFunctionTableNodes(uint32_t table_index, Node** table,
+ Node** table_size) {
+ if (WASM_CONTEXT_TABLES) {
+ // The table address and size are stored in the WasmContext.
+ // Don't bother caching them, since they are only used in indirect calls,
+ // which would cause them to be spilled on the stack anyway.
+ *table = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
+ jsgraph()->Int32Constant(
+ static_cast<int32_t>(offsetof(WasmContext, table))),
+ *effect_, *control_);
+ *table_size = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
+ jsgraph()->Int32Constant(
+ static_cast<int32_t>(offsetof(WasmContext, table_size))),
+ *effect_, *control_);
+ } else {
+ // The function table nodes are relocatable constants.
+ if (function_tables_.size() == 0) {
+ size_t tables_size = env_->function_tables.size();
+ for (size_t i = 0; i < tables_size; ++i) {
+ wasm::GlobalHandleAddress function_handle_address =
+ env_->function_tables[i];
+ Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<intptr_t>(function_handle_address),
+ RelocInfo::WASM_GLOBAL_HANDLE);
+ uint32_t table_size = env_->module->function_tables[i].initial_size;
+ Node* size = jsgraph()->RelocatableInt32Constant(
+ static_cast<uint32_t>(table_size),
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+ function_tables_.push_back({table_addr, size});
+ }
+ }
+ *table_size = function_tables_[table_index].size;
+ *table =
+ graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
+ function_tables_[table_index].table_addr,
+ jsgraph()->IntPtrConstant(0), *effect_, *control_);
}
}
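
GetFunctionTableNodes now reads the table base and size straight out of the WasmContext at fixed offsets instead of materializing relocatable constants, so growing the table only has to update the context. An assumed slice of the struct, matching the two offsetof() loads; the real definition with its remaining fields lives elsewhere in the wasm sources:

#include <cstdint>

struct WasmContext {
  // ... memory and globals fields elided ...
  uintptr_t table;      // base address of the indirect function table
  uint32_t table_size;  // current number of entries
};

// Shape of the second load above, in scalar form:
inline uint32_t LoadTableSize(const WasmContext* ctx) {
  return ctx->table_size;  // Load(Uint32) at offsetof(WasmContext, table_size)
}
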
@@ -3539,7 +3713,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
Node** parameters,
int parameter_count) {
const Runtime::Function* fun = Runtime::FunctionForId(f);
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
CallDescriptor::kNoFlags);
// CEntryStubConstant nodes have to be created and cached in the main
@@ -3562,8 +3736,8 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
inputs[count++] = *effect_;
inputs[count++] = *control_;
- Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->Call(desc),
- count, inputs);
+ Node* node = jsgraph()->graph()->NewNode(
+ jsgraph()->common()->Call(call_descriptor), count, inputs);
*effect_ = node;
return node;
@@ -4370,22 +4544,22 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
#define ATOMIC_BINOP_LIST(V) \
V(I32AtomicAdd, Add, Uint32) \
- V(I32AtomicSub, Sub, Uint32) \
- V(I32AtomicAnd, And, Uint32) \
- V(I32AtomicOr, Or, Uint32) \
- V(I32AtomicXor, Xor, Uint32) \
- V(I32AtomicExchange, Exchange, Uint32) \
V(I32AtomicAdd8U, Add, Uint8) \
- V(I32AtomicSub8U, Sub, Uint8) \
- V(I32AtomicAnd8U, And, Uint8) \
- V(I32AtomicOr8U, Or, Uint8) \
- V(I32AtomicXor8U, Xor, Uint8) \
- V(I32AtomicExchange8U, Exchange, Uint8) \
V(I32AtomicAdd16U, Add, Uint16) \
+ V(I32AtomicSub, Sub, Uint32) \
+ V(I32AtomicSub8U, Sub, Uint8) \
V(I32AtomicSub16U, Sub, Uint16) \
+ V(I32AtomicAnd, And, Uint32) \
+ V(I32AtomicAnd8U, And, Uint8) \
V(I32AtomicAnd16U, And, Uint16) \
+ V(I32AtomicOr, Or, Uint32) \
+ V(I32AtomicOr8U, Or, Uint8) \
V(I32AtomicOr16U, Or, Uint16) \
+ V(I32AtomicXor, Xor, Uint32) \
+ V(I32AtomicXor8U, Xor, Uint8) \
V(I32AtomicXor16U, Xor, Uint16) \
+ V(I32AtomicExchange, Exchange, Uint32) \
+ V(I32AtomicExchange8U, Exchange, Uint8) \
V(I32AtomicExchange16U, Exchange, Uint16)
#define ATOMIC_TERNARY_LIST(V) \
@@ -4409,15 +4583,15 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Word32Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
@@ -4428,7 +4602,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ jsgraph()->machine()->Word32Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
break; \
}
@@ -4441,22 +4615,22 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
- jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
+ jsgraph()->machine()->Word32AtomicLoad(MachineType::Type()), \
MemBuffer(offset), index, *effect_, *control_); \
break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
- case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Word32AtomicStore(MachineRepresentation::Rep), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
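
The atomic hunks reorder ATOMIC_BINOP_LIST by operation and rename the machine operators to Word32Atomic*, but the generation scheme is unchanged: each list macro applies a caller-supplied macro V to every tuple, and the BUILD_ATOMIC_* macros stamp out one switch case per application. A standalone miniature of the pattern, with made-up names:

enum DemoOp { kAdd, kSub };

#define DEMO_BINOP_LIST(V) \
  V(Add, +)                \
  V(Sub, -)

int ApplyDemoOp(DemoOp op, int a, int b) {
  switch (op) {
#define DEMO_CASE(Name, OP) \
  case k##Name:             \
    return a OP b;
    DEMO_BINOP_LIST(DEMO_CASE)
#undef DEMO_CASE
  }
  return 0;
}
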
@@ -4568,7 +4742,7 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
#endif
if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
"%.*s", func_name.length(), func_name.start());
}
@@ -4701,7 +4875,7 @@ Handle<Code> CompileWasmToJSWrapper(
#endif
if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
"%.*s", func_name.length(), func_name.start());
}
@@ -4774,7 +4948,7 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
buffer.Dispose();
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
"wasm-to-wasm");
}
@@ -4837,7 +5011,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
#endif
if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
"%.*s", func_name.length(), func_name.start());
}
}
@@ -4852,8 +5026,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
return code;
}
-Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
- Address wasm_context_address) {
+Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
@@ -4870,7 +5043,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
CEntryStub(isolate, 1).GetCode(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildCWasmEntry(wasm_context_address);
+ builder.BuildCWasmEntry();
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
@@ -5051,7 +5224,7 @@ void WasmCompilationUnit::ExecuteCompilation() {
liftoff_.~LiftoffData();
mode_ = WasmCompilationUnit::CompilationMode::kTurbofan;
new (&tf_) TurbofanData();
- // fall-through
+ V8_FALLTHROUGH;
case WasmCompilationUnit::CompilationMode::kTurbofan:
ExecuteTurbofanCompilation();
break;
@@ -5089,19 +5262,20 @@ void WasmCompilationUnit::ExecuteTurbofanCompilation() {
tf_.compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
// Run the compiler pipeline to generate machine code.
- CallDescriptor* descriptor =
+ auto call_descriptor =
GetWasmCallDescriptor(tf_.compilation_zone_.get(), func_body_.sig);
if (tf_.jsgraph_->machine()->Is32()) {
- descriptor =
- GetI32WasmCallDescriptor(tf_.compilation_zone_.get(), descriptor);
+ call_descriptor = GetI32WasmCallDescriptor(tf_.compilation_zone_.get(),
+ call_descriptor);
}
tf_.info_.reset(new CompilationInfo(
GetDebugName(tf_.compilation_zone_.get(), func_name_, func_index_),
tf_.compilation_zone_.get(), Code::WASM_FUNCTION));
tf_.job_.reset(Pipeline::NewWasmCompilationJob(
- tf_.info_.get(), isolate_, tf_.jsgraph_, descriptor, source_positions,
- protected_instructions_.get(), env_->module->origin()));
+ tf_.info_.get(), isolate_, tf_.jsgraph_, call_descriptor,
+ source_positions, protected_instructions_.get(),
+ env_->module->origin()));
ok_ = tf_.job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
@@ -5195,11 +5369,11 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
desc, tf_.job_->compilation_info()->wasm_code_desc()->frame_slot_count,
func_index_,
tf_.job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
- std::move(protected_instructions_));
+ tf_.job_->compilation_info()->wasm_code_desc()->handler_table_offset,
+ std::move(protected_instructions_), false);
if (!code) {
return WasmCodeWrapper(code);
}
- // TODO(mtrofin): add CodeEventListener call - see the non-native case.
if (FLAG_trace_wasm_decode_time) {
double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
@@ -5207,17 +5381,14 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
codegen_ms);
}
+ PROFILE(isolate_,
+ CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code, func_name_));
+
Handle<ByteArray> source_positions =
tf_.job_->compilation_info()->wasm_code_desc()->source_positions_table;
- MaybeHandle<HandlerTable> handler_table =
- tf_.job_->compilation_info()->wasm_code_desc()->handler_table;
native_module_->compiled_module()->source_positions()->set(
func_index_, *source_positions);
- if (!handler_table.is_null()) {
- native_module_->compiled_module()->handler_table()->set(
- func_index_, *handler_table.ToHandleChecked());
- }
#ifdef ENABLE_DISASSEMBLER
// Note: only do this after setting source positions, as this will be
@@ -5272,8 +5443,7 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
Handle<Code> code;
code = isolate_->factory()->NewCode(
desc, Code::WASM_FUNCTION, code, Builtins::kNoBuiltinId,
- MaybeHandle<HandlerTable>(), source_positions,
- MaybeHandle<DeoptimizationData>(), kMovable,
+ source_positions, MaybeHandle<DeoptimizationData>(), kMovable,
0, // stub_key
false, // is_turbofanned
liftoff_.asm_.GetTotalFrameSlotCount(), // stack_slots
@@ -5287,14 +5457,16 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
PackProtectedInstructions(code);
ret = WasmCodeWrapper(code);
} else {
- // TODO(mtrofin): figure a way to raise events.
- // Consider lifting it to FinishCompilation.
+ // TODO(herhut) Consider lifting it to FinishCompilation.
native_module_->compiled_module()->source_positions()->set(
func_index_, *source_positions);
- ret = WasmCodeWrapper(
+ wasm::WasmCode* code =
native_module_->AddCode(desc, liftoff_.asm_.GetTotalFrameSlotCount(),
func_index_, liftoff_.safepoint_table_offset_,
- std::move(protected_instructions_), true));
+ 0, std::move(protected_instructions_), true);
+ PROFILE(isolate_,
+ CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code, func_name_));
+ ret = WasmCodeWrapper(code);
}
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code || FLAG_print_wasm_code) {
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 22a2e1071e..e23fd4fe14 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -227,6 +227,7 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
enum CWasmEntryParameters {
kCodeObject,
+ kWasmContext,
kArgumentsBuffer,
// marker:
kNumParameters
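
Adding kWasmContext here is what lets BuildCWasmEntry in the .cc hunk above drop its baked-in wasm_context_address: the context now arrives as an ordinary parameter, so one compiled entry stub per signature serves every instance. A hypothetical mirror of the layout (offset by one for the JS receiver at call time); the struct below is illustrative, not a V8 type:

#include <cstdint>

struct CWasmEntryArgs {
  void* code_object;    // kCodeObject: the wasm code to invoke
  void* wasm_context;   // kWasmContext: passed per call rather than baked in
  uint8_t* arg_buffer;  // kArgumentsBuffer: packed params, then results
};
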
@@ -235,8 +236,7 @@ enum CWasmEntryParameters {
// Compiles a stub with JS linkage, taking parameters as described by
// {CWasmEntryParameters}. It loads the wasm parameters from the argument
// buffer and calls the wasm function given as first parameter.
-Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
- Address wasm_context_address);
+Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
// Values from the {WasmContext} are cached between WASM-level function calls.
// This struct allows the SSA environment handling this cache to be defined
@@ -255,8 +255,6 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
- struct IntConvertOps;
- struct FloatConvertOps;
WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
Handle<Code> centry_stub, wasm::FunctionSig* sig,
@@ -369,12 +367,13 @@ class WasmGraphBuilder {
void BuildWasmToWasmWrapper(WasmCodeWrapper wasm_code_start,
Address new_wasm_context_address);
void BuildWasmInterpreterEntry(uint32_t func_index);
- void BuildCWasmEntry(Address wasm_context_address);
+ void BuildCWasmEntry();
Node* ToJS(Node* node, wasm::ValueType type);
Node* FromJS(Node* node, Node* js_context, wasm::ValueType type);
Node* Invert(Node* node);
- void EnsureFunctionTableNodes();
+ void GetFunctionTableNodes(uint32_t table_index, Node** table,
+ Node** table_size);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
@@ -451,8 +450,10 @@ class WasmGraphBuilder {
bool use_trap_handler() const { return env_ && env_->use_trap_handler; }
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph();
+
private:
- enum class NumericImplementation : uint8_t { kTrap, kSaturate };
static const int kDefaultBufferSize = 16;
Zone* const zone_;
@@ -487,10 +488,6 @@ class WasmGraphBuilder {
compiler::SourcePositionTable* const source_position_table_ = nullptr;
- // Internal helper methods.
- JSGraph* jsgraph() { return jsgraph_; }
- Graph* graph();
-
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
@@ -510,29 +507,14 @@ class WasmGraphBuilder {
template <typename... Args>
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position);
+ wasm::WasmCodePosition position,
+ Node* wasm_context = nullptr);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
- Node* BuildI32ConvertOp(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl, const Operator* op,
- wasm::WasmOpcode check_op,
- const IntConvertOps* int_ops,
- const FloatConvertOps* float_ops);
- Node* BuildConvertCheck(Node* test, Node* result, Node* input,
- wasm::WasmCodePosition position,
- NumericImplementation impl,
- const IntConvertOps* int_ops,
- const FloatConvertOps* float_ops);
- Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl);
- Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl);
- Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl);
- Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position,
- NumericImplementation impl);
+ Node* BuildIntConvertFloat(Node* input, wasm::WasmCodePosition position,
+ wasm::WasmOpcode);
Node* BuildI32Ctz(Node* input);
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
@@ -567,14 +549,8 @@ class WasmGraphBuilder {
Node* BuildF64SConvertI64(Node* input);
Node* BuildF64UConvertI64(Node* input);
- Node* BuildFloatToIntConversionInstruction(
- Node* input, ExternalReference ref,
- MachineRepresentation parameter_representation,
- const MachineType result_type, wasm::WasmCodePosition position);
- Node* BuildI64SConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI64UConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI64SConvertF64(Node* input, wasm::WasmCodePosition position);
- Node* BuildI64UConvertF64(Node* input, wasm::WasmCodePosition position);
+ Node* BuildCcallConvertFloat(Node* input, wasm::WasmCodePosition position,
+ wasm::WasmOpcode opcode);
Node* BuildI32DivS(Node* left, Node* right, wasm::WasmCodePosition position);
Node* BuildI32RemS(Node* left, Node* right, wasm::WasmCodePosition position);
@@ -657,9 +633,9 @@ constexpr int kWasmContextParameterIndex = 0;
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, wasm::FunctionSig* signature);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
- Zone* zone, CallDescriptor* descriptor);
+ Zone* zone, CallDescriptor* call_descriptor);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
- Zone* zone, CallDescriptor* descriptor);
+ Zone* zone, CallDescriptor* call_descriptor);
} // namespace compiler
} // namespace internal
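The header-side effect of this change is that the four per-type Build*Convert* helpers (and the trap/saturate plumbing around them) collapse into two entry points keyed on the wasm opcode: BuildIntConvertFloat for inline conversions and BuildCcallConvertFloat for the C-call fallback. A minimal sketch of that consolidation pattern, with stand-in opcode names rather than V8's real ones:

    #include <cstdint>

    enum class ConvOpcode {
      kI32SConvertF32, kI32UConvertF32, kI32SConvertF64, kI32UConvertF64
    };

    // One dispatcher keyed on the opcode replaces four near-identical
    // helpers; signedness and source width are derived from the opcode
    // instead of being baked into separate function bodies.
    int32_t ConvertFloatToInt(double input, ConvOpcode opcode) {
      switch (opcode) {
        case ConvOpcode::kI32SConvertF32:
        case ConvOpcode::kI32SConvertF64:
          return static_cast<int32_t>(input);  // signed truncation
        case ConvOpcode::kI32UConvertF32:
        case ConvOpcode::kI32UConvertF64:
          return static_cast<int32_t>(static_cast<uint32_t>(input));
      }
      return 0;  // unreachable for valid opcodes
    }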
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index e7bb3c164a..cef127f334 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -280,77 +280,77 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
}
CallDescriptor* ReplaceTypeInCallDescriptorWith(
- Zone* zone, CallDescriptor* descriptor, size_t num_replacements,
+ Zone* zone, CallDescriptor* call_descriptor, size_t num_replacements,
MachineType input_type, MachineRepresentation output_type) {
- size_t parameter_count = descriptor->ParameterCount();
- size_t return_count = descriptor->ReturnCount();
- for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
- if (descriptor->GetParameterType(i) == input_type) {
+ size_t parameter_count = call_descriptor->ParameterCount();
+ size_t return_count = call_descriptor->ReturnCount();
+ for (size_t i = 0; i < call_descriptor->ParameterCount(); i++) {
+ if (call_descriptor->GetParameterType(i) == input_type) {
parameter_count += num_replacements - 1;
}
}
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == input_type) {
+ for (size_t i = 0; i < call_descriptor->ReturnCount(); i++) {
+ if (call_descriptor->GetReturnType(i) == input_type) {
return_count += num_replacements - 1;
}
}
- if (parameter_count == descriptor->ParameterCount() &&
- return_count == descriptor->ReturnCount()) {
- return descriptor;
+ if (parameter_count == call_descriptor->ParameterCount() &&
+ return_count == call_descriptor->ReturnCount()) {
+ return call_descriptor;
}
LocationSignature::Builder locations(zone, return_count, parameter_count);
Allocator params = parameter_registers;
- for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
- if (descriptor->GetParameterType(i) == input_type) {
+ for (size_t i = 0; i < call_descriptor->ParameterCount(); i++) {
+ if (call_descriptor->GetParameterType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
locations.AddParam(params.Next(output_type));
}
} else {
locations.AddParam(
- params.Next(descriptor->GetParameterType(i).representation()));
+ params.Next(call_descriptor->GetParameterType(i).representation()));
}
}
Allocator rets = return_registers;
rets.AdjustStackOffset(params.stack_offset);
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == input_type) {
+ for (size_t i = 0; i < call_descriptor->ReturnCount(); i++) {
+ if (call_descriptor->GetReturnType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
locations.AddReturn(rets.Next(output_type));
}
} else {
locations.AddReturn(
- rets.Next(descriptor->GetReturnType(i).representation()));
+ rets.Next(call_descriptor->GetReturnType(i).representation()));
}
}
- return new (zone) CallDescriptor( // --
- descriptor->kind(), // kind
- descriptor->GetInputType(0), // target MachineType
- descriptor->GetInputLocation(0), // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- descriptor->properties(), // properties
- descriptor->CalleeSavedRegisters(), // callee-saved registers
- descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
- descriptor->flags(), // flags
- descriptor->debug_name(), // debug name
- descriptor->AllocatableRegisters(), // allocatable registers
- rets.stack_offset - params.stack_offset); // stack_return_count
+ return new (zone) CallDescriptor( // --
+ call_descriptor->kind(), // kind
+ call_descriptor->GetInputType(0), // target MachineType
+ call_descriptor->GetInputLocation(0), // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ call_descriptor->properties(), // properties
+ call_descriptor->CalleeSavedRegisters(), // callee-saved registers
+ call_descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
+ call_descriptor->flags(), // flags
+ call_descriptor->debug_name(), // debug name
+ call_descriptor->AllocatableRegisters(), // allocatable registers
+ rets.stack_offset - params.stack_offset); // stack_return_count
}
CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
- CallDescriptor* descriptor) {
- return ReplaceTypeInCallDescriptorWith(zone, descriptor, 2,
+ CallDescriptor* call_descriptor) {
+ return ReplaceTypeInCallDescriptorWith(zone, call_descriptor, 2,
MachineType::Int64(),
MachineRepresentation::kWord32);
}
-CallDescriptor* GetI32WasmCallDescriptorForSimd(Zone* zone,
- CallDescriptor* descriptor) {
- return ReplaceTypeInCallDescriptorWith(zone, descriptor, 4,
+CallDescriptor* GetI32WasmCallDescriptorForSimd(
+ Zone* zone, CallDescriptor* call_descriptor) {
+ return ReplaceTypeInCallDescriptorWith(zone, call_descriptor, 4,
MachineType::Simd128(),
MachineRepresentation::kWord32);
}
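The rename aside, ReplaceTypeInCallDescriptorWith grows the parameter and return counts by num_replacements - 1 for every slot of the replaced type: GetI32WasmCallDescriptor splits each i64 into two i32 words, and the SIMD variant splits each s128 into four. A self-contained check of that counting logic, with MachineType reduced to a "needs replacement" flag (an illustration, not V8's types):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    size_t LoweredCount(const std::vector<bool>& needs_replacement,
                        size_t num_replacements) {
      size_t count = needs_replacement.size();
      for (bool wide : needs_replacement) {
        if (wide) count += num_replacements - 1;  // same adjustment as above
      }
      return count;
    }

    int main() {
      // A hypothetical (i64, i32, i64) signature lowered with two i32 words
      // per i64: 3 parameters become 5, and an i64 return becomes 2 slots.
      assert(LoweredCount({true, false, true}, 2) == 5);
      assert(LoweredCount({true}, 2) == 2);
    }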
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index bc92f9707c..cc6d758a9a 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -296,13 +296,25 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
X64OperandConverter& i, int pc) {
- const X64MemoryProtection protection =
- static_cast<X64MemoryProtection>(MiscField::decode(opcode));
- if (protection == X64MemoryProtection::kProtected) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessProtected) {
const bool frame_elided = !codegen->frame_access_state()->has_frame();
new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, instr);
}
}
+
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ X64OperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->andq(value, kSpeculationPoisonRegister);
+ }
+}
+
} // namespace
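EmitOOLTrapIfNeeded keeps handling the kMemoryAccessProtected case (out-of-line trap landing pads), while the new EmitWordLoadPoisoningIfNeeded is the x64 half of the Spectre load-poisoning scheme: when an instruction is marked kMemoryAccessPoisoned, the loaded value is ANDed with kSpeculationPoisonRegister. The runtime effect, in scalar form:

    #include <cstdint>

    // On the architecturally correct path the mask is all ones and the AND
    // is a no-op; on a mis-speculated path the mask is zero, so the load
    // result, and anything derived from it, is forced to zero.
    uint64_t PoisonedLoad(const uint64_t* addr, uint64_t poison_mask) {
      uint64_t value = *addr;      // the load instruction itself
      return value & poison_mask;  // andq value, kSpeculationPoisonRegister
    }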
@@ -570,32 +582,44 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
first_unused_stack_slot);
}
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ __ ComputeCodeStartAddress(rbx);
+ __ cmpq(rbx, kJavaScriptCallCodeStartRegister);
+ __ Assert(equal, AbortReason::kWrongFunctionCodeStart);
+}
+
// Check if the code object is marked for deoptimization. If it is, then it
-// jumps to CompileLazyDeoptimizedCode builtin. In order to do this we need to:
-// 1. load the address of the current instruction;
-// 2. read from memory the word that contains that bit, which can be found in
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
-// 3. test kMarkedForDeoptimizationBit in those flags; and
-// 4. if it is not zero then it jumps to the builtin.
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // Load effective address to get the address of the current instruction into
- // rcx.
- __ leaq(rcx, Operand(&current));
- __ bind(&current);
- int pc = __ pc_offset();
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ movp(rcx, Operand(rcx, offset));
- __ testl(FieldOperand(rcx, CodeDataContainer::kKindSpecificFlagsOffset),
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ movp(rbx, Operand(kJavaScriptCallCodeStartRegister, offset));
+ __ testl(FieldOperand(rbx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
-inline bool HasCallDescriptorFlag(Instruction* instr,
- CallDescriptor::Flag flag) {
- return MiscField::decode(instr->opcode()) & flag;
+void CodeGenerator::GenerateSpeculationPoison() {
+ // Set a mask which has all bits set in the normal case, but has all
+ // bits cleared if we are speculatively executing the wrong PC.
+ __ ComputeCodeStartAddress(rbx);
+ __ movp(kSpeculationPoisonRegister, Immediate(0));
+ __ cmpp(kJavaScriptCallCodeStartRegister, rbx);
+ __ movp(rbx, Immediate(-1));
+ __ cmovq(equal, kSpeculationPoisonRegister, rbx);
+}
+
+void CodeGenerator::AssembleRegisterArgumentPoisoning() {
+ __ andq(kJSFunctionRegister, kSpeculationPoisonRegister);
+ __ andq(kContextRegister, kSpeculationPoisonRegister);
+ __ andq(rsp, kSpeculationPoisonRegister);
}
// Assembles an instruction after register allocation, producing machine code.
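GenerateSpeculationPoison is where that mask is computed: the recomputed code start address is compared against kJavaScriptCallCodeStartRegister, and a conditional move selects all ones on a match and zero otherwise, with no branch to mispredict. The same computation as straight-line C++:

    #include <cstdint>

    // Mirrors the cmpp/cmovq pair above; the ternary stands in for the cmov.
    uint64_t ComputePoison(uint64_t actual_code_start,
                           uint64_t expected_code_start) {
      return actual_code_start == expected_code_start ? ~uint64_t{0}
                                                      : uint64_t{0};
    }

The same invariant lets BailoutIfDeoptimized drop its leaq/bind dance for the current PC: with the code start always in kJavaScriptCallCodeStartRegister, the CodeDataContainer can be loaded at a fixed offset from that register.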
@@ -697,6 +721,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister)) {
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+ DCHECK_EQ(rcx, reg);
+ }
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
@@ -714,6 +742,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, AbortReason::kWrongFunctionContext);
}
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(func, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
@@ -1747,20 +1776,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -1771,17 +1804,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ movb(operand, i.InputRegister(index));
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movsxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -1791,6 +1827,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -1801,6 +1838,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ movw(operand, i.InputRegister(index));
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movl:
@@ -1825,10 +1863,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(operand, i.InputRegister(index));
}
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -1843,6 +1883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movq(operand, i.InputRegister(index));
}
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -2064,9 +2105,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64F32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputAt(0)->IsFPRegister()) {
- __ Movss(dst, i.InputDoubleRegister(0));
+ __ movss(dst, i.InputDoubleRegister(0));
} else {
- __ Movss(dst, i.InputOperand(0));
+ __ movss(dst, i.InputOperand(0));
}
__ shufps(dst, dst, 0x0);
break;
@@ -2085,6 +2126,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ insertps(i.OutputSimd128Register(), i.InputDoubleRegister(2), select);
break;
}
+ case kX64F32x4Abs: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1);
+ __ andps(i.OutputSimd128Register(), kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ psrld(dst, 1);
+ __ andps(dst, i.InputSimd128Register(0));
+ }
+ break;
+ }
+ case kX64F32x4Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pslld(kScratchDoubleReg, 31);
+ __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pslld(dst, 31);
+ __ xorps(dst, i.InputSimd128Register(0));
+ }
+ break;
+ }
case kX64F32x4RecipApprox: {
__ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
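The new F32x4Abs and F32x4Neg cases materialize their constants in-register instead of loading them from memory: pcmpeqd reg,reg yields all ones, a logical right shift by 1 then leaves 0x7FFFFFFF in every lane (clears the sign bit under AND), and a left shift by 31 leaves 0x80000000 (flips the sign bit under XOR). The scalar equivalent, assuming IEEE-754 binary32 floats:

    #include <cstdint>
    #include <cstring>

    static uint32_t Bits(float f) {
      uint32_t u;
      std::memcpy(&u, &f, sizeof u);
      return u;
    }
    static float FromBits(uint32_t u) {
      float f;
      std::memcpy(&f, &u, sizeof f);
      return f;
    }

    float F32Abs(float f) {
      uint32_t abs_mask = ~uint32_t{0} >> 1;  // pcmpeqd; psrld 1
      return FromBits(Bits(f) & abs_mask);    // andps
    }
    float F32Neg(float f) {
      uint32_t neg_mask = ~uint32_t{0} << 31;  // pcmpeqd; pslld 31
      return FromBits(Bits(f) ^ neg_mask);     // xorps
    }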
@@ -2098,6 +2167,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ addps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64F32x4AddHoriz: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ haddps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64F32x4Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ subps(i.OutputSimd128Register(), i.InputSimd128Register(1));
@@ -2271,7 +2345,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
__ pshuflw(dst, dst, 0x0);
- __ pshufhw(dst, dst, 0x0);
__ pshufd(dst, dst, 0x0);
break;
}
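The dropped pshufhw was dead work: pshuflw 0x0 replicates word 0 across the low four words, and the following pshufd 0x0 copies dword 0 into all four dwords, overwriting the high half regardless. A lane-level simulation of the two remaining shuffles:

    #include <array>
    #include <cstdint>

    std::array<uint16_t, 8> Splat16(uint16_t v) {
      std::array<uint16_t, 8> r{};
      r[0] = v;                                 // movd: value lands in word 0
      for (int i = 1; i < 4; ++i) r[i] = r[0];  // pshuflw 0x0: low words
      // pshufd 0x0 broadcasts dword 0 (words 0 and 1) into every dword,
      // which also fills words 4..7, so a separate pshufhw is unnecessary.
      for (int i = 1; i < 4; ++i) {
        r[2 * i] = r[0];
        r[2 * i + 1] = r[1];
      }
      return r;
    }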
@@ -2571,77 +2644,77 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
- case kAtomicExchangeInt8: {
+ case kWord32AtomicExchangeInt8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeUint8: {
+ case kWord32AtomicExchangeUint8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movzxbl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeInt16: {
+ case kWord32AtomicExchangeInt16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movsxwl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeUint16: {
+ case kWord32AtomicExchangeUint16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movzxwl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kAtomicExchangeWord32: {
+ case kWord32AtomicExchangeWord32: {
__ xchgl(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kAtomicCompareExchangeInt8: {
+ case kWord32AtomicCompareExchangeInt8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movsxbl(rax, rax);
break;
}
- case kAtomicCompareExchangeUint8: {
+ case kWord32AtomicCompareExchangeUint8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movzxbl(rax, rax);
break;
}
- case kAtomicCompareExchangeInt16: {
+ case kWord32AtomicCompareExchangeInt16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movsxwl(rax, rax);
break;
}
- case kAtomicCompareExchangeUint16: {
+ case kWord32AtomicCompareExchangeUint16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movzxwl(rax, rax);
break;
}
- case kAtomicCompareExchangeWord32: {
+ case kWord32AtomicCompareExchangeWord32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
- case kAtomic##op##Int8: \
+ case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
__ movsxbl(rax, rax); \
break; \
- case kAtomic##op##Uint8: \
+ case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
__ movzxbl(rax, rax); \
break; \
- case kAtomic##op##Int16: \
+ case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
__ movsxwl(rax, rax); \
break; \
- case kAtomic##op##Uint16: \
+ case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
__ movzxwl(rax, rax); \
break; \
- case kAtomic##op##Word32: \
+ case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst, movl, cmpxchgl); \
break;
ATOMIC_BINOP_CASE(Add, addl)
@@ -2650,14 +2723,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orl)
ATOMIC_BINOP_CASE(Xor, xorl)
#undef ATOMIC_BINOP_CASE
- case kAtomicLoadInt8:
- case kAtomicLoadUint8:
- case kAtomicLoadInt16:
- case kAtomicLoadUint16:
- case kAtomicLoadWord32:
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
+ case kWord32AtomicLoadInt8:
+ case kWord32AtomicLoadUint8:
+ case kWord32AtomicLoadInt16:
+ case kWord32AtomicLoadUint16:
+ case kWord32AtomicLoadWord32:
+ case kWord32AtomicStoreWord8:
+ case kWord32AtomicStoreWord16:
+ case kWord32AtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
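Beyond the kAtomic* to kWord32Atomic* renames these cases are unchanged, and they keep the usual x86 idioms: xchg with a memory operand is implicitly locked, and lock cmpxchg compares rax against memory, storing the new value on a match and leaving the observed value in rax either way (hence the movsx/movzx afterwards to re-canonicalize narrow results). What lock cmpxchg implements, sketched as one atomic step:

    #include <cstdint>

    // Executed atomically by the hardware; this sketch ignores the ZF flag
    // the instruction also sets.
    uint32_t CompareExchange(uint32_t* mem, uint32_t expected /* rax */,
                             uint32_t new_value) {
      uint32_t observed = *mem;
      if (observed == expected) *mem = new_value;
      return observed;  // cmpxchg leaves the observed value in rax
    }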
@@ -2700,6 +2773,19 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
+#undef ASSEMBLE_UNOP
+#undef ASSEMBLE_BINOP
+#undef ASSEMBLE_COMPARE
+#undef ASSEMBLE_MULT
+#undef ASSEMBLE_SHIFT
+#undef ASSEMBLE_MOVX
+#undef ASSEMBLE_SSE_BINOP
+#undef ASSEMBLE_SSE_UNOP
+#undef ASSEMBLE_AVX_BINOP
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+#undef ASSEMBLE_ATOMIC_BINOP
+
} // namespace
// Assembles branches after this instruction.
@@ -2718,6 +2804,19 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
+ Instruction* instr) {
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ movl(kScratchRegister, Immediate(0));
+ __ cmovq(FlagsConditionToCondition(condition), kSpeculationPoisonRegister,
+ kScratchRegister);
+}
+
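AssembleBranchPoisoning keeps the poison mask honest across control flow: the condition is negated, and a cmov zeroes kSpeculationPoisonRegister exactly when the flags say this arm of the branch should not have been reached. As straight-line logic:

    #include <cstdint>

    // Run at the head of a branch arm: if the guarding condition did not
    // actually hold (we are here only under misspeculation), collapse the
    // mask so every later poisoned load reads as zero.
    uint64_t UpdatePoisonOnBranch(bool condition_held, uint64_t poison) {
      return condition_held ? poison : 0;  // NegateFlagsCondition + cmovq
    }

Per the TODO, unordered float comparisons are left unhandled for now.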
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
Label::Distance flabel_distance =
@@ -2799,8 +2898,8 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ isolate()),
0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
- CallDescriptor* descriptor = gen_->linkage()->GetIncomingDescriptor();
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
    // Use rcx as a scratch register; we return immediately anyway.
__ Ret(static_cast<int>(pop_size), rcx);
} else {
@@ -2858,7 +2957,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ bind(&done);
}
-
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
X64OperandConverter i(this, instr);
Register input = i.InputRegister(0);
@@ -2892,9 +2990,9 @@ static const int kQuadWordSize = 16;
} // namespace
void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
frame->AlignSavedCalleeRegisterSlots();
if (saves_fp != 0) { // Save callee-saved XMM registers.
@@ -2903,7 +3001,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
(kQuadWordSize / kPointerSize));
}
}
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
int count = 0;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
@@ -2916,16 +3014,16 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
int pc_base = __ pc_offset();
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
- } else if (descriptor->IsJSFunctionCall()) {
+ } else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (descriptor->PushArgumentCount()) {
+ if (call_descriptor->PushArgumentCount()) {
__ pushq(kJavaScriptCallArgCountRegister);
}
} else {
@@ -2934,8 +3032,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+ int shrink_slots = frame()->GetTotalFrameSlotCount() -
+ call_descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -2948,10 +3046,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
+ InitializePoisonForLoadsIfNeeded();
}
- const RegList saves = descriptor->CalleeSavedRegisters();
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
if (info()->IsWasm() && shrink_slots > 128) {
@@ -3024,10 +3123,10 @@ void CodeGenerator::AssembleConstructFrame() {
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
// Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -3038,7 +3137,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ popq(Register::from_code(i));
}
}
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize;
@@ -3058,11 +3157,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Might need rcx for scratch if pop_size is too big or if there is a variable
// pop count.
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rdx.bit());
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rcx.bit());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rdx.bit());
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
X64OperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
+ if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
@@ -3097,147 +3196,159 @@ void CodeGenerator::FinishCode() {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
X64OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- __ movq(g.ToRegister(destination), src);
- } else {
- __ movq(g.ToOperand(destination), src);
- }
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Operand src = g.ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ movq(dst, src);
- } else {
- // Spill on demand to use a temporary register for memory-to-memory
- // moves.
- Register tmp = kScratchRegister;
- Operand dst = g.ToOperand(destination);
- __ movq(tmp, src);
- __ movq(dst, tmp);
- }
- } else if (source->IsConstant()) {
- ConstantOperand* constant_source = ConstantOperand::cast(source);
- Constant src = g.ToConstant(constant_source);
- if (destination->IsRegister() || destination->IsStackSlot()) {
- Register dst = destination->IsRegister() ? g.ToRegister(destination)
- : kScratchRegister;
- switch (src.type()) {
- case Constant::kInt32: {
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
- __ movq(dst, src.ToInt64(), src.rmode());
- } else {
- int32_t value = src.ToInt32();
- if (RelocInfo::IsWasmSizeReference(src.rmode())) {
- __ movl(dst, Immediate(value, src.rmode()));
- } else if (value == 0) {
- __ xorl(dst, dst);
- } else {
- __ movl(dst, Immediate(value));
- }
- }
- break;
- }
- case Constant::kInt64:
- if (RelocInfo::IsWasmPtrReference(src.rmode())) {
- __ movq(dst, src.ToInt64(), src.rmode());
- } else {
- DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
- __ Set(dst, src.ToInt64());
- }
- break;
- case Constant::kFloat32:
- __ MoveNumber(dst, src.ToFloat32());
- break;
- case Constant::kFloat64:
- __ MoveNumber(dst, src.ToFloat64().value());
- break;
- case Constant::kExternalReference:
- __ Move(dst, src.ToExternalReference());
- break;
- case Constant::kHeapObject: {
- Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
- if (IsMaterializableFromRoot(src_object, &index)) {
- __ LoadRoot(dst, index);
+ // Helper function to write the given constant to the dst register.
+ auto MoveConstantToRegister = [&](Register dst, Constant src) {
+ switch (src.type()) {
+ case Constant::kInt32: {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ __ movq(dst, src.ToInt64(), src.rmode());
+ } else {
+ int32_t value = src.ToInt32();
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
+ __ movl(dst, Immediate(value, src.rmode()));
+ } else if (value == 0) {
+ __ xorl(dst, dst);
} else {
- __ Move(dst, src_object);
+ __ movl(dst, Immediate(value));
}
- break;
}
- case Constant::kRpoNumber:
- UNREACHABLE(); // TODO(dcarney): load of labels on x64.
- break;
+ break;
}
- if (destination->IsStackSlot()) {
- __ movq(g.ToOperand(destination), kScratchRegister);
+ case Constant::kInt64:
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
+ __ movq(dst, src.ToInt64(), src.rmode());
+ } else {
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
+ __ Set(dst, src.ToInt64());
+ }
+ break;
+ case Constant::kFloat32:
+ __ MoveNumber(dst, src.ToFloat32());
+ break;
+ case Constant::kFloat64:
+ __ MoveNumber(dst, src.ToFloat64().value());
+ break;
+ case Constant::kExternalReference:
+ __ Move(dst, src.ToExternalReference());
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object);
+ }
+ break;
}
- } else if (src.type() == Constant::kFloat32) {
- // TODO(turbofan): Can we do better here?
- uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
- if (destination->IsFPRegister()) {
- __ Move(g.ToDoubleRegister(destination), src_const);
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(dcarney): load of labels on x64.
+ break;
+ }
+ };
+ // Dispatch on the source and destination operand kinds.
+ switch (MoveType::InferMove(source, destination)) {
+ case MoveType::kRegisterToRegister:
+ if (source->IsRegister()) {
+ __ movq(g.ToRegister(destination), g.ToRegister(source));
} else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst = g.ToOperand(destination);
- __ movl(dst, Immediate(src_const));
+ DCHECK(source->IsFPRegister());
+ __ Movapd(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
}
- } else {
- DCHECK_EQ(Constant::kFloat64, src.type());
- uint64_t src_const = src.ToFloat64().AsUint64();
- if (destination->IsFPRegister()) {
- __ Move(g.ToDoubleRegister(destination), src_const);
+ return;
+ case MoveType::kRegisterToStack: {
+ Operand dst = g.ToOperand(destination);
+ if (source->IsRegister()) {
+ __ movq(dst, g.ToRegister(source));
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(dst, src);
+ } else {
+ __ Movups(dst, src);
+ }
+ }
+ return;
+ }
+ case MoveType::kStackToRegister: {
+ Operand src = g.ToOperand(source);
+ if (source->IsStackSlot()) {
+ __ movq(g.ToRegister(destination), src);
} else {
- DCHECK(destination->IsFPStackSlot());
- __ movq(kScratchRegister, src_const);
- __ movq(g.ToOperand(destination), kScratchRegister);
+ DCHECK(source->IsFPStackSlot());
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(dst, src);
+ } else {
+ __ Movups(dst, src);
+ }
}
+ return;
}
- } else if (source->IsFPRegister()) {
- XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Movapd(dst, src);
- } else {
- DCHECK(destination->IsFPStackSlot());
+ case MoveType::kStackToStack: {
+ Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
- MachineRepresentation rep =
- LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(dst, src);
- } else {
- __ Movups(dst, src);
+ if (source->IsStackSlot()) {
+ // Spill on demand to use a temporary register for memory-to-memory
+ // moves.
+ __ movq(kScratchRegister, src);
+ __ movq(dst, kScratchRegister);
+ } else {
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(dst, kScratchDoubleReg);
+ } else {
+ DCHECK(source->IsSimd128StackSlot());
+ __ Movups(kScratchDoubleReg, src);
+ __ Movups(dst, kScratchDoubleReg);
+ }
}
+ return;
}
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- Operand src = g.ToOperand(source);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (destination->IsFPRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(dst, src);
+ case MoveType::kConstantToRegister: {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister()) {
+ MoveConstantToRegister(g.ToRegister(destination), src);
} else {
- __ Movups(dst, src);
+ DCHECK(destination->IsFPRegister());
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ if (src.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ __ Move(dst, bit_cast<uint32_t>(src.ToFloat32()));
+ } else {
+ DCHECK_EQ(src.type(), Constant::kFloat64);
+ __ Move(dst, src.ToFloat64().AsUint64());
+ }
}
- } else {
+ return;
+ }
+ case MoveType::kConstantToStack: {
+ Constant src = g.ToConstant(source);
Operand dst = g.ToOperand(destination);
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(kScratchDoubleReg, src);
- __ Movsd(dst, kScratchDoubleReg);
+ if (destination->IsStackSlot()) {
+ MoveConstantToRegister(kScratchRegister, src);
+ __ movq(dst, kScratchRegister);
} else {
- __ Movups(kScratchDoubleReg, src);
- __ Movups(dst, kScratchDoubleReg);
+ DCHECK(destination->IsFPStackSlot());
+ if (src.type() == Constant::kFloat32) {
+ __ movl(dst, Immediate(bit_cast<uint32_t>(src.ToFloat32())));
+ } else {
+ DCHECK_EQ(src.type(), Constant::kFloat64);
+ __ movq(kScratchRegister, src.ToFloat64().AsUint64());
+ __ movq(dst, kScratchRegister);
+ }
}
+ return;
}
- } else {
- UNREACHABLE();
}
+ UNREACHABLE();
}
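AssembleMove now dispatches once on an inferred move type instead of re-testing operand kinds at every branch, with constant materialization factored into the MoveConstantToRegister lambda so the register and stack destinations share it. The classification presumably looks roughly like this (a sketch; MoveType::InferMove itself lives elsewhere in the code generator):

    enum class MoveKind {
      kRegisterToRegister, kRegisterToStack, kStackToRegister,
      kStackToStack, kConstantToRegister, kConstantToStack,
    };

    // Hypothetical mirror of the inference: constants first, then classify
    // the remaining pair by whether each side is a register.
    MoveKind InferMoveSketch(bool src_is_constant, bool src_in_register,
                             bool dst_in_register) {
      if (src_is_constant) {
        return dst_in_register ? MoveKind::kConstantToRegister
                               : MoveKind::kConstantToStack;
      }
      if (src_in_register) {
        return dst_in_register ? MoveKind::kRegisterToRegister
                               : MoveKind::kRegisterToStack;
      }
      return dst_in_register ? MoveKind::kStackToRegister
                             : MoveKind::kStackToStack;
    }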
@@ -3246,88 +3357,95 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = g.ToRegister(source);
- Register dst = g.ToRegister(destination);
- __ movq(kScratchRegister, src);
- __ movq(src, dst);
- __ movq(dst, kScratchRegister);
- } else if (source->IsRegister() && destination->IsStackSlot()) {
- Register src = g.ToRegister(source);
- __ pushq(src);
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- Operand dst = g.ToOperand(destination);
- __ movq(src, dst);
- frame_access_state()->IncreaseSPDelta(-1);
- dst = g.ToOperand(destination);
- __ popq(dst);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
- (source->IsFPStackSlot() && destination->IsFPStackSlot())) {
- // Memory-memory.
- Operand src = g.ToOperand(source);
- Operand dst = g.ToOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- Register tmp = kScratchRegister;
- __ movq(tmp, dst);
- __ pushq(src); // Then use stack to copy src to destination.
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- __ popq(dst);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- __ movq(src, tmp);
- } else {
- // Without AVX, misaligned reads and writes will trap. Move using the
- // stack, in two parts.
- __ movups(kScratchDoubleReg, dst); // Save dst in scratch register.
- __ pushq(src); // Then use stack to copy src to destination.
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- __ popq(dst);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- __ pushq(g.ToOperand(source, kPointerSize));
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kPointerSize);
- __ popq(g.ToOperand(destination, kPointerSize));
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kPointerSize);
- __ movups(src, kScratchDoubleReg);
- }
- } else if (source->IsFPRegister() && destination->IsFPRegister()) {
- // XMM register-register swap.
- XMMRegister src = g.ToDoubleRegister(source);
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Movapd(kScratchDoubleReg, src);
- __ Movapd(src, dst);
- __ Movapd(dst, kScratchDoubleReg);
- } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
- // XMM register-memory swap.
- XMMRegister src = g.ToDoubleRegister(source);
- Operand dst = g.ToOperand(destination);
- MachineRepresentation rep = LocationOperand::cast(source)->representation();
- if (rep != MachineRepresentation::kSimd128) {
- __ Movsd(kScratchDoubleReg, src);
- __ Movsd(src, dst);
- __ Movsd(dst, kScratchDoubleReg);
- } else {
- __ Movups(kScratchDoubleReg, src);
- __ Movups(src, dst);
- __ Movups(dst, kScratchDoubleReg);
+ switch (MoveType::InferSwap(source, destination)) {
+ case MoveType::kRegisterToRegister: {
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ movq(kScratchRegister, src);
+ __ movq(src, dst);
+ __ movq(dst, kScratchRegister);
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ Movapd(kScratchDoubleReg, src);
+ __ Movapd(src, dst);
+ __ Movapd(dst, kScratchDoubleReg);
+ }
+ return;
+ }
+ case MoveType::kRegisterToStack: {
+ if (source->IsRegister()) {
+ Register src = g.ToRegister(source);
+ __ pushq(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ movq(src, g.ToOperand(destination));
+ frame_access_state()->IncreaseSPDelta(-1);
+ __ popq(g.ToOperand(destination));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ } else {
+ DCHECK(source->IsFPRegister());
+ XMMRegister src = g.ToDoubleRegister(source);
+ Operand dst = g.ToOperand(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(src, dst);
+ __ Movsd(dst, kScratchDoubleReg);
+ } else {
+ __ Movups(kScratchDoubleReg, src);
+ __ Movups(src, dst);
+ __ Movups(dst, kScratchDoubleReg);
+ }
+ }
+ return;
}
- } else {
- // No other combinations are possible.
- UNREACHABLE();
+ case MoveType::kStackToStack: {
+ Operand src = g.ToOperand(source);
+ Operand dst = g.ToOperand(destination);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ Register tmp = kScratchRegister;
+ __ movq(tmp, dst);
+ __ pushq(src); // Then use stack to copy src to destination.
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ popq(dst);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ __ movq(src, tmp);
+ } else {
+ // Without AVX, misaligned reads and writes will trap. Move using the
+ // stack, in two parts.
+ __ movups(kScratchDoubleReg, dst); // Save dst in scratch register.
+ __ pushq(src); // Then use stack to copy src to destination.
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ popq(dst);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ __ pushq(g.ToOperand(source, kPointerSize));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ __ popq(g.ToOperand(destination, kPointerSize));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ __ movups(src, kScratchDoubleReg);
+ }
+ return;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
-
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ dq(targets[index]);
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 6d9bc6f820..e758072050 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -149,9 +149,12 @@ namespace compiler {
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
V(X64F32x4RecipApprox) \
V(X64F32x4RecipSqrtApprox) \
V(X64F32x4Add) \
+ V(X64F32x4AddHoriz) \
V(X64F32x4Sub) \
V(X64F32x4Mul) \
V(X64F32x4Min) \
@@ -266,8 +269,6 @@ namespace compiler {
V(M8I) /* [ %r2*8 + K] */ \
V(Root) /* [%root + K] */
-enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };
-
} // namespace compiler
} // namespace internal
} // namespace v8
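With the x64-local X64MemoryProtection enum gone, the opcode's MiscField carries the shared MemoryAccessMode (unprotected, protected, or poisoned) used by the selector and code generator above. V8 packs such enums into a few bits of the InstructionCode with BitField-style helpers; a generic sketch of that encode/decode shape (field position and width here are placeholders, not the real MiscField layout):

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitFieldSketch {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    enum MemoryAccessModeSketch { kUnprotectedS, kProtectedS, kPoisonedS };
    using MiscFieldSketch = BitFieldSketch<MemoryAccessModeSketch, 22, 10>;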
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index c16fee5861..1d0e182303 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -128,7 +128,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4ReplaceLane:
case kX64F32x4RecipApprox:
case kX64F32x4RecipSqrtApprox:
+ case kX64F32x4Abs:
+ case kX64F32x4Neg:
case kX64F32x4Add:
+ case kX64F32x4AddHoriz:
case kX64F32x4Sub:
case kX64F32x4Mul:
case kX64F32x4Min:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index a0f14c687c..25dc5e9658 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -307,11 +307,16 @@ void InstructionSelector::VisitLoad(Node* node) {
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
- code |= MiscField::encode(X64MemoryProtection::kProtected);
+ code |= MiscField::encode(kMemoryAccessProtected);
+ } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ code |= MiscField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
@@ -391,7 +396,7 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
- MiscField::encode(X64MemoryProtection::kProtected);
+ MiscField::encode(kMemoryAccessProtected);
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
@@ -1139,7 +1144,8 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
return false;
}
}
- case IrOpcode::kLoad: {
+ case IrOpcode::kLoad:
+ case IrOpcode::kPoisonedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
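kPoisonedLoad can share this path because poisoning does not disturb the property the comment relies on: on x64, any instruction that writes a 32-bit register clears bits 63..32, and ANDing with the 64-bit poison mask keeps the upper bits zero. In C++ terms:

    #include <cstdint>

    // movl/movzxbl and friends already produce the zero-extended 64-bit
    // value, so the Word32 -> Word64 change needs no extra instruction.
    uint64_t ZeroExtendViaMovl(uint32_t loaded) {
      return loaded;  // bits 63..32 are zero by construction
    }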
@@ -1248,7 +1254,12 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(BitcastInt32ToFloat32, kX64BitcastIF) \
V(BitcastInt64ToFloat64, kX64BitcastLD) \
V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
- V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
+ V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32, kX64Movsxbl) \
+ V(SignExtendWord16ToInt32, kX64Movsxwl) \
+ V(SignExtendWord8ToInt64, kX64Movsxbq) \
+ V(SignExtendWord16ToInt64, kX64Movsxwq) \
+ V(SignExtendWord32ToInt64, kX64Movsxlq)
#define RR_OP_LIST(V) \
V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
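The five new RO_OP_LIST entries map the sign-extension machine operators straight onto the x64 movsx family, so each one lowers to a single instruction. Their semantics, written out in C++ (the implicit integer conversion is exactly the sign extension):

    #include <cstdint>

    int32_t SignExtendWord8ToInt32(int8_t v) { return v; }    // movsxbl
    int32_t SignExtendWord16ToInt32(int16_t v) { return v; }  // movsxwl
    int64_t SignExtendWord8ToInt64(int8_t v) { return v; }    // movsxbq
    int64_t SignExtendWord16ToInt64(int16_t v) { return v; }  // movsxwq
    int64_t SignExtendWord32ToInt64(int32_t v) { return v; }  // movsxlq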
@@ -1425,14 +1436,14 @@ void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
}
void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
X64OperandGenerator g(this);
// Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1477,9 +1488,9 @@ void InstructionSelector::EmitPrepareArguments(
}
}
-void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
X64OperandGenerator g(this);
int reverse_slot = 0;
@@ -1488,7 +1499,7 @@ void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
reverse_slot += output.location.GetSizeInPointers();
// Skip any alignment holes in nodes.
if (output.node == nullptr) continue;
- DCHECK(!descriptor->IsCFunctionCall());
+ DCHECK(!call_descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
@@ -1733,19 +1744,9 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
// Compare(Load(js_stack_limit), LoadStackPointer)
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode = cont->Encode(kX64StackCheck);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()));
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), g.UseImmediate(cont->trap_id()));
- }
+ CHECK(cont->IsBranch());
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
return;
}
}
@@ -1782,12 +1783,13 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, opcode, right, left, cont, false);
}
+} // namespace
+
// Shared routine for word comparison against zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
+ while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
Int32BinopMatcher m(value);
if (!m.right().Is(0)) break;
@@ -1796,23 +1798,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->Negate();
}
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kInt32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kUint32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
Int64BinopMatcher m(value);
@@ -1820,44 +1822,44 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// Try to combine the branch with a comparison.
Node* const user = m.node();
Node* const value = m.left().node();
- if (selector->CanCover(user, value)) {
+ if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kWord64And:
- return VisitWordCompare(selector, value, kX64Test, cont);
+ return VisitWordCompare(this, value, kX64Test, cont);
default:
break;
}
}
- return VisitCompareZero(selector, value, kX64Cmp, cont);
+ return VisitCompareZero(this, value, kX64Cmp, cont);
}
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kInt64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kUint64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord64Compare(selector, value, cont);
+ return VisitWord64Compare(this, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
+ return VisitFloat32Compare(this, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kFloat64LessThan: {
Float64BinopMatcher m(value);
if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
@@ -1871,16 +1873,16 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// avoids the costly Float64Abs.
cont->OverwriteAndNegateIfEqual(kNotEqual);
InstructionCode const opcode =
- selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
- return VisitCompare(selector, opcode, m.left().node(),
+ IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
+ return VisitCompare(this, opcode, m.left().node(),
m.right().InputAt(0), cont, false);
}
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
}
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
+ return VisitFloat64Compare(this, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
@@ -1892,23 +1894,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Add32, cont);
+ return VisitBinop(this, node, kX64Add32, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Sub32, cont);
+ return VisitBinop(this, node, kX64Sub32, cont);
case IrOpcode::kInt32MulWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Imul32, cont);
+ return VisitBinop(this, node, kX64Imul32, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Add, cont);
+ return VisitBinop(this, node, kX64Add, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX64Sub, cont);
+ return VisitBinop(this, node, kX64Sub, cont);
default:
break;
}
@@ -1916,51 +1918,16 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kInt32Sub:
- return VisitWordCompare(selector, value, kX64Cmp32, cont);
+ return VisitWordCompare(this, value, kX64Cmp32, cont);
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kX64Test32, cont);
+ return VisitWordCompare(this, value, kX64Test32, cont);
default:
break;
}
}
// Branch could not be combined with a compare, emit compare against 0.
- VisitCompareZero(selector, value, kX64Cmp32, cont);
-}
-
-} // namespace
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+ VisitCompareZero(this, value, kX64Cmp32, cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -1968,27 +1935,30 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
- static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 4 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
- InstructionOperand index_operand = g.TempRegister();
- if (sw.min_value) {
- // The leal automatically zero extends, so result is a valid 64-bit index.
- Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
- } else {
- // Zero extend, because we use it as 64-bit index into the jump table.
- Emit(kX64Movl, index_operand, value_operand);
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = g.TempRegister();
+ if (sw.min_value) {
+ // The leal automatically zero extends, so result is a valid 64-bit
+ // index.
+ Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ } else {
+ // Zero extend, because we use it as 64-bit index into the jump table.
+ Emit(kX64Movl, index_operand, value_operand);
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
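A note on the VisitSwitch hunk above: the table-vs-lookup decision is a weighted space/time cost model (a table costs about 4 + value_range units of space and 3 of time, a jump chain 3 + 2 * case_count and case_count, with time weighted by 3), now gated behind enable_switch_jump_table_. A minimal stand-alone sketch of the same arithmetic, using a hypothetical 10-case switch spanning 20 values:

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t kMaxTableSwitchValueRange = 2 << 16;  // 131072
  std::size_t case_count = 10, value_range = 20;     // hypothetical switch
  std::size_t table_space_cost = 4 + value_range;    // jump table entries
  std::size_t table_time_cost = 3;                   // bounds check + indirect jump
  std::size_t lookup_space_cost = 3 + 2 * case_count;
  std::size_t lookup_time_cost = case_count;         // linear cmp/jcc chain
  bool use_table = case_count > 4 &&
                   table_space_cost + 3 * table_time_cost <=
                       lookup_space_cost + 3 * lookup_time_cost &&
                   value_range <= kMaxTableSwitchValueRange;
  std::printf("emit %s\n", use_table ? "ArchTableSwitch" : "ArchLookupSwitch");
}

For these sample numbers the table wins (33 vs. 53 cost units), so the table path would be taken; the new guard simply disables that branch wholesale when jump tables are off.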
@@ -2001,7 +1971,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
Int32BinopMatcher m(user);
if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
}
VisitWordCompare(this, node, kX64Cmp32, &cont);
}
@@ -2182,7 +2152,7 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitAtomicLoad(Node* node) {
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
@@ -2191,7 +2161,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
VisitLoad(node);
}
-void InstructionSelector::VisitAtomicStore(Node* node) {
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2201,13 +2171,13 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
break;
case MachineRepresentation::kWord16:
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
break;
case MachineRepresentation::kWord32:
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
break;
default:
UNREACHABLE();
@@ -2229,7 +2199,7 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
-void InstructionSelector::VisitAtomicExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2238,15 +2208,15 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
- opcode = kAtomicExchangeInt8;
+ opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicExchangeUint8;
+ opcode = kWord32AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicExchangeInt16;
+ opcode = kWord32AtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicExchangeUint16;
+ opcode = kWord32AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicExchangeWord32;
+ opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2269,7 +2239,7 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2279,15 +2249,15 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
- opcode = kAtomicCompareExchangeInt8;
+ opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kAtomicCompareExchangeUint8;
+ opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kAtomicCompareExchangeInt16;
+ opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kAtomicCompareExchangeUint16;
+ opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kAtomicCompareExchangeWord32;
+ opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
@@ -2355,11 +2325,12 @@ void InstructionSelector::VisitAtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitAtomic##op(Node* node) { \
- VisitAtomicBinaryOperation(node, kAtomic##op##Int8, kAtomic##op##Uint8, \
- kAtomic##op##Int16, kAtomic##op##Uint16, \
- kAtomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitAtomicBinaryOperation( \
+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
+ kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2376,6 +2347,7 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_BINOP_LIST(V) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Min) \
@@ -2437,6 +2409,8 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Xor)
#define SIMD_UNOP_LIST(V) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(I32x4Neg) \
@@ -2565,6 +2539,9 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
+// static
+bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
index 8bb5903e54..e1c6000d4f 100644
--- a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
+++ b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
-#define V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
+#ifndef V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
+#define V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
#include "src/eh-frame.h"
@@ -76,4 +76,4 @@ class UnwindingInfoWriter {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_X64_UNWINDING_INFO_WRITER_X64_H_
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index c1bca7557e..02337fb456 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -32,59 +32,41 @@ enum ContextLookupFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
- async_function_await_caught) \
- V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
- async_function_await_uncaught) \
- V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
- async_function_promise_create) \
- V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
- async_function_promise_release) \
- V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
- V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
- V(MAKE_ERROR_INDEX, JSFunction, make_error) \
- V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
- V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
- V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
- V(OBJECT_CREATE, JSFunction, object_create) \
- V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
- V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
- V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
- V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
- V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
- V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
- V(OBJECT_KEYS, JSFunction, object_keys) \
- V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
- V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
- V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
- V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
- V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
- V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
- V(TYPED_ARRAY_CONSTRUCT_BY_ARRAY_BUFFER_INDEX, JSFunction, \
- typed_array_construct_by_array_buffer) \
- V(TYPED_ARRAY_CONSTRUCT_BY_ARRAY_LIKE_INDEX, JSFunction, \
- typed_array_construct_by_array_like) \
- V(TYPED_ARRAY_CONSTRUCT_BY_LENGTH_INDEX, JSFunction, \
- typed_array_construct_by_length) \
- V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
- V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
- promise_internal_constructor) \
- V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
- V(IS_PROMISE_INDEX, JSFunction, is_promise) \
- V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle) \
- V(PROMISE_HANDLE_REJECT_INDEX, JSFunction, promise_handle_reject) \
- V(ASYNC_GENERATOR_AWAIT_CAUGHT, JSFunction, async_generator_await_caught) \
- V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, async_generator_await_uncaught)
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
+ async_function_promise_create) \
+ V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
+ async_function_promise_release) \
+ V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
+ V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
+ V(MAKE_ERROR_INDEX, JSFunction, make_error) \
+ V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
+ V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
+ V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
+ V(OBJECT_CREATE, JSFunction, object_create) \
+ V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
+ V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
+ V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
+ V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
+ V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
+ V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
+ V(OBJECT_KEYS, JSFunction, object_keys) \
+ V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
+ V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
+ V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+ V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
+ V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
+ V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
+ V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
+ V(MATH_POW_INDEX, JSFunction, math_pow) \
+ V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
+ promise_internal_constructor) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
V(ARRAY_POP_INDEX, JSFunction, array_pop) \
V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
@@ -95,7 +77,6 @@ enum ContextLookupFlags {
V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
V(ERROR_TO_STRING, JSFunction, error_to_string) \
V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
@@ -121,8 +102,13 @@ enum ContextLookupFlags {
V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
wasm_compile_error_function) \
V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
- V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, wasm_runtime_error_function)
+ V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_runtime_error_function) \
+ V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
+ V(WEAKSET_ADD_INDEX, JSFunction, weakset_add)
+// If you add something here, also add it to ARRAY_ITERATOR_LIST in
+// bootstrapper.cc.
#define NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V) \
V(TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, typed_array_key_iterator_map) \
V(FAST_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, fast_array_key_iterator_map) \
@@ -146,6 +132,10 @@ enum ContextLookupFlags {
float64_array_key_value_iterator_map) \
V(UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
uint8_clamped_array_key_value_iterator_map) \
+ V(BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ biguint64_array_key_value_iterator_map) \
+ V(BIGINT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ bigint64_array_key_value_iterator_map) \
\
V(FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
fast_smi_array_key_value_iterator_map) \
@@ -176,6 +166,10 @@ enum ContextLookupFlags {
float64_array_value_iterator_map) \
V(UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
uint8_clamped_array_value_iterator_map) \
+ V(BIGUINT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ biguint64_array_value_iterator_map) \
+ V(BIGINT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ bigint64_array_value_iterator_map) \
\
V(FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
fast_smi_array_value_iterator_map) \
@@ -197,35 +191,20 @@ enum ContextLookupFlags {
V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \
accessor_property_descriptor_map) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
- V(ALLOW_WASM_EVAL_INDEX, Object, allow_wasm_eval) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
V(ARRAY_BUFFER_NOINIT_FUN_INDEX, JSFunction, array_buffer_noinit_fun) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX, Map, async_from_sync_iterator_map) \
- V(ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_function_await_reject_shared_fun) \
- V(ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_function_await_resolve_shared_fun) \
V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor) \
V(ASYNC_GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
async_generator_function_function) \
V(ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN, SharedFunctionInfo, \
async_iterator_value_unwrap_shared_fun) \
- V(ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_generator_await_reject_shared_fun) \
- V(ASYNC_GENERATOR_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_await_resolve_shared_fun) \
- V(ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_yield_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_closed_resolve_shared_fun) \
- V(ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN, SharedFunctionInfo, \
- async_generator_return_closed_reject_shared_fun) \
V(ATOMICS_OBJECT, JSObject, atomics_object) \
V(BIGINT_FUNCTION_INDEX, JSFunction, bigint_function) \
+ V(BIGINT64_ARRAY_FUN_INDEX, JSFunction, bigint64_array_fun) \
+ V(BIGUINT64_ARRAY_FUN_INDEX, JSFunction, biguint64_array_fun) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map, \
bound_function_with_constructor_map) \
@@ -261,13 +240,18 @@ enum ContextLookupFlags {
V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_array_iterator_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
+ V(INITIAL_ARRAY_PROTOTYPE_MAP_INDEX, Map, initial_array_prototype_map) \
V(INITIAL_ERROR_PROTOTYPE_INDEX, JSObject, initial_error_prototype) \
V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype) \
V(INITIAL_ASYNC_GENERATOR_PROTOTYPE_INDEX, JSObject, \
initial_async_generator_prototype) \
V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \
+ V(INITIAL_MAP_PROTOTYPE_MAP_INDEX, Map, initial_map_prototype_map) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(INITIAL_SET_PROTOTYPE_MAP_INDEX, Map, initial_set_prototype_map) \
V(INITIAL_STRING_PROTOTYPE_INDEX, JSObject, initial_string_prototype) \
+ V(INITIAL_WEAKMAP_PROTOTYPE_MAP_INDEX, Map, initial_weakmap_prototype_map) \
+ V(INITIAL_WEAKSET_PROTOTYPE_MAP_INDEX, Map, initial_weakset_prototype_map) \
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
@@ -320,9 +304,10 @@ enum ContextLookupFlags {
V(PROXY_REVOKE_SHARED_FUN, SharedFunctionInfo, proxy_revoke_shared_fun) \
V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \
promise_get_capabilities_executor_shared_fun) \
- V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
- promise_resolve_shared_fun) \
- V(PROMISE_REJECT_SHARED_FUN, SharedFunctionInfo, promise_reject_shared_fun) \
+ V(PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX, SharedFunctionInfo, \
+ promise_capability_default_reject_shared_fun) \
+ V(PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX, SharedFunctionInfo, \
+ promise_capability_default_resolve_shared_fun) \
V(PROMISE_THEN_FINALLY_SHARED_FUN, SharedFunctionInfo, \
promise_then_finally_shared_fun) \
V(PROMISE_CATCH_FINALLY_SHARED_FUN, SharedFunctionInfo, \
@@ -333,7 +318,7 @@ enum ContextLookupFlags {
promise_thrower_finally_shared_fun) \
V(PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN, SharedFunctionInfo, \
promise_all_resolve_element_shared_fun) \
- V(PROMISE_PROTOTYPE_MAP_INDEX, Map, promise_prototype_map) \
+ V(PROMISE_PROTOTYPE_INDEX, JSObject, promise_prototype) \
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \
@@ -356,7 +341,7 @@ enum ContextLookupFlags {
slow_object_with_null_prototype_map) \
V(SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP, Map, \
slow_object_with_object_prototype_map) \
- V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, NumberDictionary, \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, SimpleNumberDictionary, \
slow_template_instantiations_cache) \
/* All *_FUNCTION_MAP_INDEX definitions used by Context::FunctionMapIndex */ \
/* must remain together. */ \
@@ -406,7 +391,6 @@ enum ContextLookupFlags {
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
- V(TEMPLATE_MAP_INDEX, HeapObject, template_map) \
V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 21f90a50ae..b64f016df2 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -72,8 +72,10 @@ inline double DoubleToInteger(double x) {
int32_t DoubleToInt32(double x) {
- int32_t i = FastD2I(x);
- if (FastI2D(i) == x) return i;
+ if ((std::isfinite(x)) && (x <= INT_MAX) && (x >= INT_MIN)) {
+ int32_t i = static_cast<int32_t>(x);
+ if (FastI2D(i) == x) return i;
+ }
Double d(x);
int exponent = d.Exponent();
if (exponent < 0) {
@@ -94,14 +96,15 @@ bool DoubleToSmiInteger(double value, int* smi_int_value) {
}
bool IsSmiDouble(double value) {
- return !IsMinusZero(value) && value >= Smi::kMinValue &&
- value <= Smi::kMaxValue && value == FastI2D(FastD2I(value));
+ return std::isfinite(value) && !IsMinusZero(value) &&
+ value >= Smi::kMinValue && value <= Smi::kMaxValue &&
+ value == FastI2D(FastD2I(value));
}
bool IsInt32Double(double value) {
- return !IsMinusZero(value) && value >= kMinInt && value <= kMaxInt &&
- value == FastI2D(FastD2I(value));
+ return std::isfinite(value) && !IsMinusZero(value) && value >= kMinInt &&
+ value <= kMaxInt && value == FastI2D(FastD2I(value));
}
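The conversions-inl.h hunks above all add the same guard: std::isfinite plus a range check before the fast double-to-int cast, since casting a NaN, infinity, or out-of-range double to int32_t is undefined behavior in C++. A minimal stand-alone sketch of the guarded pattern, with hypothetical sample values (out-of-range inputs fall through to the slow bit-twiddling path in the real code; here they just report as slow):

#include <climits>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  double samples[] = {42.0, 1e12, NAN};
  for (double x : samples) {
    if (std::isfinite(x) && x <= INT_MAX && x >= INT_MIN) {
      int32_t i = static_cast<int32_t>(x);  // now well-defined
      if (static_cast<double>(i) == x) {
        std::printf("%g -> fast path: %d\n", x, i);
        continue;
      }
    }
    std::printf("%g -> slow path\n", x);
  }
}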
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index c5ea1b8366..827ccbd773 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -175,8 +175,8 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache, Iterator current,
}
// ES6 18.2.5 parseInt(string, radix) (with NumberParseIntHelper subclass);
-// https://tc39.github.io/proposal-bigint/#sec-bigint-parseint-string-radix
-// (with BigIntParseIntHelper subclass).
+// and BigInt parsing cases from https://tc39.github.io/proposal-bigint/
+// (with StringToBigIntHelper subclass).
class StringToIntHelper {
public:
StringToIntHelper(Isolate* isolate, Handle<String> subject, int radix)
@@ -852,17 +852,12 @@ double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
return helper.GetResult();
}
-class BigIntParseIntHelper : public StringToIntHelper {
+class StringToBigIntHelper : public StringToIntHelper {
public:
- enum class Behavior { kParseInt, kStringToBigInt, kLiteral };
-
- // Used for BigInt.parseInt API, where the input is a Heap-allocated String.
- BigIntParseIntHelper(Isolate* isolate, Handle<String> string, int radix)
- : StringToIntHelper(isolate, string, radix),
- behavior_(Behavior::kParseInt) {}
+ enum class Behavior { kStringToBigInt, kLiteral };
// Used for StringToBigInt operation (BigInt constructor and == operator).
- BigIntParseIntHelper(Isolate* isolate, Handle<String> string)
+ StringToBigIntHelper(Isolate* isolate, Handle<String> string)
: StringToIntHelper(isolate, string),
behavior_(Behavior::kStringToBigInt) {
set_allow_binary_and_octal_prefixes();
@@ -871,7 +866,7 @@ class BigIntParseIntHelper : public StringToIntHelper {
// Used for parsing BigInt literals, where the input is a buffer of
// one-byte ASCII digits, along with an optional radix prefix.
- BigIntParseIntHelper(Isolate* isolate, const uint8_t* string, int length)
+ StringToBigIntHelper(Isolate* isolate, const uint8_t* string, int length)
: StringToIntHelper(isolate, string, length),
behavior_(Behavior::kLiteral) {
set_allow_binary_and_octal_prefixes();
@@ -884,9 +879,7 @@ class BigIntParseIntHelper : public StringToIntHelper {
return MaybeHandle<BigInt>();
}
if (state() == kEmpty) {
- if (behavior_ == Behavior::kParseInt) {
- set_state(kJunk);
- } else if (behavior_ == Behavior::kStringToBigInt) {
+ if (behavior_ == Behavior::kStringToBigInt) {
set_state(kZero);
} else {
UNREACHABLE();
@@ -924,9 +917,12 @@ class BigIntParseIntHelper : public StringToIntHelper {
    // Optimization opportunity: Would it make sense to scan for trailing
// junk before allocating the result?
int charcount = length() - cursor();
- // TODO(adamk): Pretenure if this is for a literal.
- MaybeHandle<FreshlyAllocatedBigInt> maybe =
- BigInt::AllocateFor(isolate(), radix(), charcount, should_throw());
+ // For literals, we pretenure the allocated BigInt, since it's about
+ // to be stored in the interpreter's constants array.
+ PretenureFlag pretenure =
+ behavior_ == Behavior::kLiteral ? TENURED : NOT_TENURED;
+ MaybeHandle<FreshlyAllocatedBigInt> maybe = BigInt::AllocateFor(
+ isolate(), radix(), charcount, should_throw(), pretenure);
if (!maybe.ToHandle(&result_)) {
set_state(kError);
}
@@ -938,28 +934,20 @@ class BigIntParseIntHelper : public StringToIntHelper {
}
private:
- ShouldThrow should_throw() const {
- return behavior_ == Behavior::kParseInt ? kThrowOnError : kDontThrow;
- }
+ ShouldThrow should_throw() const { return kDontThrow; }
Handle<FreshlyAllocatedBigInt> result_;
Behavior behavior_;
};
-MaybeHandle<BigInt> BigIntParseInt(Isolate* isolate, Handle<String> string,
- int radix) {
- BigIntParseIntHelper helper(isolate, string, radix);
- return helper.GetResult();
-}
-
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
string = String::Flatten(string);
- BigIntParseIntHelper helper(isolate, string);
+ StringToBigIntHelper helper(isolate, string);
return helper.GetResult();
}
MaybeHandle<BigInt> BigIntLiteral(Isolate* isolate, const char* string) {
- BigIntParseIntHelper helper(isolate, reinterpret_cast<const uint8_t*>(string),
+ StringToBigIntHelper helper(isolate, reinterpret_cast<const uint8_t*>(string),
static_cast<int>(strlen(string)));
return helper.GetResult();
}
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 915a286e8f..6189fe0aa1 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -105,9 +105,6 @@ double StringToDouble(UnicodeCache* unicode_cache,
double StringToInt(Isolate* isolate, Handle<String> string, int radix);
-// This follows BigInt.parseInt semantics: "" => SyntaxError.
-MaybeHandle<BigInt> BigIntParseInt(Isolate* isolate, Handle<String> string,
- int radix);
// This follows https://tc39.github.io/proposal-bigint/#sec-string-to-bigint
// semantics: "" => 0n.
MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string);
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index e41fa276a8..001beb938e 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -84,7 +84,6 @@ void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
if (Enabled()) {
- // Compute the delta between start and stop, in microseconds.
int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
? timer->Elapsed().InMicroseconds()
: timer->Elapsed().InMilliseconds();
@@ -96,6 +95,21 @@ void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
}
}
+void TimedHistogram::RecordAbandon(base::ElapsedTimer* timer,
+ Isolate* isolate) {
+ if (Enabled()) {
+ DCHECK(timer->IsStarted());
+ timer->Stop();
+ int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
+ ? base::TimeDelta::Max().InMicroseconds()
+ : base::TimeDelta::Max().InMilliseconds();
+ AddSample(static_cast<int>(sample));
+ }
+ if (isolate != nullptr) {
+ Logger::CallEventLogger(isolate, name(), Logger::END, true);
+ }
+}
+
Counters::Counters(Isolate* isolate)
: isolate_(isolate),
stats_table_(this),
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index b3c6f8c8ff..e06cb8b66d 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -220,6 +220,12 @@ class Histogram {
int max() const { return max_; }
int num_buckets() const { return num_buckets_; }
+ // Asserts that |expected_counters| is the same as the Counters this
+ // Histogram reports to.
+ void AssertReportsToCounters(Counters* expected_counters) {
+ DCHECK_EQ(counters_, expected_counters);
+ }
+
protected:
Histogram() {}
Histogram(const char* name, int min, int max, int num_buckets,
@@ -229,7 +235,9 @@ class Histogram {
max_(max),
num_buckets_(num_buckets),
histogram_(nullptr),
- counters_(counters) {}
+ counters_(counters) {
+ DCHECK(counters_);
+ }
Counters* counters() const { return counters_; }
@@ -261,6 +269,10 @@ class TimedHistogram : public Histogram {
// Stop the timer and record the results. Log if isolate non-null.
void Stop(base::ElapsedTimer* timer, Isolate* isolate);
+ // Records a TimeDelta::Max() result. Useful to record the percentage of
+ // that never got to run in a given scenario. Log if isolate non-null.
+ void RecordAbandon(base::ElapsedTimer* timer, Isolate* isolate);
+
protected:
friend class Counters;
HistogramTimerResolution resolution_;
@@ -282,6 +294,7 @@ class TimedHistogramScope {
: histogram_(histogram), isolate_(isolate) {
histogram_->Start(&timer_, isolate);
}
+
~TimedHistogramScope() { histogram_->Stop(&timer_, isolate_); }
private:
@@ -292,6 +305,42 @@ class TimedHistogramScope {
DISALLOW_IMPLICIT_CONSTRUCTORS(TimedHistogramScope);
};
+// Helper class for recording a TimedHistogram asynchronously with manual
+// controls (it will not generate a report if destroyed without explicitly
+// triggering a report). |async_counters| should be a shared_ptr to
+// |histogram->counters()|, making it is safe to report to an
+// AsyncTimedHistogram after the associated isolate has been destroyed.
+// AsyncTimedHistogram can be moved/copied to avoid computing Now() multiple
+// times when the times of multiple tasks are identical; each copy will generate
+// its own report.
+class AsyncTimedHistogram {
+ public:
+ explicit AsyncTimedHistogram(TimedHistogram* histogram,
+ std::shared_ptr<Counters> async_counters)
+ : histogram_(histogram), async_counters_(std::move(async_counters)) {
+ histogram_->AssertReportsToCounters(async_counters_.get());
+ histogram_->Start(&timer_, nullptr);
+ }
+
+ ~AsyncTimedHistogram() = default;
+
+ AsyncTimedHistogram(const AsyncTimedHistogram& other) = default;
+ AsyncTimedHistogram& operator=(const AsyncTimedHistogram& other) = default;
+ AsyncTimedHistogram(AsyncTimedHistogram&& other) = default;
+ AsyncTimedHistogram& operator=(AsyncTimedHistogram&& other) = default;
+
+ // Records the time elapsed to |histogram_| and stops |timer_|.
+ void RecordDone() { histogram_->Stop(&timer_, nullptr); }
+
+ // Records TimeDelta::Max() to |histogram_| and stops |timer_|.
+ void RecordAbandon() { histogram_->RecordAbandon(&timer_, nullptr); }
+
+ private:
+ base::ElapsedTimer timer_;
+ TimedHistogram* histogram_;
+ std::shared_ptr<Counters> async_counters_;
+};
+
// Helper class for scoping a TimedHistogram, where the histogram is selected at
// stop time rather than start time.
// TODO(leszeks): This is heavily reliant on TimedHistogram::Start() doing
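Usage-wise, the new AsyncTimedHistogram above is a copyable tracker: the timer starts at construction, and each copy can file its own report, but only when explicitly asked to via RecordDone() (elapsed time) or RecordAbandon() (the TimeDelta::Max() sentinel added to TimedHistogram above). A stand-alone sketch of the intended pattern, using a stand-in type rather than the real V8 class:

#include <cstdio>

// Stand-in for AsyncTimedHistogram: timing starts at construction, and each
// copy files at most one report, only when explicitly triggered.
struct TaskLatencyTracker {
  void RecordDone() { std::puts("report: elapsed task latency"); }
  void RecordAbandon() { std::puts("report: TimeDelta::Max() sentinel"); }
};

int main() {
  TaskLatencyTracker posted;         // constructed when the tasks are posted
  TaskLatencyTracker copy = posted;  // sibling task posted at the same time
  copy.RecordDone();                 // this task ran to completion
  posted.RecordAbandon();            // this one was cancelled before running
}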
@@ -643,6 +692,8 @@ class RuntimeCallTimer final {
V(ArrayBuffer_New) \
V(Array_CloneElementAt) \
V(Array_New) \
+ V(BigInt64Array_New) \
+ V(BigUint64Array_New) \
V(BooleanObject_BooleanValue) \
V(BooleanObject_New) \
V(Context_New) \
@@ -795,7 +846,6 @@ class RuntimeCallTimer final {
V(CompileBackgroundEval) \
V(CompileBackgroundIgnition) \
V(CompileBackgroundScript) \
- V(CompileBackgroundRenumber) \
V(CompileBackgroundRewriteReturnResult) \
V(CompileBackgroundScopeAnalysis) \
V(CompileDeserialize) \
@@ -805,7 +855,6 @@ class RuntimeCallTimer final {
V(CompileGetFromOptimizedCodeMap) \
V(CompileIgnition) \
V(CompileIgnitionFinalization) \
- V(CompileRenumber) \
V(CompileRewriteReturnResult) \
V(CompileScopeAnalysis) \
V(CompileScript) \
@@ -1170,7 +1219,9 @@ class RuntimeCallTimerScope {
HT(compile_script_no_cache_because_cache_too_cold, \
V8.CompileScriptMicroSeconds.NoCache.CacheTooCold, 1000000, MICROSECOND) \
HT(compile_script_on_background, \
- V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND)
+ V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
+ HT(gc_parallel_task_latency, V8.GC.ParallelTaskLatencyMicroSeconds, 1000000, \
+ MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 3aae30799f..c16b963776 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -361,8 +361,8 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
// We're disabling usage of waitid in Mac OS X because it doesn't work for us:
// a parent process hangs on waiting while a child process is already a zombie.
// See http://code.google.com/p/v8/issues/detail?id=401.
-#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) \
- && !defined(__NetBSD__)
+#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) && \
+ !defined(__NetBSD__) && !defined(__Fuchsia__)
#if !defined(__FreeBSD__)
#define HAS_WAITID 1
#endif
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 32f129821a..0f2ba4257e 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -84,15 +84,18 @@ class ArrayBufferAllocatorBase : public v8::ArrayBuffer::Allocator {
allocator_->Free(data, length);
}
- void* Reserve(size_t length) override { return allocator_->Reserve(length); }
+ void* Reserve(size_t length) override {
+ UNIMPLEMENTED();
+ return nullptr;
+ }
void Free(void* data, size_t length, AllocationMode mode) override {
- allocator_->Free(data, length, mode);
+ UNIMPLEMENTED();
}
void SetProtection(void* data, size_t length,
Protection protection) override {
- allocator_->SetProtection(data, length, protection);
+ UNIMPLEMENTED();
}
private:
@@ -121,18 +124,6 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
}
- void* Reserve(size_t length) override {
- // |length| must be over the threshold so we can distinguish VM from
- // malloced memory.
- DCHECK_LE(kVMThreshold, length);
- return ArrayBufferAllocatorBase::Reserve(length);
- }
-
- void Free(void* data, size_t length, AllocationMode) override {
- // Ignore allocation mode; the appropriate action is determined by |length|.
- Free(data, length);
- }
-
private:
static constexpr size_t kVMThreshold = 65536;
static constexpr size_t kTwoGB = 2u * 1024u * 1024u * 1024u;
@@ -172,14 +163,6 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
return ArrayBufferAllocatorBase::Free(data, Adjust(length));
}
- void* Reserve(size_t length) override {
- return ArrayBufferAllocatorBase::Reserve(Adjust(length));
- }
-
- void Free(void* data, size_t length, AllocationMode mode) override {
- return ArrayBufferAllocatorBase::Free(data, Adjust(length), mode);
- }
-
private:
size_t Adjust(size_t length) {
const size_t kAllocationLimit = 10 * kMB;
@@ -621,8 +604,9 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Context> context(isolate->GetCurrentContext());
ScriptOrigin origin(name);
- if (options.compile_options == ScriptCompiler::kConsumeCodeCache ||
- options.compile_options == ScriptCompiler::kConsumeParserCache) {
+ DCHECK(options.compile_options != ScriptCompiler::kProduceParserCache);
+ DCHECK(options.compile_options != ScriptCompiler::kConsumeParserCache);
+ if (options.compile_options == ScriptCompiler::kConsumeCodeCache) {
ScriptCompiler::CachedData* cached_code =
LookupCodeCache(isolate, source);
if (cached_code != nullptr) {
@@ -656,9 +640,6 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
ScriptCompiler::Source script_source(source, origin);
maybe_script = ScriptCompiler::Compile(context, &script_source,
options.compile_options);
- if (options.compile_options == ScriptCompiler::kProduceParserCache) {
- StoreInCodeCache(isolate, source, script_source.GetCachedData());
- }
}
Local<Script> script;
@@ -957,9 +938,7 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
std::string source_url = ToSTLString(isolate, referrer);
std::string dir_name =
- DirName(IsAbsolutePath(source_url)
- ? source_url
- : NormalizePath(source_url, GetWorkingDirectory()));
+ DirName(NormalizePath(source_url, GetWorkingDirectory()));
std::string file_name = ToSTLString(isolate, specifier);
std::string absolute_path = NormalizePath(file_name, dir_name);
@@ -2576,7 +2555,11 @@ void SourceGroup::JoinThread() {
ExternalizedContents::~ExternalizedContents() {
if (base_ != nullptr) {
- Shell::array_buffer_allocator->Free(base_, length_, mode_);
+ if (mode_ == ArrayBuffer::Allocator::AllocationMode::kReservation) {
+ CHECK(i::FreePages(base_, length_));
+ } else {
+ Shell::array_buffer_allocator->Free(base_, length_);
+ }
}
}
@@ -2852,8 +2835,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
options.code_cache_options =
ShellOptions::CodeCacheOptions::kProduceCache;
- } else if (strncmp(value, "=parse", 7) == 0) {
- options.compile_options = v8::ScriptCompiler::kProduceParserCache;
} else if (strncmp(value, "=none", 6) == 0) {
options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
options.code_cache_options =
@@ -2900,6 +2881,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--quiet-load") == 0) {
options.quiet_load = true;
argv[i] = nullptr;
+ } else if (strncmp(argv[i], "--thread-pool-size=", 19) == 0) {
+ options.thread_pool_size = atoi(argv[i] + 19);
+ argv[i] = nullptr;
}
}
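The new --thread-pool-size flag follows the same strncmp-prefix / atoi pattern as the surrounding d8 options, where 19 is the length of "--thread-pool-size=". A stand-alone sketch of that parsing style, with a hypothetical argv:

#include <cstdio>
#include <cstdlib>
#include <cstring>

int main() {
  const char* argv[] = {"d8", "--thread-pool-size=4"};
  int thread_pool_size = 0;  // 0 lets the platform pick a default
  for (const char* arg : argv) {
    if (std::strncmp(arg, "--thread-pool-size=", 19) == 0) {
      thread_pool_size = std::atoi(arg + 19);  // value after the '='
    }
  }
  std::printf("thread pool size: %d\n", thread_pool_size);
}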
@@ -3085,6 +3069,13 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> Release() { return std::move(data_); }
+ void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) {
+ to->insert(to->end(),
+ std::make_move_iterator(externalized_contents_.begin()),
+ std::make_move_iterator(externalized_contents_.end()));
+ externalized_contents_.clear();
+ }
+
protected:
// Implements ValueSerializer::Delegate.
void ThrowDataCloneError(Local<String> message) override {
@@ -3102,6 +3093,8 @@ class Serializer : public ValueSerializer::Delegate {
size_t index = shared_array_buffers_.size();
shared_array_buffers_.emplace_back(isolate_, shared_array_buffer);
+ data_->shared_array_buffer_contents_.push_back(
+ MaybeExternalize(shared_array_buffer));
return Just<uint32_t>(static_cast<uint32_t>(index));
}
@@ -3155,7 +3148,7 @@ class Serializer : public ValueSerializer::Delegate {
return array_buffer->GetContents();
} else {
typename T::Contents contents = array_buffer->Externalize();
- data_->externalized_contents_.emplace_back(contents);
+ externalized_contents_.emplace_back(contents);
return contents;
}
}
@@ -3174,13 +3167,6 @@ class Serializer : public ValueSerializer::Delegate {
data_->array_buffer_contents_.push_back(contents);
}
- for (const auto& global_shared_array_buffer : shared_array_buffers_) {
- Local<SharedArrayBuffer> shared_array_buffer =
- Local<SharedArrayBuffer>::New(isolate_, global_shared_array_buffer);
- data_->shared_array_buffer_contents_.push_back(
- MaybeExternalize(shared_array_buffer));
- }
-
return Just(true);
}
@@ -3189,6 +3175,7 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> data_;
std::vector<Global<ArrayBuffer>> array_buffers_;
std::vector<Global<SharedArrayBuffer>> shared_array_buffers_;
+ std::vector<ExternalizedContents> externalized_contents_;
size_t current_memory_usage_;
DISALLOW_COPY_AND_ASSIGN(Serializer);
@@ -3216,16 +3203,21 @@ class Deserializer : public ValueDeserializer::Delegate {
deserializer_.TransferArrayBuffer(index++, array_buffer);
}
- index = 0;
- for (const auto& contents : data_->shared_array_buffer_contents()) {
- Local<SharedArrayBuffer> shared_array_buffer = SharedArrayBuffer::New(
- isolate_, contents.Data(), contents.ByteLength());
- deserializer_.TransferSharedArrayBuffer(index++, shared_array_buffer);
- }
-
return deserializer_.ReadValue(context);
}
+ MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
+ Isolate* isolate, uint32_t clone_id) override {
+ DCHECK_NOT_NULL(data_);
+ if (clone_id < data_->shared_array_buffer_contents().size()) {
+ SharedArrayBuffer::Contents contents =
+ data_->shared_array_buffer_contents().at(clone_id);
+ return SharedArrayBuffer::New(isolate_, contents.Data(),
+ contents.ByteLength());
+ }
+ return MaybeLocal<SharedArrayBuffer>();
+ }
+
private:
Isolate* isolate_;
ValueDeserializer deserializer_;
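The deserializer change above replaces eager TransferSharedArrayBuffer() registration with an on-demand delegate hook: the serializer records each buffer's contents by index, and GetSharedArrayBufferFromId() rebuilds a SharedArrayBuffer only when the payload actually references that clone id, returning an empty MaybeLocal for unknown ids. A stand-in sketch of the lookup shape, in plain C++ types rather than the V8 API:

#include <cstdio>
#include <vector>

struct Contents { int payload; };  // stand-in for SharedArrayBuffer::Contents

// Stand-in for the delegate override: a valid clone id resolves against the
// recorded contents; an unknown id reports failure (empty MaybeLocal in V8).
bool GetSharedArrayBufferFromId(const std::vector<Contents>& recorded,
                                unsigned clone_id, Contents* out) {
  if (clone_id < recorded.size()) {
    *out = recorded[clone_id];
    return true;
  }
  return false;
}

int main() {
  std::vector<Contents> recorded = {{10}, {20}};
  Contents c{};
  std::printf("id 1: %s\n",
              GetSharedArrayBufferFromId(recorded, 1, &c) ? "found" : "missing");
  std::printf("id 5: %s\n",
              GetSharedArrayBufferFromId(recorded, 5, &c) ? "found" : "missing");
}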
@@ -3242,9 +3234,11 @@ std::unique_ptr<SerializationData> Shell::SerializeValue(
if (serializer.WriteValue(context, value, transfer).To(&ok)) {
std::unique_ptr<SerializationData> data = serializer.Release();
base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
- data->AppendExternalizedContentsTo(&externalized_contents_);
+ serializer.AppendExternalizedContentsTo(&externalized_contents_);
return data;
}
+ // Append externalized contents even when WriteValue fails.
+ serializer.AppendExternalizedContentsTo(&externalized_contents_);
return nullptr;
}
@@ -3318,8 +3312,8 @@ int Shell::Main(int argc, char* argv[]) {
platform::tracing::TracingController* tracing_controller = tracing.get();
g_platform = v8::platform::NewDefaultPlatform(
- 0, v8::platform::IdleTaskSupport::kEnabled, in_process_stack_dumping,
- std::move(tracing));
+ options.thread_pool_size, v8::platform::IdleTaskSupport::kEnabled,
+ in_process_stack_dumping, std::move(tracing));
if (i::FLAG_verify_predictable) {
g_platform.reset(new PredictablePlatform(std::move(g_platform)));
}
@@ -3422,14 +3416,9 @@ int Shell::Main(int argc, char* argv[]) {
result = RunMain(isolate, argc, argv, false);
// Change the options to consume cache
- if (options.compile_options == v8::ScriptCompiler::kProduceParserCache) {
- options.compile_options = v8::ScriptCompiler::kConsumeParserCache;
- } else {
- DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
- options.compile_options ==
- v8::ScriptCompiler::kNoCompileOptions);
- options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
- }
+ DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
+ options.compile_options == v8::ScriptCompiler::kNoCompileOptions);
+ options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
printf("============ Run: Consume code cache ============\n");
// Second run to consume the cache in new isolate
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 8fc6eab046..bf4793ef04 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -197,12 +197,6 @@ class SerializationData {
return shared_array_buffer_contents_;
}
- void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) {
- to->insert(to->end(),
- std::make_move_iterator(externalized_contents_.begin()),
- std::make_move_iterator(externalized_contents_.end()));
- externalized_contents_.clear();
- }
private:
struct DataDeleter {
@@ -213,7 +207,6 @@ class SerializationData {
size_t size_;
std::vector<ArrayBuffer::Contents> array_buffer_contents_;
std::vector<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
- std::vector<ExternalizedContents> externalized_contents_;
private:
friend class Serializer;
@@ -358,6 +351,7 @@ class ShellOptions {
int read_from_tcp_port;
bool enable_os_system = false;
bool quiet_load = false;
+ int thread_pool_size = 0;
};
class Shell : public i::AllStatic {
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index b8a9263d32..d9fa13dae5 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -5,8 +5,6 @@
#ifndef V8_DATE_H_
#define V8_DATE_H_
-#include "src/allocation.h"
-#include "src/base/platform/platform.h"
#include "src/base/timezone-cache.h"
#include "src/globals.h"
@@ -289,4 +287,4 @@ class DateCache {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_DATE_H_
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index 47280bfbc9..e2c3f4738d 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -35,10 +35,9 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Leave the frame.
// - Restart the frame by calling the function.
__ Mov(fp, x1);
- __ AssertStackConsistency();
__ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Mov(masm->StackPointer(), Operand(fp));
+ __ Mov(sp, fp);
__ Pop(fp, lr); // Frame, Return address.
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index e5865e639c..6052149b81 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -21,29 +21,24 @@
namespace v8 {
namespace internal {
-static inline bool IsDebugContext(Isolate* isolate, Context* context) {
- return context->native_context() == *isolate->debug()->debug_context();
-}
-
MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<String> source) {
- // Handle the processing of break.
- DisableBreak disable_break_scope(isolate->debug());
-
- // Enter the top context from before the debugger was invoked.
- SaveContext save(isolate);
- SaveContext* top = &save;
- while (top != nullptr && IsDebugContext(isolate, *top->context())) {
- top = top->prev();
- }
- if (top != nullptr) isolate->set_context(*top->context());
-
- // Get the native context now set to the top context from before the
- // debugger was invoked.
Handle<Context> context = isolate->native_context();
- Handle<JSObject> receiver(context->global_proxy());
- Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
- return Evaluate(isolate, outer_info, context, receiver, source, false);
+ ScriptOriginOptions origin_options(false, true);
+ MaybeHandle<SharedFunctionInfo> maybe_function_info =
+ Compiler::GetSharedFunctionInfoForScript(
+ source, Compiler::ScriptDetails(isolate->factory()->empty_string()),
+ origin_options, nullptr, nullptr, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE);
+
+ Handle<SharedFunctionInfo> shared_info;
+ if (!maybe_function_info.ToHandle(&shared_info)) return MaybeHandle<Object>();
+
+ Handle<JSFunction> fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared_info,
+ context);
+ return Execution::Call(isolate, fun,
+ Handle<JSObject>(context->global_proxy()), 0, nullptr);
}
MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
@@ -278,6 +273,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ToString) \
V(ToLength) \
V(ToNumber) \
+ V(ToBigInt) \
V(NumberToStringSkipCache) \
/* Type checks */ \
V(IsJSReceiver) \
@@ -292,7 +288,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(IsJSWeakSet) \
V(IsRegExp) \
V(IsTypedArray) \
- V(ClassOf) \
/* Loads */ \
V(LoadLookupSlotForCall) \
/* Arrays */ \
@@ -302,6 +297,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(TrySliceSimpleNonFastElements) \
V(HasComplexElements) \
V(EstimateNumberOfElements) \
+ V(NewArray) \
+ V(TypedArrayGetBuffer) \
/* Errors */ \
V(ReThrow) \
V(ThrowReferenceError) \
@@ -309,13 +306,14 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ThrowIteratorResultNotAnObject) \
V(NewTypeError) \
V(ThrowInvalidStringLength) \
+ V(ThrowCalledNonCallable) \
/* Strings */ \
V(StringIndexOf) \
V(StringIncludes) \
V(StringReplaceOneCharWithString) \
V(StringToNumber) \
V(StringTrim) \
- V(SubString) \
+ V(StringSubstring) \
V(RegExpInternalReplace) \
/* BigInts */ \
V(BigIntEqualToBigInt) \
@@ -325,9 +323,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(CreateArrayLiteral) \
V(CreateObjectLiteral) \
V(CreateRegExpLiteral) \
- /* Collections */ \
- V(GenericHash) \
/* Called from builtins */ \
+ V(ClassOf) \
V(StringAdd) \
V(StringParseFloat) \
V(StringParseInt) \
@@ -343,18 +340,19 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(AllocateSeqOneByteString) \
V(AllocateSeqTwoByteString) \
V(ObjectCreate) \
+ V(ObjectEntries) \
+ V(ObjectEntriesSkipFastPath) \
V(ObjectHasOwnProperty) \
+ V(ObjectValues) \
+ V(ObjectValuesSkipFastPath) \
V(ArrayIndexOf) \
V(ArrayIncludes_Slow) \
V(ArrayIsArray) \
V(ThrowTypeError) \
- V(ThrowCalledOnNullOrUndefined) \
- V(ThrowIncompatibleMethodReceiver) \
- V(ThrowInvalidHint) \
- V(ThrowNotDateError) \
V(ThrowRangeError) \
V(ToName) \
V(GetOwnPropertyDescriptor) \
+ V(HasProperty) \
V(StackGuard) \
/* Misc. */ \
V(Call) \
@@ -362,7 +360,12 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(NewObject) \
V(CompleteInobjectSlackTrackingForMap) \
V(HasInPrototypeChain) \
- V(StringMaxLength)
+ V(StringMaxLength) \
+ /* Test */ \
+ V(OptimizeOsr) \
+ V(OptimizeFunctionOnNextCall) \
+ V(UnblockConcurrentRecompilation) \
+ V(GetOptimizationStatus)
#define CASE(Name) \
case Runtime::k##Name: \
@@ -383,6 +386,42 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
#undef INTRINSIC_WHITELIST
}
+#ifdef DEBUG
+bool BuiltinToIntrinsicHasNoSideEffect(Builtins::Name builtin_id,
+ Runtime::FunctionId intrinsic_id) {
+ // First check the intrinsic whitelist.
+ if (IntrinsicHasNoSideEffect(intrinsic_id)) return true;
+
+// Whitelist intrinsics called from specific builtins.
+#define BUILTIN_INTRINSIC_WHITELIST(V, W) \
+ /* Arrays */ \
+ V(Builtins::kArrayFilter, W(CreateDataProperty)) \
+ V(Builtins::kArrayMap, W(CreateDataProperty)) \
+ V(Builtins::kArrayPrototypeSlice, W(CreateDataProperty) W(SetProperty)) \
+ /* TypedArrays */ \
+ V(Builtins::kTypedArrayPrototypeFilter, W(TypedArrayCopyElements)) \
+ V(Builtins::kTypedArrayPrototypeMap, W(SetProperty))
+
+#define CASE(Builtin, ...) \
+ case Builtin: \
+ return (__VA_ARGS__ false);
+
+#define MATCH(Intrinsic) \
+ intrinsic_id == Runtime::k##Intrinsic || \
+ intrinsic_id == Runtime::kInline##Intrinsic ||
+
+ switch (builtin_id) {
+ BUILTIN_INTRINSIC_WHITELIST(CASE, MATCH)
+ default:
+ return false;
+ }
+
+#undef MATCH
+#undef CASE
+#undef BUILTIN_INTRINSIC_WHITELIST
+}
+#endif // DEBUG
+
bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
typedef interpreter::Bytecode Bytecode;
typedef interpreter::Bytecodes Bytecodes;
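The CASE/MATCH machinery in the new BuiltinToIntrinsicHasNoSideEffect above is an X-macro: each whitelist entry expands into a case label returning an ||-chain of intrinsic-id comparisons, terminated by the literal false. A stand-alone demo of the same expansion technique, with stand-in enums in place of the real Builtins/Runtime tables:

#include <cstdio>

enum Builtin { kArrayFilter, kOther };
enum Intrinsic { kCreateDataProperty, kInlineCreateDataProperty, kSetProperty };

// One entry, mirroring V(Builtins::kArrayFilter, W(CreateDataProperty)).
#define WHITELIST(V, W) V(kArrayFilter, W(CreateDataProperty))

#define CASE(Builtin, ...) \
  case Builtin:            \
    return (__VA_ARGS__ false);

// Each W(...) contributes a pair of comparisons ending in "||", so the
// trailing "false" in CASE terminates the chain.
#define MATCH(I) intrinsic_id == k##I || intrinsic_id == kInline##I ||

bool HasNoSideEffect(Builtin builtin_id, Intrinsic intrinsic_id) {
  switch (builtin_id) {
    WHITELIST(CASE, MATCH)
    default:
      return false;
  }
}

int main() {
  std::printf("%d\n", HasNoSideEffect(kArrayFilter, kInlineCreateDataProperty));  // 1
  std::printf("%d\n", HasNoSideEffect(kArrayFilter, kSetProperty));               // 0
}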
@@ -512,6 +551,7 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kObjectPrototypePropertyIsEnumerable:
case Builtins::kObjectPrototypeToString:
// Array builtins.
+ case Builtins::kArrayIsArray:
case Builtins::kArrayConstructor:
case Builtins::kArrayIndexOf:
case Builtins::kArrayPrototypeValues:
@@ -520,11 +560,60 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kArrayPrototypeFind:
case Builtins::kArrayPrototypeFindIndex:
case Builtins::kArrayPrototypeKeys:
+ case Builtins::kArrayPrototypeSlice:
case Builtins::kArrayForEach:
case Builtins::kArrayEvery:
case Builtins::kArraySome:
+ case Builtins::kArrayConcat:
+ case Builtins::kArraySlice:
+ case Builtins::kArrayFilter:
+ case Builtins::kArrayMap:
case Builtins::kArrayReduce:
case Builtins::kArrayReduceRight:
+ // TypedArray builtins.
+ case Builtins::kTypedArrayConstructor:
+ case Builtins::kTypedArrayPrototypeBuffer:
+ case Builtins::kTypedArrayPrototypeByteLength:
+ case Builtins::kTypedArrayPrototypeByteOffset:
+ case Builtins::kTypedArrayPrototypeLength:
+ case Builtins::kTypedArrayPrototypeEntries:
+ case Builtins::kTypedArrayPrototypeKeys:
+ case Builtins::kTypedArrayPrototypeValues:
+ case Builtins::kTypedArrayPrototypeFind:
+ case Builtins::kTypedArrayPrototypeFindIndex:
+ case Builtins::kTypedArrayPrototypeIncludes:
+ case Builtins::kTypedArrayPrototypeIndexOf:
+ case Builtins::kTypedArrayPrototypeLastIndexOf:
+ case Builtins::kTypedArrayPrototypeSlice:
+ case Builtins::kTypedArrayPrototypeSubArray:
+ case Builtins::kTypedArrayPrototypeEvery:
+ case Builtins::kTypedArrayPrototypeSome:
+ case Builtins::kTypedArrayPrototypeFilter:
+ case Builtins::kTypedArrayPrototypeMap:
+ case Builtins::kTypedArrayPrototypeReduce:
+ case Builtins::kTypedArrayPrototypeReduceRight:
+ case Builtins::kTypedArrayPrototypeForEach:
+ // ArrayBuffer builtins.
+ case Builtins::kArrayBufferConstructor:
+ case Builtins::kArrayBufferPrototypeGetByteLength:
+ case Builtins::kArrayBufferIsView:
+ case Builtins::kArrayBufferPrototypeSlice:
+ case Builtins::kReturnReceiver:
+ // DataView builtins.
+ case Builtins::kDataViewConstructor:
+ case Builtins::kDataViewPrototypeGetBuffer:
+ case Builtins::kDataViewPrototypeGetByteLength:
+ case Builtins::kDataViewPrototypeGetByteOffset:
+ case Builtins::kDataViewPrototypeGetInt8:
+ case Builtins::kDataViewPrototypeGetUint8:
+ case Builtins::kDataViewPrototypeGetInt16:
+ case Builtins::kDataViewPrototypeGetUint16:
+ case Builtins::kDataViewPrototypeGetInt32:
+ case Builtins::kDataViewPrototypeGetUint32:
+ case Builtins::kDataViewPrototypeGetFloat32:
+ case Builtins::kDataViewPrototypeGetFloat64:
+ case Builtins::kDataViewPrototypeGetBigInt64:
+ case Builtins::kDataViewPrototypeGetBigUint64:
// Boolean bulitins.
case Builtins::kBooleanConstructor:
case Builtins::kBooleanPrototypeToString:
@@ -562,11 +651,17 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kDatePrototypeValueOf:
// Map builtins.
case Builtins::kMapConstructor:
+ case Builtins::kMapPrototypeForEach:
case Builtins::kMapPrototypeGet:
+ case Builtins::kMapPrototypeHas:
case Builtins::kMapPrototypeEntries:
case Builtins::kMapPrototypeGetSize:
case Builtins::kMapPrototypeKeys:
case Builtins::kMapPrototypeValues:
+ // WeakMap builtins.
+ case Builtins::kWeakMapConstructor:
+ case Builtins::kWeakMapGet:
+ case Builtins::kWeakMapHas:
// Math builtins.
case Builtins::kMathAbs:
case Builtins::kMathAcos:
@@ -619,8 +714,13 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
// Set builtins.
case Builtins::kSetConstructor:
case Builtins::kSetPrototypeEntries:
+ case Builtins::kSetPrototypeForEach:
case Builtins::kSetPrototypeGetSize:
+ case Builtins::kSetPrototypeHas:
case Builtins::kSetPrototypeValues:
+ // WeakSet builtins.
+ case Builtins::kWeakSetConstructor:
+ case Builtins::kWeakSetHas:
// String builtins. Strings are immutable.
case Builtins::kStringFromCharCode:
case Builtins::kStringFromCodePoint:
@@ -659,11 +759,11 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringPrototypeToUpperCase:
#endif
case Builtins::kStringPrototypeTrim:
- case Builtins::kStringPrototypeTrimLeft:
- case Builtins::kStringPrototypeTrimRight:
+ case Builtins::kStringPrototypeTrimEnd:
+ case Builtins::kStringPrototypeTrimStart:
case Builtins::kStringPrototypeValueOf:
case Builtins::kStringToNumber:
- case Builtins::kSubString:
+ case Builtins::kStringSubstring:
// Symbol builtins.
case Builtins::kSymbolConstructor:
case Builtins::kSymbolKeyFor:
@@ -759,11 +859,6 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
DCHECK(Builtins::IsLazy(builtin_index));
DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_index));
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing builtin %s\n",
- Builtins::name(builtin_index));
- }
-
code = Snapshot::DeserializeBuiltin(isolate, builtin_index);
DCHECK_NE(Builtins::kDeserializeLazy, code->builtin_index());
}
@@ -775,7 +870,9 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
Address address = rinfo->target_external_reference();
const Runtime::Function* function = Runtime::FunctionForEntry(address);
if (function == nullptr) continue;
- if (!IntrinsicHasNoSideEffect(function->function_id)) {
+ if (!BuiltinToIntrinsicHasNoSideEffect(
+ static_cast<Builtins::Name>(builtin_index),
+ function->function_id)) {
PrintF("Whitelisted builtin %s calls non-whitelisted intrinsic %s\n",
Builtins::name(builtin_index), function->name);
failed = true;
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 70f3670ee4..a7426eb96e 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -43,8 +43,9 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
js_frame, inlined_frame_index, isolate));
} else if (frame_->is_wasm_interpreter_entry()) {
wasm_interpreted_frame_ =
- summary.AsWasm().wasm_instance()->debug_info()->GetInterpretedFrame(
- frame_->fp(), inlined_frame_index);
+ WasmInterpreterEntryFrame::cast(frame_)
+ ->debug_info()
+ ->GetInterpretedFrame(frame_->fp(), inlined_frame_index);
DCHECK(wasm_interpreted_frame_);
}
}
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index c8c1e76ef2..eef65f5100 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -5,7 +5,6 @@
#ifndef V8_DEBUG_DEBUG_INTERFACE_H_
#define V8_DEBUG_DEBUG_INTERFACE_H_
-#include "include/v8-debug.h"
#include "include/v8-util.h"
#include "include/v8.h"
@@ -181,12 +180,10 @@ class DebugDelegate {
bool is_blackboxed) {}
virtual void ScriptCompiled(v8::Local<Script> script, bool is_live_edited,
bool has_compile_error) {}
- // |break_points_hit| contains installed by JS debug API breakpoint objects.
// |inspector_break_points_hit| contains id of breakpoints installed with
// debug::Script::SetBreakpoint API.
virtual void BreakProgramRequested(
v8::Local<v8::Context> paused_context, v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<debug::BreakpointId>& inspector_break_points_hit) {}
virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
@@ -500,6 +497,9 @@ int GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
int64_t GetNextRandomInt64(v8::Isolate* isolate);
+v8::MaybeLocal<v8::Value> EvaluateGlobal(v8::Isolate* isolate,
+ v8::Local<v8::String> source);
+
} // namespace debug
} // namespace v8
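
For embedders of this header, the visible change is that BreakProgramRequested drops the legacy break_points_hit value and reports only inspector breakpoint IDs. A sketch of a delegate written against the new signature, assuming the headers from this tree; the body is illustrative:

    #include <vector>

    #include "src/debug/debug-interface.h"

    // Minimal delegate using the post-change callback: the legacy
    // |break_points_hit| argument is gone, and breakpoints set through
    // debug::Script::SetBreakpoint arrive as plain ids.
    class PauseLogger : public v8::debug::DebugDelegate {
     public:
      void BreakProgramRequested(
          v8::Local<v8::Context> paused_context,
          v8::Local<v8::Object> exec_state,
          const std::vector<v8::debug::BreakpointId>& inspector_break_points_hit)
          override {
        for (v8::debug::BreakpointId id : inspector_break_points_hit) {
          (void)id;  // react to each hit breakpoint id here
        }
      }
    };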
diff --git a/deps/v8/src/debug/debug-scope-iterator.cc b/deps/v8/src/debug/debug-scope-iterator.cc
index 2e06dccab6..9d843ed17e 100644
--- a/deps/v8/src/debug/debug-scope-iterator.cc
+++ b/deps/v8/src/debug/debug-scope-iterator.cc
@@ -176,8 +176,7 @@ v8::debug::ScopeIterator::ScopeType DebugWasmScopeIterator::GetType() {
v8::Local<v8::Object> DebugWasmScopeIterator::GetObject() {
DCHECK(!Done());
Handle<WasmDebugInfo> debug_info(
- WasmInterpreterEntryFrame::cast(frame_)->wasm_instance()->debug_info(),
- isolate_);
+ WasmInterpreterEntryFrame::cast(frame_)->debug_info(), isolate_);
switch (type_) {
case debug::ScopeIterator::ScopeTypeGlobal:
return Utils::ToLocal(WasmDebugInfo::GetGlobalScopeObject(
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 77654de635..fda85bd88d 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -121,10 +121,11 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
CollectNonLocals(info.get(), scope);
}
if (!ignore_nested_scopes) {
- DeclarationScope::Analyze(info.get());
- DeclarationScope::AllocateScopeInfos(info.get(), isolate_,
- AnalyzeMode::kDebugger);
- RetrieveScopeChain(scope);
+ if (DeclarationScope::Analyze(info.get())) {
+ DeclarationScope::AllocateScopeInfos(info.get(), isolate_,
+ AnalyzeMode::kDebugger);
+ RetrieveScopeChain(scope);
+ }
}
} else {
// A failed reparse indicates that the preparser has diverged from the
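
DeclarationScope::Analyze apparently now signals failure instead of being assumed infallible, so scope-info allocation and chain retrieval run only on success. The guard shape in isolation, with stand-in types (a sketch, not the real parser API):

    #include <optional>
    #include <string>

    struct ScopeChain {
      std::string description;
    };

    // Stand-in for an analysis pass that may fail (e.g. if reparsing
    // overflows the stack); it reports failure instead of asserting.
    std::optional<ScopeChain> Analyze(const std::string& source) {
      if (source.empty()) return std::nullopt;
      return ScopeChain{"[script scope]"};
    }

    void RetrieveScopeChain(const ScopeChain& chain) {
      (void)chain;  // walk the chain; omitted
    }

    void TryParseAndRetrieveScopes(const std::string& source) {
      // Mirrors the guarded call above: downstream steps run only when
      // analysis succeeded; otherwise the iterator is left empty.
      if (std::optional<ScopeChain> chain = Analyze(source)) {
        RetrieveScopeChain(*chain);
      }
    }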
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index 6288c11b94..cc30ddee61 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -50,7 +50,7 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
continue;
}
FeedbackSlot slot = vector->GetTypeProfileSlot();
- CollectTypeProfileNexus nexus(vector, slot);
+ FeedbackNexus nexus(vector, slot);
Handle<String> name(info->DebugName(), isolate);
std::vector<int> source_positions = nexus.GetSourcePositions();
for (int position : source_positions) {
@@ -60,7 +60,7 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
}
// Releases type profile data collected so far.
- nexus.Clear();
+ nexus.ResetTypeProfile();
}
if (!entries->empty()) {
result->emplace_back(type_profile_script);
@@ -91,8 +91,8 @@ void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfile::Mode mode) {
DCHECK(info->IsSubjectToDebugging());
if (info->feedback_metadata()->HasTypeProfileSlot()) {
FeedbackSlot slot = vector->GetTypeProfileSlot();
- CollectTypeProfileNexus nexus(vector, slot);
- nexus.Clear();
+ FeedbackNexus nexus(vector, slot);
+ nexus.ResetTypeProfile();
}
}
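
Both hunks replace the special-purpose CollectTypeProfileNexus with the plain FeedbackNexus plus a targeted ResetTypeProfile(), the usual consolidation of a one-off subclass into a method on the base type. In miniature, with hypothetical names:

    #include <vector>

    // Before the change a dedicated subclass existed solely to clear the
    // type-profile slot; afterwards the base class owns a targeted reset
    // and callers construct it directly.
    class FeedbackNexusStub {
     public:
      const std::vector<int>& GetSourcePositions() const { return positions_; }
      void ResetTypeProfile() { positions_.clear(); }  // clears only this slot

     private:
      std::vector<int> positions_{10, 42};
    };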
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index c087a0868c..69eaeb6cad 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -12,7 +12,6 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
-#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/liveedit.h"
@@ -21,6 +20,7 @@
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/globals.h"
+#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/log.h"
@@ -29,8 +29,6 @@
#include "src/snapshot/natives.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "include/v8-debug.h"
-
namespace v8 {
namespace internal {
@@ -53,6 +51,9 @@ Debug::Debug(Isolate* isolate)
BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame) {
+ if (debug_info->CanBreakAtEntry()) {
+ return BreakLocation(Debug::kBreakAtEntryPosition, DEBUG_BREAK_AT_ENTRY);
+ }
auto summary = FrameSummary::GetTop(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
@@ -64,6 +65,7 @@ BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
void BreakLocation::AllAtCurrentStatement(
Handle<DebugInfo> debug_info, JavaScriptFrame* frame,
std::vector<BreakLocation>* result_out) {
+ DCHECK(!debug_info->CanBreakAtEntry());
auto summary = FrameSummary::GetTop(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
@@ -81,6 +83,18 @@ void BreakLocation::AllAtCurrentStatement(
}
}
+JSGeneratorObject* BreakLocation::GetGeneratorObjectForSuspendedFrame(
+ JavaScriptFrame* frame) const {
+ DCHECK(IsSuspend());
+ DCHECK_GE(generator_obj_reg_index_, 0);
+
+ Object* generator_obj =
+ InterpretedFrame::cast(frame)->ReadInterpreterRegister(
+ generator_obj_reg_index_);
+
+ return JSGeneratorObject::cast(generator_obj);
+}
+
int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
Handle<AbstractCode> abstract_code,
int offset) {
@@ -103,13 +117,18 @@ int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
bool BreakLocation::HasBreakPoint(Handle<DebugInfo> debug_info) const {
// First check whether there is a break point with the same source position.
if (!debug_info->HasBreakPoint(position_)) return false;
- // Then check whether a break point at that source position would have
- // the same code offset. Otherwise it's just a break location that we can
- // step to, but not actually a location where we can put a break point.
- DCHECK(abstract_code_->IsBytecodeArray());
- BreakIterator it(debug_info);
- it.SkipToPosition(position_);
- return it.code_offset() == code_offset_;
+ if (debug_info->CanBreakAtEntry()) {
+ DCHECK_EQ(Debug::kBreakAtEntryPosition, position_);
+ return debug_info->BreakAtEntry();
+ } else {
+ // Then check whether a break point at that source position would have
+ // the same code offset. Otherwise it's just a break location that we can
+ // step to, but not actually a location where we can put a break point.
+ DCHECK(abstract_code_->IsBytecodeArray());
+ BreakIterator it(debug_info);
+ it.SkipToPosition(position_);
+ return it.code_offset() == code_offset_;
+ }
}
debug::BreakLocationType BreakLocation::type() const {
@@ -120,10 +139,12 @@ debug::BreakLocationType BreakLocation::type() const {
return debug::kCallBreakLocation;
case DEBUG_BREAK_SLOT_AT_RETURN:
return debug::kReturnBreakLocation;
+
+ // Externally, suspend breaks should look like normal breaks.
+ case DEBUG_BREAK_SLOT_AT_SUSPEND:
default:
return debug::kCommonBreakLocation;
}
- return debug::kCommonBreakLocation;
}
BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
@@ -181,10 +202,18 @@ DebugBreakType BreakIterator::GetDebugBreakType() {
interpreter::Bytecode bytecode =
interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+ // Make sure we read the actual bytecode, not a prefix scaling bytecode.
+ if (interpreter::Bytecodes::IsPrefixScalingBytecode(bytecode)) {
+ bytecode = interpreter::Bytecodes::FromByte(
+ bytecode_array->get(code_offset() + 1));
+ }
+
if (bytecode == interpreter::Bytecode::kDebugger) {
return DEBUGGER_STATEMENT;
} else if (bytecode == interpreter::Bytecode::kReturn) {
return DEBUG_BREAK_SLOT_AT_RETURN;
+ } else if (bytecode == interpreter::Bytecode::kSuspendGenerator) {
+ return DEBUG_BREAK_SLOT_AT_SUSPEND;
} else if (interpreter::Bytecodes::IsCallOrConstruct(bytecode)) {
return DEBUG_BREAK_SLOT_AT_CALL;
} else if (source_position_iterator_.is_statement()) {
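
Ignition may precede a bytecode with a one-byte scaling prefix (Wide or ExtraWide) that widens its operands; without the new check the classifier would see the prefix instead of the real bytecode. A self-contained model of the decode step (toy bytecode set, not V8's):

    #include <cstdint>
    #include <vector>

    // Toy bytecode set with two one-byte scaling prefixes, loosely
    // modelled on Ignition's Wide/ExtraWide.
    enum class Bc : uint8_t { kWide, kExtraWide, kReturn, kSuspendGenerator };

    bool IsPrefixScalingBytecode(Bc b) {
      return b == Bc::kWide || b == Bc::kExtraWide;
    }

    // Returns the bytecode that actually executes at |offset|. Prefixes
    // are exactly one byte, which is why offset + 1 is sufficient.
    Bc ActualBytecodeAt(const std::vector<uint8_t>& bytecodes, size_t offset) {
      Bc b = static_cast<Bc>(bytecodes[offset]);
      if (IsPrefixScalingBytecode(b)) {
        b = static_cast<Bc>(bytecodes[offset + 1]);
      }
      return b;
    }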
@@ -225,7 +254,25 @@ void BreakIterator::ClearDebugBreak() {
BreakLocation BreakIterator::GetBreakLocation() {
Handle<AbstractCode> code(
AbstractCode::cast(debug_info_->DebugBytecodeArray()));
- return BreakLocation(code, GetDebugBreakType(), code_offset(), position_);
+ DebugBreakType type = GetDebugBreakType();
+ int generator_object_reg_index = -1;
+ if (type == DEBUG_BREAK_SLOT_AT_SUSPEND) {
+ // For suspend break, we'll need the generator object to be able to step
+ // over the suspend as if it didn't return. We get the interpreter register
+ // index that holds the generator object by reading it directly off the
+ // bytecode array, and we'll read the actual generator object off the
+ // interpreter stack frame in GetGeneratorObjectForSuspendedFrame.
+ BytecodeArray* bytecode_array = debug_info_->OriginalBytecodeArray();
+ interpreter::BytecodeArrayAccessor accessor(handle(bytecode_array),
+ code_offset());
+
+ DCHECK_EQ(accessor.current_bytecode(),
+ interpreter::Bytecode::kSuspendGenerator);
+ interpreter::Register generator_obj_reg = accessor.GetRegisterOperand(0);
+ generator_object_reg_index = generator_obj_reg.index();
+ }
+ return BreakLocation(code, type, code_offset(), position_,
+ generator_object_reg_index);
}
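
GetBreakLocation records only the register index of the generator object at a SuspendGenerator site; the object itself is read later from the live interpreter frame in GetGeneratorObjectForSuspendedFrame. The two-step recovery, reduced to stand-in types:

    #include <cassert>
    #include <vector>

    // Stand-in interpreter frame: registers are slots in a vector, and
    // each slot here just holds an integer "object id".
    struct FrameStub {
      std::vector<int> registers;
      int Read(int index) const { return registers.at(index); }
    };

    struct SuspendLocation {
      int generator_obj_reg_index = -1;  // recorded while walking bytecode
    };

    // At break time, resolve the recorded register index against the
    // actual frame to recover the suspended generator.
    int GeneratorForSuspendedFrame(const SuspendLocation& loc,
                                   const FrameStub& frame) {
      assert(loc.generator_obj_reg_index >= 0);
      return frame.Read(loc.generator_obj_reg_index);
    }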
@@ -276,9 +323,11 @@ char* Debug::RestoreDebug(char* storage) {
int Debug::ArchiveSpacePerThread() { return 0; }
void Debug::Iterate(RootVisitor* v) {
- v->VisitRootPointer(Root::kDebug, &thread_local_.return_value_);
- v->VisitRootPointer(Root::kDebug, &thread_local_.suspended_generator_);
- v->VisitRootPointer(Root::kDebug, &thread_local_.ignore_step_into_function_);
+ v->VisitRootPointer(Root::kDebug, nullptr, &thread_local_.return_value_);
+ v->VisitRootPointer(Root::kDebug, nullptr,
+ &thread_local_.suspended_generator_);
+ v->VisitRootPointer(Root::kDebug, nullptr,
+ &thread_local_.ignore_step_into_function_);
}
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info) : next_(nullptr) {
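
VisitRootPointer gains a second argument, passed as nullptr here, which suggests the RootVisitor interface now carries an optional per-root description alongside the slot. A sketch of such an interface (an assumption based on this call site, not the actual V8 declaration):

    #include <cstdio>

    struct Object;

    class RootVisitorStub {
     public:
      virtual ~RootVisitorStub() = default;
      // |description| may be nullptr when the root has no useful name.
      virtual void VisitRootPointer(const char* description, Object** slot) = 0;
    };

    class PrintingVisitor final : public RootVisitorStub {
     public:
      void VisitRootPointer(const char* description, Object** slot) override {
        std::printf("root %s at %p\n", description ? description : "(unnamed)",
                    static_cast<void*>(slot));
      }
    };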
@@ -346,7 +395,7 @@ void Debug::Unload() {
debug_context_ = Handle<Context>();
}
-void Debug::Break(JavaScriptFrame* frame) {
+void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
// Initialize LiveEdit.
LiveEdit::InitializeThreadLocal(this);
@@ -362,8 +411,7 @@ void Debug::Break(JavaScriptFrame* frame) {
DisableBreak no_recursive_break(this);
// Return if we fail to retrieve debug info.
- Handle<JSFunction> function(frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(break_target->shared());
if (!EnsureBreakInfo(shared)) return;
Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
@@ -381,6 +429,14 @@ void Debug::Break(JavaScriptFrame* frame) {
return;
}
+  // Debug break at function entry; do not worry about stepping.
+ if (location.IsDebugBreakAtEntry()) {
+ DCHECK(debug_info->BreakAtEntry());
+ return;
+ }
+
+ DCHECK_NOT_NULL(frame);
+
// No break point. Check for stepping.
StepAction step_action = last_step_action();
int current_frame_count = CurrentFrameCount();
@@ -390,7 +446,7 @@ void Debug::Break(JavaScriptFrame* frame) {
// StepOut at not return position was requested and return break locations
// were flooded with one shots.
if (thread_local_.fast_forward_to_return_) {
- DCHECK(location.IsReturn());
+ DCHECK(location.IsReturnOrSuspend());
// We have to ignore recursive calls to function.
if (current_frame_count > target_frame_count) return;
ClearStepping();
@@ -410,8 +466,17 @@ void Debug::Break(JavaScriptFrame* frame) {
case StepNext:
// Step next should not break in a deeper frame than target frame.
if (current_frame_count > target_frame_count) return;
- // Fall through.
+ V8_FALLTHROUGH;
case StepIn: {
+ // Special case "next" and "in" for generators that are about to suspend.
+ if (location.IsSuspend()) {
+ DCHECK(!has_suspended_generator());
+ thread_local_.suspended_generator_ =
+ location.GetGeneratorObjectForSuspendedFrame(frame);
+ ClearStepping();
+ return;
+ }
+
FrameSummary summary = FrameSummary::GetTop(frame);
step_break = step_break || location.IsReturn() ||
current_frame_count != last_frame_count ||
@@ -445,9 +510,9 @@ MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
if (has_break_points) *has_break_points = has_break_points_to_check;
if (!has_break_points_to_check) return {};
- Handle<Object> break_point_objects =
- debug_info->GetBreakPointObjects(location->position());
- return Debug::GetHitBreakPointObjects(break_point_objects);
+ Handle<Object> break_points =
+ debug_info->GetBreakPoints(location->position());
+ return Debug::GetHitBreakPoints(break_points);
}
@@ -502,52 +567,27 @@ MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
// Check whether a single break point object is triggered.
-bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
- Factory* factory = isolate_->factory();
+bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point) {
HandleScope scope(isolate_);
- // TODO(kozyatinskiy): replace this if by DCHEK once the JS debug API has been
- // removed.
- if (break_point_object->IsBreakPoint()) {
- Handle<BreakPoint> break_point =
- Handle<BreakPoint>::cast(break_point_object);
- if (!break_point->condition()->length()) return true;
- Handle<String> condition(break_point->condition());
- Handle<Object> result;
- // Since we call CheckBreakpoint only for deoptimized frame on top of stack,
- // we can use 0 as index of inlined frame.
- if (!DebugEvaluate::Local(isolate_, break_frame_id(),
- /* inlined_jsframe_index */ 0, condition, false)
- .ToHandle(&result)) {
- if (isolate_->has_pending_exception()) {
- isolate_->clear_pending_exception();
- }
- return false;
- }
- return result->BooleanValue();
- }
-
- // Ignore check if break point object is not a JSObject.
- if (!break_point_object->IsJSObject()) return true;
-
- // Get the break id as an object.
- Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
-
- // Call IsBreakPointTriggered.
- Handle<Object> argv[] = { break_id, break_point_object };
+ if (!break_point->condition()->length()) return true;
+ Handle<String> condition(break_point->condition());
Handle<Object> result;
- if (!CallFunction("IsBreakPointTriggered", arraysize(argv), argv)
+  // Since we call CheckBreakpoint only for the deoptimized frame on top of
+  // the stack, we can use 0 as the index of the inlined frame.
+ if (!DebugEvaluate::Local(isolate_, break_frame_id(),
+ /* inlined_jsframe_index */ 0, condition, false)
.ToHandle(&result)) {
+ if (isolate_->has_pending_exception()) {
+ isolate_->clear_pending_exception();
+ }
return false;
}
-
- // Return whether the break point is triggered.
- return result->IsTrue(isolate_);
+ return result->BooleanValue();
}
-
bool Debug::SetBreakPoint(Handle<JSFunction> function,
- Handle<Object> break_point_object,
+ Handle<BreakPoint> break_point,
int* source_position) {
HandleScope scope(isolate_);
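
With the JS fallback path deleted, CheckBreakPoint evaluates the breakpoint's condition string directly and treats a thrown exception the same as a falsy result: the breakpoint does not trigger. The control flow as a standalone sketch (the evaluator is a stand-in for DebugEvaluate::Local):

    #include <functional>
    #include <optional>
    #include <string>

    // Stand-in evaluator: std::nullopt means evaluation threw.
    using Evaluator =
        std::function<std::optional<bool>(const std::string& condition)>;

    // Unconditional breakpoints always trigger; conditional ones trigger
    // only when the condition evaluates, without throwing, to true.
    bool CheckBreakPoint(const std::string& condition, const Evaluator& eval) {
      if (condition.empty()) return true;
      std::optional<bool> result = eval(condition);
      if (!result.has_value()) return false;  // pending exception is cleared
      return *result;
    }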
@@ -561,7 +601,7 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
// Find the break point and change it.
*source_position = FindBreakablePosition(debug_info, *source_position);
- DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
+ DebugInfo::SetBreakPoint(debug_info, *source_position, break_point);
// At least one active break point now.
DCHECK_LT(0, debug_info->GetBreakPointCount());
@@ -573,13 +613,13 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
}
bool Debug::SetBreakPointForScript(Handle<Script> script,
- Handle<Object> break_point_object,
+ Handle<BreakPoint> break_point,
int* source_position) {
if (script->type() == Script::TYPE_WASM) {
Handle<WasmCompiledModule> compiled_module(
WasmCompiledModule::cast(script->wasm_compiled_module()), isolate_);
return WasmCompiledModule::SetBreakPoint(compiled_module, source_position,
- break_point_object);
+ break_point);
}
HandleScope scope(isolate_);
@@ -609,7 +649,7 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
if (breakable_position < *source_position) return false;
*source_position = breakable_position;
- DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
+ DebugInfo::SetBreakPoint(debug_info, *source_position, break_point);
// At least one active break point now.
DCHECK_LT(0, debug_info->GetBreakPointCount());
@@ -622,48 +662,60 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
int Debug::FindBreakablePosition(Handle<DebugInfo> debug_info,
int source_position) {
- DCHECK(debug_info->HasDebugBytecodeArray());
- BreakIterator it(debug_info);
- it.SkipToPosition(source_position);
- return it.position();
+ if (debug_info->CanBreakAtEntry()) {
+ return kBreakAtEntryPosition;
+ } else {
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ BreakIterator it(debug_info);
+ it.SkipToPosition(source_position);
+ return it.position();
+ }
}
void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
DisallowHeapAllocation no_gc;
- if (debug_info->break_points()->IsUndefined(isolate_)) return;
- FixedArray* break_points = debug_info->break_points();
- for (int i = 0; i < break_points->length(); i++) {
- if (break_points->get(i)->IsUndefined(isolate_)) continue;
- BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
- if (info->GetBreakPointCount() == 0) continue;
- DCHECK(debug_info->HasDebugBytecodeArray());
- BreakIterator it(debug_info);
- it.SkipToPosition(info->source_position());
- it.SetDebugBreak();
+ if (debug_info->CanBreakAtEntry()) {
+ debug_info->SetBreakAtEntry();
+ } else {
+ if (!debug_info->HasDebugBytecodeArray()) return;
+ FixedArray* break_points = debug_info->break_points();
+ for (int i = 0; i < break_points->length(); i++) {
+ if (break_points->get(i)->IsUndefined(isolate_)) continue;
+ BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
+ if (info->GetBreakPointCount() == 0) continue;
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ BreakIterator it(debug_info);
+ it.SkipToPosition(info->source_position());
+ it.SetDebugBreak();
+ }
}
}
void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
- // If we attempt to clear breakpoints but none exist, simply return. This can
- // happen e.g. CoverageInfos exit but no breakpoints are set.
- if (!debug_info->HasDebugBytecodeArray()) return;
+ if (debug_info->CanBreakAtEntry()) {
+ debug_info->ClearBreakAtEntry();
+ } else {
+ // If we attempt to clear breakpoints but none exist, simply return. This
+ // can happen e.g. CoverageInfos exist but no breakpoints are set.
+ if (!debug_info->HasDebugBytecodeArray()) return;
- DisallowHeapAllocation no_gc;
- for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
- it.ClearDebugBreak();
+ DisallowHeapAllocation no_gc;
+ for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
+ it.ClearDebugBreak();
+ }
}
}
-void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
+void Debug::ClearBreakPoint(Handle<BreakPoint> break_point) {
HandleScope scope(isolate_);
for (DebugInfoListNode* node = debug_info_list_; node != nullptr;
node = node->next()) {
Handle<Object> result =
- DebugInfo::FindBreakPointInfo(node->debug_info(), break_point_object);
+ DebugInfo::FindBreakPointInfo(node->debug_info(), break_point);
if (result->IsUndefined(isolate_)) continue;
Handle<DebugInfo> debug_info = node->debug_info();
- if (DebugInfo::ClearBreakPoint(debug_info, break_point_object)) {
+ if (DebugInfo::ClearBreakPoint(debug_info, break_point)) {
ClearBreakPoints(debug_info);
if (debug_info->GetBreakPointCount() == 0) {
RemoveBreakInfoAndMaybeFree(debug_info);
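
ApplyBreakPoints and ClearBreakPoints now branch once on CanBreakAtEntry: entry-breakable functions toggle a single flag on the DebugInfo, while everything else still iterates debug bytecode slots. Factored out with stand-in types, the shape is:

    #include <vector>

    struct DebugInfoStub {
      bool can_break_at_entry = false;
      bool break_at_entry = false;     // the single entry-break flag
      bool has_debug_bytecode = false;
      std::vector<bool> slots;         // per-location debug-break bits
    };

    void ApplyBreakPoints(DebugInfoStub& info) {
      if (info.can_break_at_entry) {
        info.break_at_entry = true;    // one flag instead of patching code
        return;
      }
      if (!info.has_debug_bytecode) return;  // nothing to patch yet
      info.slots.assign(info.slots.size(), true);
    }

    void ClearBreakPoints(DebugInfoStub& info) {
      if (info.can_break_at_entry) {
        info.break_at_entry = false;
        return;
      }
      if (!info.has_debug_bytecode) return;
      info.slots.assign(info.slots.size(), false);
    }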
@@ -707,7 +759,7 @@ void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared,
// Flood the function with break points.
DCHECK(debug_info->HasDebugBytecodeArray());
for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
- if (returns_only && !it.GetBreakLocation().IsReturn()) continue;
+ if (returns_only && !it.GetBreakLocation().IsReturnOrSuspend()) continue;
it.SetDebugBreak();
}
}
@@ -729,25 +781,24 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
}
-MaybeHandle<FixedArray> Debug::GetHitBreakPointObjects(
- Handle<Object> break_point_objects) {
- DCHECK(!break_point_objects->IsUndefined(isolate_));
- if (!break_point_objects->IsFixedArray()) {
- if (!CheckBreakPoint(break_point_objects)) return {};
+MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<Object> break_points) {
+ DCHECK(!break_points->IsUndefined(isolate_));
+ if (!break_points->IsFixedArray()) {
+ if (!CheckBreakPoint(Handle<BreakPoint>::cast(break_points))) return {};
Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(1);
- break_points_hit->set(0, *break_point_objects);
+ break_points_hit->set(0, *break_points);
return break_points_hit;
}
- Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
+ Handle<FixedArray> array(FixedArray::cast(*break_points));
int num_objects = array->length();
Handle<FixedArray> break_points_hit =
isolate_->factory()->NewFixedArray(num_objects);
int break_points_hit_count = 0;
for (int i = 0; i < num_objects; ++i) {
- Handle<Object> break_point_object(array->get(i), isolate_);
- if (CheckBreakPoint(break_point_object)) {
- break_points_hit->set(break_points_hit_count++, *break_point_object);
+ Handle<Object> break_point(array->get(i), isolate_);
+ if (CheckBreakPoint(Handle<BreakPoint>::cast(break_point))) {
+ break_points_hit->set(break_points_hit_count++, *break_point);
}
}
if (break_points_hit_count == 0) return {};
@@ -824,11 +875,10 @@ void Debug::PrepareStepOnThrow() {
if (summaries.size() > 1) {
Handle<AbstractCode> code = summary.AsJavaScript().abstract_code();
CHECK_EQ(AbstractCode::INTERPRETED_FUNCTION, code->kind());
- BytecodeArray* bytecode = code->GetBytecodeArray();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+ HandlerTable table(code->GetBytecodeArray());
int code_offset = summary.code_offset();
HandlerTable::CatchPrediction prediction;
- int index = table->LookupRange(code_offset, nullptr, &prediction);
+ int index = table.LookupRange(code_offset, nullptr, &prediction);
if (index > 0) found_handler = true;
} else {
found_handler = true;
@@ -879,7 +929,7 @@ void Debug::PrepareStep(StepAction step_action) {
if (frame->is_wasm_compiled()) return;
WasmInterpreterEntryFrame* wasm_frame =
WasmInterpreterEntryFrame::cast(frame);
- wasm_frame->wasm_instance()->debug_info()->PrepareStep(step_action);
+ wasm_frame->debug_info()->PrepareStep(step_action);
return;
}
@@ -895,9 +945,9 @@ void Debug::PrepareStep(StepAction step_action) {
BreakLocation location = BreakLocation::FromFrame(debug_info, js_frame);
- // Any step at a return is a step-out and we need to schedule DebugOnFunction
- // call callback.
- if (location.IsReturn()) {
+ // Any step at a return is a step-out, and a step-out at a suspend behaves
+ // like a return.
+ if (location.IsReturn() || (location.IsSuspend() && step_action == StepOut)) {
// On StepOut we'll ignore our further calls to current function in
// PrepareStepIn callback.
if (last_step_action() == StepOut) {
@@ -906,6 +956,8 @@ void Debug::PrepareStep(StepAction step_action) {
step_action = StepOut;
thread_local_.last_step_action_ = StepIn;
}
+
+  // We need to schedule the DebugOnFunctionCall callback.
UpdateHookOnFunctionCall();
// A step-next in blackboxed function is a step-out.
@@ -926,7 +978,7 @@ void Debug::PrepareStep(StepAction step_action) {
// Clear last position info. For stepping out it does not matter.
thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_frame_count_ = -1;
- if (!location.IsReturn() && !IsBlackboxed(shared)) {
+ if (!location.IsReturnOrSuspend() && !IsBlackboxed(shared)) {
// At not return position we flood return positions with one shots and
// will repeat StepOut automatically at next break.
thread_local_.target_frame_count_ = current_frame_count;
@@ -966,7 +1018,7 @@ void Debug::PrepareStep(StepAction step_action) {
}
case StepNext:
thread_local_.target_frame_count_ = current_frame_count;
- // Fall through.
+ V8_FALLTHROUGH;
case StepIn:
// TODO(clemensh): Implement stepping from JS into wasm.
FloodWithOneShot(shared);
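
The bare "// Fall through." comments are replaced by V8_FALLTHROUGH, the project's portable spelling of the fall-through attribute, so compilers can verify the fall-through is intentional. In standard C++17 the same effect looks like this:

    #include <cstdio>

    void Prepare(int step_action) {
      switch (step_action) {
        case 0:              // a StepNext-like case
          std::puts("record target frame count");
          [[fallthrough]];   // what V8_FALLTHROUGH amounts to in C++17
        case 1:              // a StepIn-like case
          std::puts("flood function with one-shot breaks");
          break;
        default:
          break;
      }
    }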
@@ -1060,10 +1112,7 @@ class RedirectActiveFunctions : public ThreadVisitor {
void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
// Deoptimize all code compiled from this shared function info including
// inlining.
- if (isolate_->concurrent_recompilation_enabled()) {
- isolate_->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kBlock);
- }
+ isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);
// Make sure we abort incremental marking.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
@@ -1094,11 +1143,16 @@ void Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
if (debug_info->IsPreparedForBreakpoints()) return;
- DeoptimizeFunction(shared);
- // Update PCs on the stack to point to recompiled code.
- RedirectActiveFunctions redirect_visitor(*shared);
- redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
- isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
+ if (debug_info->CanBreakAtEntry()) {
+ // Deopt everything in case the function is inlined anywhere.
+ Deoptimizer::DeoptimizeAll(isolate_);
+ } else {
+ DeoptimizeFunction(shared);
+ // Update PCs on the stack to point to recompiled code.
+ RedirectActiveFunctions redirect_visitor(*shared);
+ redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
+ isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
+ }
debug_info->set_flags(debug_info->flags() |
DebugInfo::kPreparedForBreakpoints);
@@ -1184,19 +1238,6 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
UNREACHABLE();
}
-void Debug::RecordGenerator(Handle<JSGeneratorObject> generator_object) {
- if (last_step_action() <= StepOut) return;
-
- if (last_step_action() == StepNext) {
- // Only consider this generator a step-next target if not stepping in.
- if (thread_local_.target_frame_count_ < CurrentFrameCount()) return;
- }
-
- DCHECK(!has_suspended_generator());
- thread_local_.suspended_generator_ = *generator_object;
- ClearStepping();
-}
-
class SharedFunctionInfoFinder {
public:
explicit SharedFunctionInfoFinder(int target_position)
@@ -1300,7 +1341,9 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
bool Debug::EnsureBreakInfo(Handle<SharedFunctionInfo> shared) {
// Return if we already have the break info for shared.
if (shared->HasBreakInfo()) return true;
- if (!shared->IsSubjectToDebugging()) return false;
+ if (!shared->IsSubjectToDebugging() && !CanBreakAtEntry(shared)) {
+ return false;
+ }
if (!shared->is_compiled() &&
!Compiler::Compile(shared, Compiler::CLEAR_EXCEPTION)) {
return false;
@@ -1328,7 +1371,10 @@ void Debug::CreateBreakInfo(Handle<SharedFunctionInfo> shared) {
maybe_debug_bytecode_array = factory->CopyBytecodeArray(original);
}
- debug_info->set_flags(debug_info->flags() | DebugInfo::kHasBreakInfo);
+ int flags = debug_info->flags();
+ flags |= DebugInfo::kHasBreakInfo;
+ if (CanBreakAtEntry(shared)) flags |= DebugInfo::kCanBreakAtEntry;
+ debug_info->set_flags(flags);
debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
debug_info->set_break_points(*break_points);
}
@@ -1494,14 +1540,6 @@ MaybeHandle<Object> Debug::MakeExecutionState() {
}
-MaybeHandle<Object> Debug::MakeBreakEvent(Handle<Object> break_points_hit) {
- // Create the new break event object.
- Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()),
- break_points_hit };
- return CallFunction("MakeBreakEvent", arraysize(argv), argv);
-}
-
-
MaybeHandle<Object> Debug::MakeExceptionEvent(Handle<Object> exception,
bool uncaught,
Handle<Object> promise) {
@@ -1677,29 +1715,15 @@ void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
int inspector_break_points_count = 0;
// This array contains breakpoints installed using JS debug API.
for (int i = 0; i < break_points_hit->length(); ++i) {
- Object* break_point = break_points_hit->get(i);
- if (break_point->IsBreakPoint()) {
- inspector_break_points_hit.push_back(BreakPoint::cast(break_point)->id());
- ++inspector_break_points_count;
- } else {
- break_points_hit->set(i - inspector_break_points_count, break_point);
- }
- }
- int break_points_length =
- break_points_hit->length() - inspector_break_points_count;
- Handle<Object> break_points;
- if (break_points_length) {
- break_points_hit->Shrink(break_points_length);
- break_points = isolate_->factory()->NewJSArrayWithElements(
- break_points_hit, PACKED_ELEMENTS, break_points_length);
- } else {
- break_points = isolate_->factory()->undefined_value();
+ BreakPoint* break_point = BreakPoint::cast(break_points_hit->get(i));
+ inspector_break_points_hit.push_back(break_point->id());
+ ++inspector_break_points_count;
}
debug_delegate_->BreakProgramRequested(
GetDebugEventContext(isolate_),
v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
- v8::Utils::ToLocal(break_points), inspector_break_points_hit);
+ inspector_break_points_hit);
}
@@ -1860,6 +1884,16 @@ bool Debug::AllFramesOnStackAreBlackboxed() {
return true;
}
+bool Debug::CanBreakAtEntry(Handle<SharedFunctionInfo> shared) {
+ // Allow break at entry for builtin functions.
+ if (shared->native()) {
+ // Functions that are subject to debugging can have regular breakpoints.
+ DCHECK(!shared->IsSubjectToDebugging());
+ return true;
+ }
+ return false;
+}
+
bool Debug::SetScriptSource(Handle<Script> script, Handle<String> source,
bool preview, bool* stack_changed) {
DebugScope debug_scope(this);
@@ -1971,6 +2005,7 @@ void Debug::UpdateHookOnFunctionCall() {
STATIC_ASSERT(LastStepAction == StepIn);
hook_on_function_call_ = thread_local_.last_step_action_ == StepIn ||
isolate_->needs_side_effect_check();
+ DCHECK_IMPLIES(hook_on_function_call_, is_active_);
}
MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
@@ -2146,7 +2181,6 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
return false;
}
- Deoptimizer::DeoptimizeFunction(*function);
if (!SharedFunctionInfo::HasNoSideEffect(handle(function->shared()))) {
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Function %s failed side effect check.\n",
@@ -2195,16 +2229,9 @@ void LegacyDebugDelegate::ScriptCompiled(v8::Local<v8::debug::Script> script,
void LegacyDebugDelegate::BreakProgramRequested(
v8::Local<v8::Context> paused_context, v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<debug::BreakpointId>&) {
- Handle<Object> event_data;
- if (isolate_->debug()
- ->MakeBreakEvent(v8::Utils::OpenHandle(*break_points_hit))
- .ToHandle(&event_data)) {
- ProcessDebugEvent(
- v8::Break, Handle<JSObject>::cast(event_data),
- Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
- }
+ ProcessDebugEvent(v8::Break, isolate_->factory()->NewJSObjectWithNullProto(),
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
}
void LegacyDebugDelegate::ExceptionThrown(v8::Local<v8::Context> paused_context,
@@ -2231,32 +2258,6 @@ void LegacyDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
}
}
-JavaScriptDebugDelegate::JavaScriptDebugDelegate(Isolate* isolate,
- Handle<JSFunction> listener,
- Handle<Object> data)
- : LegacyDebugDelegate(isolate) {
- GlobalHandles* global_handles = isolate->global_handles();
- listener_ = global_handles->Create(*listener);
- data_ = global_handles->Create(*data);
-}
-
-JavaScriptDebugDelegate::~JavaScriptDebugDelegate() {
- GlobalHandles::Destroy(Handle<Object>::cast(listener_).location());
- GlobalHandles::Destroy(data_.location());
-}
-
-void JavaScriptDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
- Handle<JSObject> exec_state) {
- AllowJavascriptExecutionDebugOnly allow_script(isolate_);
- Handle<Object> argv[] = {Handle<Object>(Smi::FromInt(event), isolate_),
- exec_state, event_data, data_};
- Handle<JSReceiver> global = isolate_->global_proxy();
- // Listener must not throw.
- Execution::Call(isolate_, listener_, global, arraysize(argv), argv)
- .ToHandleChecked();
-}
-
NativeDebugDelegate::NativeDebugDelegate(Isolate* isolate,
v8::Debug::EventCallback callback,
Handle<Object> data)
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index aec66f2f35..4ea9c2b872 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -25,12 +25,9 @@
#include "src/string-stream.h"
#include "src/v8threads.h"
-#include "include/v8-debug.h"
-
namespace v8 {
namespace internal {
-
// Forward declarations.
class DebugScope;
@@ -51,13 +48,14 @@ enum ExceptionBreakType {
BreakUncaughtException = 1
};
-
enum DebugBreakType {
NOT_DEBUG_BREAK,
DEBUGGER_STATEMENT,
DEBUG_BREAK_SLOT,
DEBUG_BREAK_SLOT_AT_CALL,
DEBUG_BREAK_SLOT_AT_RETURN,
+ DEBUG_BREAK_SLOT_AT_SUSPEND,
+ DEBUG_BREAK_AT_ENTRY,
};
enum IgnoreBreakMode {
@@ -74,12 +72,20 @@ class BreakLocation {
JavaScriptFrame* frame,
std::vector<BreakLocation>* result_out);
+ inline bool IsSuspend() const { return type_ == DEBUG_BREAK_SLOT_AT_SUSPEND; }
inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
+ inline bool IsReturnOrSuspend() const {
+ return type_ >= DEBUG_BREAK_SLOT_AT_RETURN;
+ }
inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
inline bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
inline bool IsDebuggerStatement() const {
return type_ == DEBUGGER_STATEMENT;
}
+  inline bool IsDebugBreakAtEntry() const {
+    return type_ == DEBUG_BREAK_AT_ENTRY;
+  }
bool HasBreakPoint(Handle<DebugInfo> debug_info) const;
@@ -87,16 +93,26 @@ class BreakLocation {
debug::BreakLocationType type() const;
+ JSGeneratorObject* GetGeneratorObjectForSuspendedFrame(
+ JavaScriptFrame* frame) const;
+
private:
BreakLocation(Handle<AbstractCode> abstract_code, DebugBreakType type,
- int code_offset, int position)
+ int code_offset, int position, int generator_obj_reg_index)
: abstract_code_(abstract_code),
code_offset_(code_offset),
type_(type),
- position_(position) {
+ position_(position),
+ generator_obj_reg_index_(generator_obj_reg_index) {
DCHECK_NE(NOT_DEBUG_BREAK, type_);
}
+ BreakLocation(int position, DebugBreakType type)
+ : code_offset_(0),
+ type_(type),
+ position_(position),
+ generator_obj_reg_index_(0) {}
+
static int BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
Handle<AbstractCode> abstract_code,
int offset);
@@ -108,6 +124,7 @@ class BreakLocation {
int code_offset_;
DebugBreakType type_;
int position_;
+ int generator_obj_reg_index_;
friend class BreakIterator;
};
@@ -215,19 +232,20 @@ class Debug {
// Internal logic
bool Load();
- void Break(JavaScriptFrame* frame);
+  // The break target may not belong to the top-most frame, since we may be
+  // breaking before entering a function that cannot contain break points.
+ void Break(JavaScriptFrame* frame, Handle<JSFunction> break_target);
// Scripts handling.
Handle<FixedArray> GetLoadedScripts();
// Break point handling.
bool SetBreakPoint(Handle<JSFunction> function,
- Handle<Object> break_point_object,
- int* source_position);
+ Handle<BreakPoint> break_point, int* source_position);
bool SetBreakPointForScript(Handle<Script> script,
- Handle<Object> break_point_object,
+ Handle<BreakPoint> break_point,
int* source_position);
- void ClearBreakPoint(Handle<Object> break_point_object);
+ void ClearBreakPoint(Handle<BreakPoint> break_point);
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
@@ -235,12 +253,11 @@ class Debug {
int* offset, int* id);
void RemoveBreakpoint(int id);
- // The parameter is either a BreakPointInfo object, or a FixedArray of
- // BreakPointInfo objects.
+ // The parameter is either a BreakPoint object, or a FixedArray of
+ // BreakPoint objects.
// Returns an empty handle if no breakpoint is hit, or a FixedArray with all
- // hit breakpoints.
- MaybeHandle<FixedArray> GetHitBreakPointObjects(
- Handle<Object> break_point_objects);
+ // hit BreakPoint objects.
+ MaybeHandle<FixedArray> GetHitBreakPoints(Handle<Object> break_points);
// Stepping handling.
void PrepareStep(StepAction step_action);
@@ -256,8 +273,6 @@ class Debug {
int end_position, bool restrict_to_function,
std::vector<BreakLocation>* locations);
- void RecordGenerator(Handle<JSGeneratorObject> generator_object);
-
void RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
Handle<Object> parent);
@@ -265,6 +280,8 @@ class Debug {
bool IsBlackboxed(Handle<SharedFunctionInfo> shared);
+ bool CanBreakAtEntry(Handle<SharedFunctionInfo> shared);
+
void SetDebugDelegate(debug::DebugDelegate* delegate, bool pass_ownership);
// Returns whether the operation succeeded.
@@ -339,6 +356,10 @@ class Debug {
inline bool in_debug_scope() const {
return !!base::Relaxed_Load(&thread_local_.current_debug_scope_);
}
+ inline bool needs_check_on_function_call() const {
+ return hook_on_function_call_;
+ }
+
void set_break_points_active(bool v) { break_points_active_ = v; }
bool break_points_active() const { return break_points_active_; }
@@ -376,6 +397,10 @@ class Debug {
DebugFeatureTracker* feature_tracker() { return &feature_tracker_; }
+ // For functions in which we cannot set a break point, use a canonical
+ // source position for break points.
+ static const int kBreakAtEntryPosition = 0;
+
private:
explicit Debug(Isolate* isolate);
~Debug() { DCHECK_NULL(debug_delegate_); }
@@ -410,8 +435,6 @@ class Debug {
// Constructors for debug event objects.
MUST_USE_RESULT MaybeHandle<Object> MakeExecutionState();
- MUST_USE_RESULT MaybeHandle<Object> MakeBreakEvent(
- Handle<Object> break_points_hit);
MUST_USE_RESULT MaybeHandle<Object> MakeExceptionEvent(
Handle<Object> exception,
bool uncaught,
@@ -445,7 +468,7 @@ class Debug {
BreakLocation* location,
bool* has_break_points = nullptr);
bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
- bool CheckBreakPoint(Handle<Object> break_point_object);
+ bool CheckBreakPoint(Handle<BreakPoint> break_point);
MaybeHandle<Object> CallFunction(const char* name, int argc,
Handle<Object> args[],
bool catch_exceptions = true);
@@ -577,7 +600,6 @@ class LegacyDebugDelegate : public v8::debug::DebugDelegate {
bool has_compile_error) override;
void BreakProgramRequested(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<debug::BreakpointId>&) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
@@ -599,20 +621,6 @@ class LegacyDebugDelegate : public v8::debug::DebugDelegate {
Handle<JSObject> exec_state) = 0;
};
-class JavaScriptDebugDelegate : public LegacyDebugDelegate {
- public:
- JavaScriptDebugDelegate(Isolate* isolate, Handle<JSFunction> listener,
- Handle<Object> data);
- virtual ~JavaScriptDebugDelegate();
-
- private:
- void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
- Handle<JSObject> exec_state) override;
-
- Handle<JSFunction> listener_;
- Handle<Object> data_;
-};
-
class NativeDebugDelegate : public LegacyDebugDelegate {
public:
NativeDebugDelegate(Isolate* isolate, v8::Debug::EventCallback callback,
@@ -630,7 +638,6 @@ class NativeDebugDelegate : public LegacyDebugDelegate {
virtual v8::Local<v8::Object> GetEventData() const;
virtual v8::Local<v8::Context> GetEventContext() const;
virtual v8::Local<v8::Value> GetCallbackData() const;
- virtual v8::Debug::ClientData* GetClientData() const { return nullptr; }
virtual v8::Isolate* GetIsolate() const;
private:
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 43be3c424a..97a0886ca7 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -19,16 +19,8 @@ var ValueMirror = global.ValueMirror;
//----------------------------------------------------------------------------
-// Default number of frames to include in the response to backtrace request.
-var kDefaultBacktraceLength = 10;
-
var Debug = {};
-// Regular expression to skip "crud" at the beginning of a source line which is
-// not really code. Currently the regular expression matches whitespace and
-// comments.
-var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
-
// Debug events which can occur in the V8 JavaScript engine. These originate
// from the API include file debug.h.
Debug.DebugEvent = { Break: 1,
@@ -58,29 +50,12 @@ Debug.ScriptCompilationType = { Host: 0,
Eval: 1,
JSON: 2 };
-// The different script break point types.
-Debug.ScriptBreakPointType = { ScriptId: 0,
- ScriptName: 1,
- ScriptRegExp: 2 };
-
function ScriptTypeFlag(type) {
return (1 << type);
}
// Globals.
-var next_response_seq = 0;
-var next_break_point_number = 1;
-var break_points = [];
-var script_break_points = [];
var debugger_flags = {
- breakPointsActive: {
- value: true,
- getValue: function() { return this.value; },
- setValue: function(value) {
- this.value = !!value;
- %SetBreakPointsActive(this.value);
- }
- },
breakOnCaughtException: {
getValue: function() { return Debug.isBreakOnException(); },
setValue: function(value) {
@@ -104,308 +79,6 @@ var debugger_flags = {
};
-// Create a new break point object and add it to the list of break points.
-function MakeBreakPoint(source_position, opt_script_break_point) {
- var break_point = new BreakPoint(source_position, opt_script_break_point);
- break_points.push(break_point);
- return break_point;
-}
-
-
-// Object representing a break point.
-// NOTE: This object does not have a reference to the function having break
-// point as this would cause function not to be garbage collected when it is
-// not used any more. We do not want break points to keep functions alive.
-function BreakPoint(source_position, opt_script_break_point) {
- this.source_position_ = source_position;
- if (opt_script_break_point) {
- this.script_break_point_ = opt_script_break_point;
- } else {
- this.number_ = next_break_point_number++;
- }
- this.active_ = true;
- this.condition_ = null;
-}
-
-
-BreakPoint.prototype.number = function() {
- return this.number_;
-};
-
-
-BreakPoint.prototype.func = function() {
- return this.func_;
-};
-
-
-BreakPoint.prototype.source_position = function() {
- return this.source_position_;
-};
-
-
-BreakPoint.prototype.active = function() {
- if (this.script_break_point()) {
- return this.script_break_point().active();
- }
- return this.active_;
-};
-
-
-BreakPoint.prototype.condition = function() {
- if (this.script_break_point() && this.script_break_point().condition()) {
- return this.script_break_point().condition();
- }
- return this.condition_;
-};
-
-
-BreakPoint.prototype.script_break_point = function() {
- return this.script_break_point_;
-};
-
-
-BreakPoint.prototype.enable = function() {
- this.active_ = true;
-};
-
-
-BreakPoint.prototype.disable = function() {
- this.active_ = false;
-};
-
-
-BreakPoint.prototype.setCondition = function(condition) {
- this.condition_ = condition;
-};
-
-
-BreakPoint.prototype.isTriggered = function(exec_state) {
- // Break point not active - not triggered.
- if (!this.active()) return false;
-
- // Check for conditional break point.
- if (this.condition()) {
- // If break point has condition try to evaluate it in the top frame.
- try {
- var mirror = exec_state.frame(0).evaluate(this.condition());
- // If no sensible mirror or non true value break point not triggered.
- if (!(mirror instanceof ValueMirror) || !mirror.value_) {
- return false;
- }
- } catch (e) {
- // Exception evaluating condition counts as not triggered.
- return false;
- }
- }
-
- // Break point triggered.
- return true;
-};
-
-
-// Function called from the runtime when a break point is hit. Returns true if
-// the break point is triggered and supposed to break execution.
-function IsBreakPointTriggered(break_id, break_point) {
- return break_point.isTriggered(MakeExecutionState(break_id));
-}
-
-
-// Object representing a script break point. The script is referenced by its
-// script name or script id and the break point is represented as line and
-// column.
-function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId) {
- this.type_ = type;
- if (type == Debug.ScriptBreakPointType.ScriptId) {
- this.script_id_ = script_id_or_name;
- } else if (type == Debug.ScriptBreakPointType.ScriptName) {
- this.script_name_ = script_id_or_name;
- } else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
- this.script_regexp_object_ = new GlobalRegExp(script_id_or_name);
- } else {
- throw %make_error(kDebugger, "Unexpected breakpoint type " + type);
- }
- this.line_ = opt_line || 0;
- this.column_ = opt_column;
- this.groupId_ = opt_groupId;
- this.active_ = true;
- this.condition_ = null;
- this.break_points_ = [];
-}
-
-
-ScriptBreakPoint.prototype.number = function() {
- return this.number_;
-};
-
-
-ScriptBreakPoint.prototype.groupId = function() {
- return this.groupId_;
-};
-
-
-ScriptBreakPoint.prototype.type = function() {
- return this.type_;
-};
-
-
-ScriptBreakPoint.prototype.script_id = function() {
- return this.script_id_;
-};
-
-
-ScriptBreakPoint.prototype.script_name = function() {
- return this.script_name_;
-};
-
-
-ScriptBreakPoint.prototype.script_regexp_object = function() {
- return this.script_regexp_object_;
-};
-
-
-ScriptBreakPoint.prototype.line = function() {
- return this.line_;
-};
-
-
-ScriptBreakPoint.prototype.column = function() {
- return this.column_;
-};
-
-
-ScriptBreakPoint.prototype.actual_locations = function() {
- var locations = [];
- for (var i = 0; i < this.break_points_.length; i++) {
- locations.push(this.break_points_[i].actual_location);
- }
- return locations;
-};
-
-
-ScriptBreakPoint.prototype.update_positions = function(line, column) {
- this.line_ = line;
- this.column_ = column;
-};
-
-
-ScriptBreakPoint.prototype.active = function() {
- return this.active_;
-};
-
-
-ScriptBreakPoint.prototype.condition = function() {
- return this.condition_;
-};
-
-
-ScriptBreakPoint.prototype.enable = function() {
- this.active_ = true;
-};
-
-
-ScriptBreakPoint.prototype.disable = function() {
- this.active_ = false;
-};
-
-
-ScriptBreakPoint.prototype.setCondition = function(condition) {
- this.condition_ = condition;
-};
-
-
-// Check whether a script matches this script break point. Currently this is
-// only based on script name.
-ScriptBreakPoint.prototype.matchesScript = function(script) {
- if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
- return this.script_id_ == script.id;
- } else {
- // We might want to account columns here as well.
- if (!(script.line_offset <= this.line_ &&
- this.line_ < script.line_offset + %ScriptLineCount(script))) {
- return false;
- }
- if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
- return this.script_name_ == script.nameOrSourceURL();
- } else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
- return this.script_regexp_object_.test(script.nameOrSourceURL());
- } else {
- throw %make_error(kDebugger, "Unexpected breakpoint type " + this.type_);
- }
- }
-};
-
-
-// Set the script break point in a script.
-ScriptBreakPoint.prototype.set = function (script) {
- var column = this.column();
- var line = this.line();
- // If the column is undefined the break is on the line. To help locate the
- // first piece of breakable code on the line try to find the column on the
- // line which contains some source.
- if (IS_UNDEFINED(column)) {
- var source_line = %ScriptSourceLine(script, line || script.line_offset);
-
- // Allocate array for caching the columns where the actual source starts.
- if (!script.sourceColumnStart_) {
- script.sourceColumnStart_ = new GlobalArray(%ScriptLineCount(script));
- }
-
- // Fill cache if needed and get column where the actual source starts.
- if (IS_UNDEFINED(script.sourceColumnStart_[line])) {
- script.sourceColumnStart_[line] =
- source_line.match(sourceLineBeginningSkip)[0].length;
- }
- column = script.sourceColumnStart_[line];
- }
-
- // Convert the line and column into an absolute position within the script.
- var position = Debug.findScriptSourcePosition(script, this.line(), column);
-
- // If the position is not found in the script (the script might be shorter
- // than it used to be) just ignore it.
- if (IS_NULL(position)) return;
-
- // Create a break point object and set the break point.
- var break_point = MakeBreakPoint(position, this);
- var actual_position = %SetScriptBreakPoint(script, position,
- break_point);
- if (IS_UNDEFINED(actual_position)) {
- actual_position = position;
- }
- var actual_location = script.locationFromPosition(actual_position, true);
- break_point.actual_location = { line: actual_location.line,
- column: actual_location.column,
- script_id: script.id };
- this.break_points_.push(break_point);
- return break_point;
-};
-
-
-// Clear all the break points created from this script break point
-ScriptBreakPoint.prototype.clear = function () {
- var remaining_break_points = [];
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].script_break_point() &&
- break_points[i].script_break_point() === this) {
- %ClearBreakPoint(break_points[i]);
- } else {
- remaining_break_points.push(break_points[i]);
- }
- }
- break_points = remaining_break_points;
- this.break_points_ = [];
-};
-
-
-Debug.setListener = function(listener, opt_data) {
- if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
- throw %make_type_error(kDebuggerType);
- }
- %SetDebugEventListener(listener, opt_data);
-};
-
-
// Returns a Script object. If the parameter is a function the return value
// is the script in which the function is defined. If the parameter is a string
// the return value is the script for which the script name has that string
@@ -475,246 +148,6 @@ Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
return location ? location.position : null;
};
-
-Debug.findBreakPoint = function(break_point_number, remove) {
- var break_point;
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].number() == break_point_number) {
- break_point = break_points[i];
- // Remove the break point from the list if requested.
- if (remove) {
- break_points.splice(i, 1);
- }
- break;
- }
- }
- if (break_point) {
- return break_point;
- } else {
- return this.findScriptBreakPoint(break_point_number, remove);
- }
-};
-
-Debug.findBreakPointActualLocations = function(break_point_number) {
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].number() == break_point_number) {
- return script_break_points[i].actual_locations();
- }
- }
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].number() == break_point_number) {
- return [break_points[i].actual_location];
- }
- }
- return [];
-};
-
-Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
- if (!IS_FUNCTION(func)) throw %make_type_error(kDebuggerType);
- // Break points in API functions are not supported.
- if (%FunctionIsAPIFunction(func)) {
- throw %make_error(kDebugger, 'Cannot set break point in native code.');
- }
- // Find source position.
- var source_position =
- this.findFunctionSourceLocation(func, opt_line, opt_column).position;
- // Find the script for the function.
- var script = %FunctionGetScript(func);
- // Break in builtin JavaScript code is not supported.
- if (script.type == Debug.ScriptType.Native) {
- throw %make_error(kDebugger, 'Cannot set break point in native code.');
- }
- // If the script for the function has a name convert this to a script break
- // point.
- if (script && script.id) {
- // Find line and column for the position in the script and set a script
- // break point from that.
- var location = script.locationFromPosition(source_position, false);
- return this.setScriptBreakPointById(script.id,
- location.line, location.column,
- opt_condition);
- } else {
- // Set a break point directly on the function.
- var break_point = MakeBreakPoint(source_position);
- var actual_position =
- %SetFunctionBreakPoint(func, source_position, break_point);
- var actual_location = script.locationFromPosition(actual_position, true);
- break_point.actual_location = { line: actual_location.line,
- column: actual_location.column,
- script_id: script.id };
- break_point.setCondition(opt_condition);
- return break_point.number();
- }
-};
-
-
-Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
- condition, enabled)
-{
- var break_point = MakeBreakPoint(position);
- break_point.setCondition(condition);
- if (!enabled) {
- break_point.disable();
- }
- var script = scriptById(script_id);
- if (script) {
- break_point.actual_position = %SetScriptBreakPoint(script, position, break_point);
- }
- return break_point;
-};
-
-
-Debug.enableBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, false);
- // Only enable if the breakpoint hasn't been deleted:
- if (break_point) {
- break_point.enable();
- }
-};
-
-
-Debug.disableBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, false);
- // Only enable if the breakpoint hasn't been deleted:
- if (break_point) {
- break_point.disable();
- }
-};
-
-
-Debug.changeBreakPointCondition = function(break_point_number, condition) {
- var break_point = this.findBreakPoint(break_point_number, false);
- break_point.setCondition(condition);
-};
-
-
-Debug.clearBreakPoint = function(break_point_number) {
- var break_point = this.findBreakPoint(break_point_number, true);
- if (break_point) {
- return %ClearBreakPoint(break_point);
- } else {
- break_point = this.findScriptBreakPoint(break_point_number, true);
- if (!break_point) throw %make_error(kDebugger, 'Invalid breakpoint');
- }
-};
-
-
-Debug.clearAllBreakPoints = function() {
- for (var i = 0; i < break_points.length; i++) {
- var break_point = break_points[i];
- %ClearBreakPoint(break_point);
- }
- break_points = [];
-};
-
-
-Debug.disableAllBreakPoints = function() {
- // Disable all user defined breakpoints:
- for (var i = 1; i < next_break_point_number; i++) {
- Debug.disableBreakPoint(i);
- }
- // Disable all exception breakpoints:
- %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
- %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
-};
-
-
-Debug.findScriptBreakPoint = function(break_point_number, remove) {
- var script_break_point;
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].number() == break_point_number) {
- script_break_point = script_break_points[i];
- // Remove the break point from the list if requested.
- if (remove) {
- script_break_point.clear();
- script_break_points.splice(i,1);
- }
- break;
- }
- }
- return script_break_point;
-};
-
-
-// Sets a breakpoint in a script identified through id or name at the
-// specified source line and column within that line.
-Debug.setScriptBreakPoint = function(type, script_id_or_name,
- opt_line, opt_column, opt_condition,
- opt_groupId) {
- // Create script break point object.
- var script_break_point =
- new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
- opt_groupId);
-
- // Assign number to the new script break point and add it.
- script_break_point.number_ = next_break_point_number++;
- script_break_point.setCondition(opt_condition);
- script_break_points.push(script_break_point);
-
- // Run through all scripts to see if this script break point matches any
- // loaded scripts.
- var scripts = this.scripts();
- for (var i = 0; i < scripts.length; i++) {
- if (script_break_point.matchesScript(scripts[i])) {
- script_break_point.set(scripts[i]);
- }
- }
-
- return script_break_point.number();
-};
-
-
-Debug.setScriptBreakPointById = function(script_id,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- script_id, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.setScriptBreakPointByName = function(script_name,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
- script_name, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.setScriptBreakPointByRegExp = function(script_regexp,
- opt_line, opt_column,
- opt_condition, opt_groupId) {
- return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
- script_regexp, opt_line, opt_column,
- opt_condition, opt_groupId);
-};
-
-
-Debug.enableScriptBreakPoint = function(break_point_number) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.enable();
-};
-
-
-Debug.disableScriptBreakPoint = function(break_point_number) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.disable();
-};
-
-
-Debug.changeScriptBreakPointCondition = function(
- break_point_number, condition) {
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.setCondition(condition);
-};
-
-
-Debug.scriptBreakPoints = function() {
- return script_break_points;
-};
-
-
Debug.clearStepping = function() {
%ClearStepping();
};
@@ -743,28 +176,6 @@ Debug.isBreakOnUncaughtException = function() {
return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
};
-Debug.showBreakPoints = function(f, full) {
- if (!IS_FUNCTION(f)) throw %make_error(kDebuggerType);
- var source = full ? this.scriptSource(f) : this.source(f);
- var offset = full ? 0 : this.sourcePosition(f);
- var locations = %GetBreakLocations(f);
- if (!locations) return source;
- locations.sort(function(x, y) { return x - y; });
- var result = "";
- var prev_pos = 0;
- var pos;
- for (var i = 0; i < locations.length; i++) {
- pos = locations[i] - offset;
- result += source.slice(prev_pos, pos);
- result += "[B" + i + "]";
- prev_pos = pos;
- }
- pos = source.length;
- result += source.substring(prev_pos, pos);
- return result;
-};
-
-
// Get all the scripts currently loaded. Locating all the scripts is based on
// scanning the heap.
Debug.scripts = function() {
@@ -837,46 +248,6 @@ ExecutionState.prototype.selectedFrame = function() {
return this.selected_frame;
};
-function MakeBreakEvent(break_id, break_points_hit) {
- return new BreakEvent(break_id, break_points_hit);
-}
-
-
-function BreakEvent(break_id, break_points_hit) {
- this.frame_ = new FrameMirror(break_id, 0);
- this.break_points_hit_ = break_points_hit;
-}
-
-
-BreakEvent.prototype.eventType = function() {
- return Debug.DebugEvent.Break;
-};
-
-
-BreakEvent.prototype.func = function() {
- return this.frame_.func();
-};
-
-
-BreakEvent.prototype.sourceLine = function() {
- return this.frame_.sourceLine();
-};
-
-
-BreakEvent.prototype.sourceColumn = function() {
- return this.frame_.sourceColumn();
-};
-
-
-BreakEvent.prototype.sourceLineText = function() {
- return this.frame_.sourceLineText();
-};
-
-
-BreakEvent.prototype.breakPointsHit = function() {
- return this.break_points_hit_;
-};
-
function MakeExceptionEvent(break_id, exception, uncaught, promise) {
return new ExceptionEvent(break_id, exception, uncaught, promise);
@@ -994,19 +365,15 @@ AsyncTaskEvent.prototype.id = function() {
utils.InstallConstants(global, [
"Debug", Debug,
- "BreakEvent", BreakEvent,
"CompileEvent", CompileEvent,
- "BreakPoint", BreakPoint,
]);
// Functions needed by the debugger runtime.
utils.InstallConstants(utils, [
"MakeExecutionState", MakeExecutionState,
"MakeExceptionEvent", MakeExceptionEvent,
- "MakeBreakEvent", MakeBreakEvent,
"MakeCompileEvent", MakeCompileEvent,
"MakeAsyncTaskEvent", MakeAsyncTaskEvent,
- "IsBreakPointTriggered", IsBreakPointTriggered,
]);
})
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 4dd8352695..7f5cd5f805 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -176,4 +176,34 @@ typedef int BreakpointId;
} // namespace debug
} // namespace v8
+// TODO(yangguo): this is legacy left over from removing v8-debug.h, and still
+// used in cctests. Let's get rid of these soon.
+namespace v8 {
+enum DebugEvent {
+ Break = 1,
+ Exception = 2,
+ AfterCompile = 3,
+ CompileError = 4,
+ AsyncTaskEvent = 5,
+};
+
+class Debug {
+ public:
+ class EventDetails {
+ public:
+ virtual DebugEvent GetEvent() const = 0;
+ virtual Local<Object> GetExecutionState() const = 0;
+ virtual Local<Object> GetEventData() const = 0;
+ virtual Local<Context> GetEventContext() const = 0;
+ virtual Local<Value> GetCallbackData() const = 0;
+
+ virtual Isolate* GetIsolate() const = 0;
+
+ virtual ~EventDetails() {}
+ };
+
+ typedef void (*EventCallback)(const EventDetails& event_details);
+};
+} // namespace v8
+
#endif // V8_DEBUG_INTERFACE_TYPES_H_
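
The legacy surface above survives only for cctests, through the EventCallback typedef. As a rough illustration only (the concrete EventDetails subclass comes from V8 internals, and the handler body here is hypothetical), a callback matching that typedef could look like this, using nothing beyond the accessors declared in the hunk:

    // Hypothetical consumer of the legacy interface: react only to Break
    // events. All accessors used below are declared in the class above.
    void OnDebugEvent(const v8::Debug::EventDetails& details) {
      if (details.GetEvent() != v8::Break) return;
      v8::Isolate* isolate = details.GetIsolate();
      v8::Local<v8::Object> exec_state = details.GetExecutionState();
      // ... inspect exec_state, event data, and callback data here ...
      (void)isolate;
      (void)exec_state;
    }
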
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index a2b22d58d4..4f53f8554f 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -743,11 +743,12 @@ class FeedbackVectorFixer {
for (int i = 0; i < function_instances->length(); i++) {
Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
- Handle<Cell> new_cell = isolate->factory()->NewManyClosuresCell(
- isolate->factory()->undefined_value());
- fun->set_feedback_vector_cell(*new_cell);
+ Handle<FeedbackCell> feedback_cell =
+ isolate->factory()->NewManyClosuresCell(
+ isolate->factory()->undefined_value());
+ fun->set_feedback_cell(*feedback_cell);
// Only create feedback vectors if we already have the metadata.
- if (shared_info->is_compiled()) JSFunction::EnsureLiterals(fun);
+ if (shared_info->is_compiled()) JSFunction::EnsureFeedbackVector(fun);
}
}
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 2335b94f10..db599b77e6 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -321,4 +321,4 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
} // namespace internal
} // namespace v8
-#endif /* V8_DEBUG_LIVEEDIT_H_ */
+#endif // V8_DEBUG_LIVEEDIT_H_
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/debug/mips/OWNERS
+++ b/deps/v8/src/debug/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/debug/mips64/OWNERS
+++ b/deps/v8/src/debug/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 15d5e64258..85052b3cae 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -54,11 +54,11 @@ macro IS_DATE(arg)
endmacro
macro IS_ERROR(arg)
-(%_ClassOf(arg) === 'Error')
+(%IsJSError(arg))
endmacro
macro IS_GENERATOR(arg)
-(%_ClassOf(arg) === 'Generator')
+(%IsJSGeneratorObject(arg))
endmacro
macro IS_MAP(arg)
@@ -66,11 +66,11 @@ macro IS_MAP(arg)
endmacro
macro IS_MAP_ITERATOR(arg)
-(%_ClassOf(arg) === 'Map Iterator')
+(%IsJSMapIterator(arg))
endmacro
macro IS_SCRIPT(arg)
-(%_ClassOf(arg) === 'Script')
+(%IsScriptWrapper(arg))
endmacro
macro IS_SET(arg)
@@ -78,7 +78,7 @@ macro IS_SET(arg)
endmacro
macro IS_SET_ITERATOR(arg)
-(%_ClassOf(arg) === 'Set Iterator')
+(%IsJSSetIterator(arg))
endmacro
// Must match PropertyFilter in property-details.h
@@ -638,7 +638,7 @@ inherits(ObjectMirror, ValueMirror);
ObjectMirror.prototype.className = function() {
- return %_ClassOf(this.value_);
+ return %ClassOf(this.value_);
};
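
The mirrors.js hunk replaces class-name string comparisons (%_ClassOf(arg) === 'Error') with dedicated runtime predicates such as %IsJSError. The C++ analogue of that trade, sketched with invented types, is comparing a cached type tag instead of a name string:

    #include <cstdio>
    #include <cstring>

    // Invented stand-ins: a tag enum plus the class-name string it replaces.
    enum class InstanceType { kJSError, kJSMap, kOther };

    struct HeapObject {
      InstanceType type;
      const char* class_name;
    };

    // Old style: fetch the class name and compare strings on every check.
    bool IsErrorByName(const HeapObject& o) {
      return std::strcmp(o.class_name, "Error") == 0;
    }

    // New style: one tag comparison, as %IsJSError does via instance type.
    bool IsErrorByType(const HeapObject& o) {
      return o.type == InstanceType::kJSError;
    }

    int main() {
      HeapObject err{InstanceType::kJSError, "Error"};
      std::printf("%d %d\n", IsErrorByName(err), IsErrorByType(err));  // 1 1
    }
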
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index 3fabf555be..6881e114b3 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -41,6 +41,7 @@ namespace internal {
V(NotAJavaScriptObject, "not a JavaScript object") \
V(NotANumberOrOddball, "not a Number or Oddball") \
V(NotASmi, "not a Smi") \
+ V(NotAString, "not a String") \
V(NotASymbol, "not a Symbol") \
V(OutOfBounds, "out of bounds") \
V(Overflow, "overflow") \
@@ -53,7 +54,8 @@ namespace internal {
V(WrongInstanceType, "wrong instance type") \
V(WrongMap, "wrong map") \
V(WrongName, "wrong name") \
- V(WrongValue, "wrong value")
+ V(WrongValue, "wrong value") \
+ V(NoInitialElement, "no initial element")
enum class DeoptimizeReason : uint8_t {
#define DEOPTIMIZE_REASON(Name, message) k##Name,
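
DEOPTIMIZE_REASON above is one expansion of an X-macro list: each V(Name, message) row is stamped out once per consumer. A self-contained sketch of the pattern, abbreviated to two rows rather than V8's full list:

    #include <cstdio>

    // Single source of truth: every reason is one V(Name, message) row.
    #define DEOPT_REASON_LIST(V)    \
      V(NotAString, "not a String") \
      V(Overflow, "overflow")

    // Expansion 1: the enum (mirrors the k##Name expansion in the hunk).
    enum class DeoptimizeReason : unsigned char {
    #define DEOPTIMIZE_REASON(Name, message) k##Name,
      DEOPT_REASON_LIST(DEOPTIMIZE_REASON)
    #undef DEOPTIMIZE_REASON
    };

    // Expansion 2: a parallel table of human-readable messages.
    static const char* const kDeoptMessages[] = {
    #define DEOPTIMIZE_REASON(Name, message) message,
      DEOPT_REASON_LIST(DEOPTIMIZE_REASON)
    #undef DEOPTIMIZE_REASON
    };

    int main() {
      DeoptimizeReason r = DeoptimizeReason::kNotAString;
      std::printf("%s\n", kDeoptMessages[static_cast<int>(r)]);
    }

Adding a row such as V(NotAString, "not a String") therefore updates the enum and the message table in one place, which is why the hunk touches only the list.
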
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 362bd12cb6..644bd29796 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -276,6 +276,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
}
+ isolate->AbortConcurrentOptimization(BlockingBehavior::kBlock);
DisallowHeapAllocation no_allocation;
// For all contexts, mark all code, then deoptimize.
Object* context = isolate->heap()->native_contexts_list();
@@ -545,10 +546,8 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
switch (translated_frame->kind()) {
case TranslatedFrame::kInterpretedFunction: {
int bytecode_offset = translated_frame->node_id().ToInt();
- BytecodeArray* bytecode =
- translated_frame->raw_shared_info()->bytecode_array();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
- return table->LookupRange(bytecode_offset, data_out, nullptr);
+ HandlerTable table(translated_frame->raw_shared_info()->bytecode_array());
+ return table.LookupRange(bytecode_offset, data_out, nullptr);
}
default:
break;
@@ -956,7 +955,8 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
(!is_topmost || (bailout_type_ == LAZY)) && !goto_catch_handler
? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
+ output_frame->SetPc(
+ reinterpret_cast<intptr_t>(dispatch_builtin->InstructionStart()));
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
@@ -980,7 +980,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
+ reinterpret_cast<intptr_t>(continuation->InstructionStart()));
}
}
@@ -1114,7 +1114,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
Code* adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
intptr_t pc_value = reinterpret_cast<intptr_t>(
- adaptor_trampoline->instruction_start() +
+ adaptor_trampoline->InstructionStart() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
if (FLAG_enable_embedded_constant_pool) {
@@ -1303,7 +1303,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Compute this frame's PC.
DCHECK(bailout_id.IsValidForConstructStub());
- Address start = construct_stub->instruction_start();
+ Address start = construct_stub->InstructionStart();
int pc_offset =
bailout_id == BailoutId::ConstructStubCreate()
? isolate_->heap()->construct_stub_create_deopt_pc_offset()->value()
@@ -1338,7 +1338,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
DCHECK_EQ(LAZY, bailout_type_);
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
+ reinterpret_cast<intptr_t>(continuation->InstructionStart()));
}
}
@@ -1688,12 +1688,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
: isolate()->builtins()->builtin(
Builtins::kContinueToCodeStubBuiltin));
output_frame->SetPc(
- reinterpret_cast<intptr_t>(continue_to_builtin->instruction_start()));
+ reinterpret_cast<intptr_t>(continue_to_builtin->InstructionStart()));
Code* continuation =
isolate()->builtins()->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
+ reinterpret_cast<intptr_t>(continuation->InstructionStart()));
}
void Deoptimizer::MaterializeHeapObjects() {
@@ -1832,14 +1832,13 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
GenerateDeoptimizationEntries(&masm, kMaxNumberOfEntries, type);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
// Allocate the code as immovable since the entry addresses will be used
// directly and there is no support for relocating them.
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::STUB, Handle<Object>(), Builtins::kNoBuiltinId,
- MaybeHandle<HandlerTable>(), MaybeHandle<ByteArray>(),
- MaybeHandle<DeoptimizationData>(), kImmovable);
+ MaybeHandle<ByteArray>(), MaybeHandle<DeoptimizationData>(), kImmovable);
CHECK(Heap::IsImmovable(*code));
CHECK_NULL(data->deopt_entry_code_[type]);
@@ -2287,7 +2286,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
- CHECK(code->instruction_start() <= pc && pc <= code->instruction_end());
+ CHECK(code->InstructionStart() <= pc && pc <= code->InstructionEnd());
SourcePosition last_position = SourcePosition::Unknown();
DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
int last_deopt_id = kNoDeoptimizationId;
@@ -3906,7 +3905,7 @@ bool TranslatedState::DoUpdateFeedback() {
if (!feedback_vector_handle_.is_null()) {
CHECK(!feedback_slot_.IsInvalid());
isolate()->CountUsage(v8::Isolate::kDeoptimizerDisableSpeculation);
- CallICNexus nexus(feedback_vector_handle_, feedback_slot_);
+ FeedbackNexus nexus(feedback_vector_handle_, feedback_slot_);
nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
return true;
}
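
The LookupCatchHandler change above swaps a cast to a heap-allocated HandlerTable for a HandlerTable built on the stack as a view over the bytecode's raw table bytes. A minimal sketch of that view-object pattern, with a hypothetical record layout (the real encoding differs):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical layout: (start, end, handler) triples in a flat int32
    // buffer, standing in for the bytecode's handler-table bytes.
    class HandlerTableView {
     public:
      explicit HandlerTableView(const std::vector<int32_t>& raw) : raw_(&raw) {}

      // Same shape as HandlerTable::LookupRange: find the handler whose
      // [start, end) range covers `offset`, or -1 if none does.
      int LookupRange(int offset) const {
        for (size_t i = 0; i + 2 < raw_->size(); i += 3) {
          if ((*raw_)[i] <= offset && offset < (*raw_)[i + 1]) {
            return (*raw_)[i + 2];
          }
        }
        return -1;
      }

     private:
      const std::vector<int32_t>* raw_;  // non-owning: nothing to allocate
    };

    int main() {
      std::vector<int32_t> raw = {0, 10, 100, 10, 20, 200};
      HandlerTableView table(raw);  // cheap to build per lookup, as above
      std::printf("%d\n", table.LookupRange(12));  // 200
    }
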
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/disasm.h
index 263aa317d5..00e0e29546 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/disasm.h
@@ -41,6 +41,13 @@ class Disassembler {
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+ // Disassemblers on ia32/x64 need a separate method for testing, as the
+ // instruction decode method above continues on unimplemented opcodes and
+ // therefore does not exercise the disassemblers. Basic functionality of
+ // the method remains the same.
+ int InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
+ byte* instruction);
+
// Returns -1 if instruction does not mark the beginning of a constant pool,
// or the number of entries in the constant pool beginning here.
int ConstantPoolSizeAt(byte* instruction);
@@ -48,6 +55,7 @@ class Disassembler {
// Write disassembly into specified file 'f' using specified NameConverter
// (see constructor).
static void Disassemble(FILE* f, byte* begin, byte* end);
+
private:
const NameConverter& converter_;
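
The new InstructionDecodeForTesting exists because, as the comment notes, the regular decoder skips unimplemented opcodes rather than failing. A toy, self-contained decode loop showing that contract (the opcode table and names are invented for illustration):

    #include <cstddef>
    #include <cstdio>

    // Invented instruction table: opcode byte, mnemonic, encoded length.
    struct Insn { unsigned char op; const char* name; int length; };

    // Same contract as Disassembler::InstructionDecode: write the text into
    // `buffer` and return the byte length so the caller can advance pc.
    int InstructionDecode(char* buffer, size_t size, const unsigned char* pc) {
      static const Insn table[] = {{0x90, "nop", 1}, {0xC3, "ret", 1}};
      for (const Insn& i : table) {
        if (i.op == *pc) {
          std::snprintf(buffer, size, "%s", i.name);
          return i.length;
        }
      }
      std::snprintf(buffer, size, ".byte 0x%02x", *pc);
      return 1;  // a ForTesting variant would report failure here instead
    }

    int main() {
      const unsigned char code[] = {0x90, 0x90, 0xC3};
      char buffer[32];
      for (const unsigned char* pc = code; pc < code + sizeof(code);) {
        int n = InstructionDecode(buffer, sizeof(buffer), pc);
        std::printf("%s\n", buffer);
        pc += n;
      }
    }
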
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index a26517b432..86cce891ec 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -256,7 +256,9 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
// Print all the reloc info for this instruction which are not comments.
for (size_t i = 0; i < pcs.size(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
+ Code* host = converter.code();
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], host);
+ relocinfo.set_constant_pool(host ? host->constant_pool() : nullptr);
bool first_reloc_info = (i == 0);
PrintRelocInfo(&out, isolate, ref_encoder, os, &relocinfo,
@@ -267,7 +269,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
// already, check if we can find some RelocInfo for the target address in
// the constant pool.
if (pcs.empty() && converter.code() != nullptr) {
- RelocInfo dummy_rinfo(prev_pc, RelocInfo::NONE32, 0, nullptr);
+ RelocInfo dummy_rinfo(prev_pc, RelocInfo::NONE, 0, nullptr);
if (dummy_rinfo.IsInConstantPool()) {
byte* constant_pool_entry_address =
dummy_rinfo.constant_pool_entry_address();
diff --git a/deps/v8/src/eh-frame.h b/deps/v8/src/eh-frame.h
index bd064eb9cd..68a8c031c0 100644
--- a/deps/v8/src/eh-frame.h
+++ b/deps/v8/src/eh-frame.h
@@ -298,4 +298,4 @@ class EhFrameDisassembler final {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_EH_FRAME_H_
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 0018d59c63..0905677c3c 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -30,6 +30,8 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case PACKED_DOUBLE_ELEMENTS:
case HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS:
return 3;
case PACKED_SMI_ELEMENTS:
case PACKED_ELEMENTS:
@@ -153,6 +155,92 @@ bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
return false;
}
+bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b) {
+ // Assert that the union of two ElementKinds can be computed via std::max.
+ static_assert(PACKED_SMI_ELEMENTS < HOLEY_SMI_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(HOLEY_SMI_ELEMENTS < PACKED_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_ELEMENTS < HOLEY_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ static_assert(PACKED_DOUBLE_ELEMENTS < HOLEY_DOUBLE_ELEMENTS,
+ "ElementsKind union not computable via std::max.");
+ ElementsKind a = *a_out;
+ switch (a) {
+ case PACKED_SMI_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = b;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_SMI_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ *a_out = HOLEY_SMI_ELEMENTS;
+ return true;
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ case PACKED_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ *a_out = PACKED_ELEMENTS;
+ return true;
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_ELEMENTS:
+ switch (b) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ *a_out = HOLEY_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+ break;
+ case PACKED_DOUBLE_ELEMENTS:
+ switch (b) {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ *a_out = b;
+ return true;
+ default:
+ return false;
+ }
+ case HOLEY_DOUBLE_ELEMENTS:
+ switch (b) {
+ case PACKED_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ *a_out = HOLEY_DOUBLE_ELEMENTS;
+ return true;
+ default:
+ return false;
+ }
+
+ break;
+ default:
+ break;
+ }
+ return false;
+}
} // namespace internal
} // namespace v8
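
UnionElementsKindUptoSize is a join in the kinds lattice restricted to equal element sizes: holey-ness and object-ness are each sticky under the join, and mixing tagged kinds with double kinds fails. A condensed sketch of the same contract covering just the six fast kinds (typed-array kinds, which the real switch rejects through its default cases, are omitted here):

    #include <cstdio>

    // Abbreviated stand-in for v8::internal::ElementsKind; the ordering
    // mirrors the static_asserts above (packed precedes holey per class).
    enum ElementsKind {
      PACKED_SMI_ELEMENTS,
      HOLEY_SMI_ELEMENTS,
      PACKED_ELEMENTS,
      HOLEY_ELEMENTS,
      PACKED_DOUBLE_ELEMENTS,
      HOLEY_DOUBLE_ELEMENTS,
    };

    static bool IsDoubleKind(ElementsKind k) {
      return k == PACKED_DOUBLE_ELEMENTS || k == HOLEY_DOUBLE_ELEMENTS;
    }
    static bool IsHoleyKind(ElementsKind k) {
      return k == HOLEY_SMI_ELEMENTS || k == HOLEY_ELEMENTS ||
             k == HOLEY_DOUBLE_ELEMENTS;
    }

    // Same contract as the new function: the join succeeds only when both
    // kinds store same-size elements.
    bool UnionUptoSize(ElementsKind* a_out, ElementsKind b) {
      if (IsDoubleKind(*a_out) != IsDoubleKind(b)) return false;  // size clash
      bool holey = IsHoleyKind(*a_out) || IsHoleyKind(b);
      if (IsDoubleKind(b)) {
        *a_out = holey ? HOLEY_DOUBLE_ELEMENTS : PACKED_DOUBLE_ELEMENTS;
      } else {
        bool object = *a_out >= PACKED_ELEMENTS || b >= PACKED_ELEMENTS;
        *a_out = object ? (holey ? HOLEY_ELEMENTS : PACKED_ELEMENTS)
                        : (holey ? HOLEY_SMI_ELEMENTS : PACKED_SMI_ELEMENTS);
      }
      return true;
    }

    int main() {
      ElementsKind kind = PACKED_SMI_ELEMENTS;
      UnionUptoSize(&kind, HOLEY_SMI_ELEMENTS);  // -> HOLEY_SMI_ELEMENTS
      UnionUptoSize(&kind, PACKED_ELEMENTS);     // -> HOLEY_ELEMENTS
      std::printf("%d\n", kind);                 // 3 == HOLEY_ELEMENTS
    }
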
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index b03f9340f3..b00966ef10 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -49,17 +49,19 @@ enum ElementsKind {
FLOAT32_ELEMENTS,
FLOAT64_ELEMENTS,
UINT8_CLAMPED_ELEMENTS,
+ BIGUINT64_ELEMENTS,
+ BIGINT64_ELEMENTS,
// Sentinel ElementsKind for objects with no elements.
NO_ELEMENTS,
// Derived constants from ElementsKind.
FIRST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
- LAST_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
+ LAST_ELEMENTS_KIND = BIGINT64_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS,
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
- LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS
};
@@ -229,6 +231,8 @@ inline bool UnionElementsKindUptoPackedness(ElementsKind* a_out,
return false;
}
+bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b);
+
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
DCHECK(IsSmiElementsKind(from_kind));
return (from_kind == PACKED_SMI_ELEMENTS) ? PACKED_ELEMENTS : HOLEY_ELEMENTS;
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 499af83078..471798dd79 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -12,7 +12,6 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/utils.h"
-#include "src/zone/zone.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
// several abstract ElementsAccessor classes are used to allow sharing
@@ -39,6 +38,8 @@
// - FixedFloat32ElementsAccessor
// - FixedFloat64ElementsAccessor
// - FixedUint8ClampedElementsAccessor
+// - FixedBigUint64ElementsAccessor
+// - FixedBigInt64ElementsAccessor
// - DictionaryElementsAccessor
// - SloppyArgumentsElementsAccessor
// - FastSloppyArgumentsElementsAccessor
@@ -90,7 +91,9 @@ enum Where { AT_START, AT_END };
V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
- FixedUint8ClampedArray)
+ FixedUint8ClampedArray) \
+ V(FixedBigUint64ElementsAccessor, BIGUINT64_ELEMENTS, FixedBigUint64Array) \
+ V(FixedBigInt64ElementsAccessor, BIGINT64_ELEMENTS, FixedBigInt64Array)
template<ElementsKind Kind> class ElementsKindTraits {
public:
@@ -718,22 +721,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::SliceImpl(receiver, start, end);
}
- Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
- uint32_t end, Handle<JSObject> result) final {
- return Subclass::SliceWithResultImpl(receiver, start, end, result);
- }
-
static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
uint32_t end) {
UNREACHABLE();
}
- static Handle<JSObject> SliceWithResultImpl(Handle<JSObject> receiver,
- uint32_t start, uint32_t end,
- Handle<JSObject> result) {
- UNREACHABLE();
- }
-
Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
uint32_t delete_count, Arguments* args,
uint32_t add_count) final {
@@ -1035,13 +1027,25 @@ class ElementsAccessorBase : public InternalElementsAccessor {
kPackedSizeNotKnown, size);
}
- Object* CopyElements(Handle<JSReceiver> source, Handle<JSObject> destination,
+ void CopyTypedArrayElementsSlice(JSTypedArray* source,
+ JSTypedArray* destination, size_t start,
+ size_t end) {
+ Subclass::CopyTypedArrayElementsSliceImpl(source, destination, start, end);
+ }
+
+ static void CopyTypedArrayElementsSliceImpl(JSTypedArray* source,
+ JSTypedArray* destination,
+ size_t start, size_t end) {
+ UNREACHABLE();
+ }
+
+ Object* CopyElements(Handle<Object> source, Handle<JSObject> destination,
size_t length, uint32_t offset) final {
return Subclass::CopyElementsHandleImpl(source, destination, length,
offset);
}
- static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
+ static Object* CopyElementsHandleImpl(Handle<Object> source,
Handle<JSObject> destination,
size_t length, uint32_t offset) {
UNREACHABLE();
@@ -2995,15 +2999,9 @@ class TypedElementsAccessor
uint32_t end) {
Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
DCHECK(!array->WasNeutered());
- DCHECK(obj_value->IsNumber());
+ DCHECK(obj_value->IsNumeric());
- ctype value;
- if (obj_value->IsSmi()) {
- value = BackingStore::from(Smi::ToInt(*obj_value));
- } else {
- DCHECK(obj_value->IsHeapNumber());
- value = BackingStore::from(HeapNumber::cast(*obj_value)->value());
- }
+ ctype value = BackingStore::FromHandle(obj_value);
// Ensure indexes are within array bounds
DCHECK_LE(0, start);
@@ -3034,41 +3032,49 @@ class TypedElementsAccessor
length > static_cast<uint32_t>(elements->length())) {
return Just(true);
}
- if (!value->IsNumber()) return Just(false);
-
- double search_value = value->Number();
-
- if (!std::isfinite(search_value)) {
- // Integral types cannot represent +Inf or NaN
- if (AccessorClass::kind() < FLOAT32_ELEMENTS ||
- AccessorClass::kind() > FLOAT64_ELEMENTS) {
- return Just(false);
- }
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
- // Return false if value can't be represented in this space
- return Just(false);
- }
-
+ ctype typed_search_value;
// Prototype has no elements, and not searching for the hole --- limit
// search to backing store length.
if (static_cast<uint32_t>(elements->length()) < length) {
length = elements->length();
}
- if (!std::isnan(search_value)) {
- for (uint32_t k = start_from; k < length; ++k) {
- double element_k = elements->get_scalar(k);
- if (element_k == search_value) return Just(true);
- }
- return Just(false);
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just(false);
+ bool lossless;
+ typed_search_value = BackingStore::FromHandle(value, &lossless);
+ if (!lossless) return Just(false);
} else {
- for (uint32_t k = start_from; k < length; ++k) {
- double element_k = elements->get_scalar(k);
- if (std::isnan(element_k)) return Just(true);
+ if (!value->IsNumber()) return Just(false);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ // Integral types cannot represent +Inf or NaN.
+ if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) {
+ return Just(false);
+ }
+ if (std::isnan(search_value)) {
+ for (uint32_t k = start_from; k < length; ++k) {
+ double element_k = elements->get_scalar(k);
+ if (std::isnan(element_k)) return Just(true);
+ }
+ return Just(false);
+ }
+ } else if (search_value < std::numeric_limits<ctype>::lowest() ||
+ search_value > std::numeric_limits<ctype>::max()) {
+ // Return false if value can't be represented in this space.
+ return Just(false);
}
- return Just(false);
+ typed_search_value = static_cast<ctype>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just(false); // Loss of precision.
+ }
+ }
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ ctype element_k = elements->get_scalar(k);
+ if (element_k == typed_search_value) return Just(true);
}
+ return Just(false);
}
static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
@@ -3080,20 +3086,33 @@ class TypedElementsAccessor
if (WasNeutered(*receiver)) return Just<int64_t>(-1);
BackingStore* elements = BackingStore::cast(receiver->elements());
- if (!value->IsNumber()) return Just<int64_t>(-1);
-
- double search_value = value->Number();
+ ctype typed_search_value;
- if (!std::isfinite(search_value)) {
- // Integral types cannot represent +Inf or NaN.
- if (AccessorClass::kind() < FLOAT32_ELEMENTS ||
- AccessorClass::kind() > FLOAT64_ELEMENTS) {
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just<int64_t>(-1);
+ bool lossless;
+ typed_search_value = BackingStore::FromHandle(value, &lossless);
+ if (!lossless) return Just<int64_t>(-1);
+ } else {
+ if (!value->IsNumber()) return Just<int64_t>(-1);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ // Integral types cannot represent +Inf or NaN.
+ if (Kind < FLOAT32_ELEMENTS || Kind > FLOAT64_ELEMENTS) {
+ return Just<int64_t>(-1);
+ }
+ if (std::isnan(search_value)) {
+ return Just<int64_t>(-1);
+ }
+ } else if (search_value < std::numeric_limits<ctype>::lowest() ||
+ search_value > std::numeric_limits<ctype>::max()) {
+ // Return -1 if the value can't be represented in this ElementsKind.
return Just<int64_t>(-1);
}
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
- // Return false if value can't be represented in this ElementsKind.
- return Just<int64_t>(-1);
+ typed_search_value = static_cast<ctype>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just<int64_t>(-1); // Loss of precision.
+ }
}
// Prototype has no elements, and not searching for the hole --- limit
@@ -3102,15 +3121,6 @@ class TypedElementsAccessor
length = elements->length();
}
- if (std::isnan(search_value)) {
- return Just<int64_t>(-1);
- }
-
- ctype typed_search_value = static_cast<ctype>(search_value);
- if (static_cast<double>(typed_search_value) != search_value) {
- return Just<int64_t>(-1); // Loss of precision.
- }
-
for (uint32_t k = start_from; k < length; ++k) {
ctype element_k = elements->get_scalar(k);
if (element_k == typed_search_value) return Just<int64_t>(k);
@@ -3125,28 +3135,34 @@ class TypedElementsAccessor
DisallowHeapAllocation no_gc;
DCHECK(!WasNeutered(*receiver));
- if (!value->IsNumber()) return Just<int64_t>(-1);
BackingStore* elements = BackingStore::cast(receiver->elements());
+ ctype typed_search_value;
- double search_value = value->Number();
-
- if (!std::isfinite(search_value)) {
- if (std::is_integral<ctype>::value) {
- // Integral types cannot represent +Inf or NaN.
- return Just<int64_t>(-1);
- } else if (std::isnan(search_value)) {
- // Strict Equality Comparison of NaN is always false.
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ if (!value->IsBigInt()) return Just<int64_t>(-1);
+ bool lossless;
+ typed_search_value = BackingStore::FromHandle(value, &lossless);
+ if (!lossless) return Just<int64_t>(-1);
+ } else {
+ if (!value->IsNumber()) return Just<int64_t>(-1);
+ double search_value = value->Number();
+ if (!std::isfinite(search_value)) {
+ if (std::is_integral<ctype>::value) {
+ // Integral types cannot represent +Inf or NaN.
+ return Just<int64_t>(-1);
+ } else if (std::isnan(search_value)) {
+ // Strict Equality Comparison of NaN is always false.
+ return Just<int64_t>(-1);
+ }
+ } else if (search_value < std::numeric_limits<ctype>::lowest() ||
+ search_value > std::numeric_limits<ctype>::max()) {
+ // Return -1 if value can't be represented in this ElementsKind.
return Just<int64_t>(-1);
}
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
- // Return -1 if value can't be represented in this ElementsKind.
- return Just<int64_t>(-1);
- }
-
- ctype typed_search_value = static_cast<ctype>(search_value);
- if (static_cast<double>(typed_search_value) != search_value) {
- return Just<int64_t>(-1); // Loss of precision.
+ typed_search_value = static_cast<ctype>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just<int64_t>(-1); // Loss of precision.
+ }
}
DCHECK_LT(start_from, elements->length());
@@ -3186,55 +3202,52 @@ class TypedElementsAccessor
return result;
}
- static Handle<JSObject> SliceWithResultImpl(Handle<JSObject> receiver,
- uint32_t start, uint32_t end,
- Handle<JSObject> result) {
- Isolate* isolate = receiver->GetIsolate();
- DCHECK(!WasNeutered(*receiver));
- DCHECK(result->IsJSTypedArray());
- DCHECK(!WasNeutered(*result));
+ static void CopyTypedArrayElementsSliceImpl(JSTypedArray* source,
+ JSTypedArray* destination,
+ size_t start, size_t end) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(destination->GetElementsKind(), AccessorClass::kind());
+ DCHECK(!source->WasNeutered());
+ DCHECK(!destination->WasNeutered());
DCHECK_LE(start, end);
+ DCHECK_LE(end, source->length_value());
- Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
- Handle<JSTypedArray> result_array = Handle<JSTypedArray>::cast(result);
- DCHECK_LE(end, array->length_value());
+ size_t count = end - start;
+ DCHECK_LE(count, destination->length_value());
- // Fast path for the same type result array
- if (result_array->type() == array->type()) {
- int64_t element_size = array->element_size();
- int64_t count = end - start;
+ FixedTypedArrayBase* src_elements =
+ FixedTypedArrayBase::cast(source->elements());
+ BackingStore* dest_elements = BackingStore::cast(destination->elements());
- DisallowHeapAllocation no_gc;
- BackingStore* src_elements = BackingStore::cast(receiver->elements());
- BackingStore* result_elements =
- BackingStore::cast(result_array->elements());
-
- DCHECK_LE(count, result_elements->length());
- uint8_t* src =
- static_cast<uint8_t*>(src_elements->DataPtr()) + start * element_size;
- uint8_t* result = static_cast<uint8_t*>(result_elements->DataPtr());
- if (array->buffer() != result_array->buffer()) {
- std::memcpy(result, src, count * element_size);
- } else {
- // The spec defines the copy-step iteratively, which means that we
- // cannot use memcpy if the buffer is shared.
- uint8_t* end = src + count * element_size;
- while (src < end) {
- *result++ = *src++;
- }
+ size_t element_size = source->element_size();
+ uint8_t* source_data =
+ static_cast<uint8_t*>(src_elements->DataPtr()) + start * element_size;
+
+ // Fast path when source and destination have the same element type.
+ if (source->type() == destination->type()) {
+ uint8_t* dest_data = static_cast<uint8_t*>(dest_elements->DataPtr());
+
+ // The spec defines the copy-step iteratively, which means that we
+ // cannot use memcpy if the buffer is shared.
+ uint8_t* end_ptr = source_data + count * element_size;
+ while (source_data < end_ptr) {
+ *dest_data++ = *source_data++;
}
- return result_array;
+ return;
}
- // If the types of the two typed arrays are different, properly convert
- // elements
- Handle<BackingStore> from(BackingStore::cast(array->elements()), isolate);
- ElementsAccessor* result_accessor = result_array->GetElementsAccessor();
- for (uint32_t i = start; i < end; i++) {
- Handle<Object> elem = AccessorClass::GetImpl(isolate, *from, i);
- result_accessor->Set(result_array, i - start, *elem);
+ switch (source->GetElementsKind()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<Type##ArrayTraits>(source_data, dest_elements, \
+ count, 0); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ default:
+ UNREACHABLE();
+ break;
}
- return result_array;
}
static bool HasSimpleRepresentation(InstanceType type) {
@@ -3271,9 +3284,9 @@ class TypedElementsAccessor
BackingStore::cast(destination->elements());
DCHECK_LE(offset, destination->length_value());
- DCHECK_LE(source->length_value(), destination->length_value() - offset);
+ DCHECK_LE(length, destination->length_value() - offset);
DCHECK(source->length()->IsSmi());
- DCHECK_EQ(length, source->length_value());
+ DCHECK_LE(length, source->length_value());
InstanceType source_type = source_elements->map()->instance_type();
InstanceType destination_type =
@@ -3298,15 +3311,15 @@ class TypedElementsAccessor
std::memmove(dest_data + offset * element_size, source_data,
length * element_size);
} else {
- Isolate* isolate = source->GetIsolate();
- Zone zone(isolate->allocator(), ZONE_NAME);
+ std::unique_ptr<uint8_t[]> cloned_source_elements;
// If the typedarrays are overlapped, clone the source.
if (dest_data + dest_byte_length > source_data &&
source_data + source_byte_length > dest_data) {
- uint8_t* temp_data = zone.NewArray<uint8_t>(source_byte_length);
- std::memcpy(temp_data, source_data, source_byte_length);
- source_data = temp_data;
+ cloned_source_elements.reset(new uint8_t[source_byte_length]);
+ std::memcpy(cloned_source_elements.get(), source_data,
+ source_byte_length);
+ source_data = cloned_source_elements.get();
}
switch (source->GetElementsKind()) {
@@ -3339,7 +3352,8 @@ class TypedElementsAccessor
// them.
if (source_proto->IsNull(isolate)) return false;
if (source_proto->IsJSProxy()) return true;
- if (!context->is_initial_array_prototype(JSObject::cast(source_proto))) {
+ if (!context->native_context()->is_initial_array_prototype(
+ JSObject::cast(source_proto))) {
return true;
}
@@ -3349,6 +3363,7 @@ class TypedElementsAccessor
static bool TryCopyElementsFastNumber(Context* context, JSArray* source,
JSTypedArray* destination,
size_t length, uint32_t offset) {
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) return false;
Isolate* isolate = source->GetIsolate();
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
@@ -3418,18 +3433,24 @@ class TypedElementsAccessor
return false;
}
- static Object* CopyElementsHandleSlow(Handle<JSReceiver> source,
+ static Object* CopyElementsHandleSlow(Handle<Object> source,
Handle<JSTypedArray> destination,
size_t length, uint32_t offset) {
- Isolate* isolate = source->GetIsolate();
+ Isolate* isolate = destination->GetIsolate();
Handle<BackingStore> destination_elements(
BackingStore::cast(destination->elements()));
for (uint32_t i = 0; i < length; i++) {
- LookupIterator it(isolate, source, i, source);
+ LookupIterator it(isolate, source, i);
Handle<Object> elem;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
Object::GetProperty(&it));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, Object::ToNumber(elem));
+ if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ BigInt::FromObject(isolate, elem));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ Object::ToNumber(elem));
+ }
if (V8_UNLIKELY(destination->WasNeutered())) {
const char* op = "set";
@@ -3450,7 +3471,7 @@ class TypedElementsAccessor
// This doesn't guarantee that the destination array will be completely
// filled. The caller must do this by passing a source with equal length, if
// that is required.
- static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
+ static Object* CopyElementsHandleImpl(Handle<Object> source,
Handle<JSObject> destination,
size_t length, uint32_t offset) {
Isolate* isolate = destination->GetIsolate();
@@ -3463,8 +3484,30 @@ class TypedElementsAccessor
// All conversions from TypedArrays can be done without allocation.
if (source->IsJSTypedArray()) {
Handle<JSTypedArray> source_ta = Handle<JSTypedArray>::cast(source);
- CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
- return *isolate->factory()->undefined_value();
+ ElementsKind source_kind = source_ta->GetElementsKind();
+ bool source_is_bigint =
+ source_kind == BIGINT64_ELEMENTS || source_kind == BIGUINT64_ELEMENTS;
+ bool target_is_bigint =
+ Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS;
+ if (target_is_bigint) {
+ if (V8_UNLIKELY(!source_is_bigint)) {
+ Handle<Object> first =
+ JSReceiver::GetElement(isolate, source_ta, 0).ToHandleChecked();
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kBigIntFromObject, first));
+ }
+ } else {
+ if (V8_UNLIKELY(source_is_bigint)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kBigIntToNumber));
+ }
+ }
+ // If we have to copy more elements than we have in the source, we need to
+ // do special handling and conversion; that happens in the slow case.
+ if (length + offset <= source_ta->length_value()) {
+ CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
+ return *isolate->factory()->undefined_value();
+ }
}
// Fast cases for packed numbers kinds where we don't need to allocate.
@@ -4459,6 +4502,13 @@ void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
}
}
+void CopyTypedArrayElementsSlice(JSTypedArray* source,
+ JSTypedArray* destination, uintptr_t start,
+ uintptr_t end) {
+ destination->GetElementsAccessor()->CopyTypedArrayElementsSlice(
+ source, destination, start, end);
+}
+
void ElementsAccessor::InitializeOncePerProcess() {
static ElementsAccessor* accessor_array[] = {
#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
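
The three search rewrites in this file (Includes, IndexOf, LastIndexOf) share one shape: convert the search value to the array's element type once, up front, and bail out when the conversion would be lossy, so the hot loop compares raw machine values. A stripped-down sketch of that idea for a hypothetical int32 array (plain C++, not V8's BackingStore helpers):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <limits>
    #include <optional>
    #include <vector>

    // Mirrors the typed_search_value bail-outs above: Inf/NaN, values outside
    // the representable range, and fractional values can never match.
    std::optional<int32_t> ToTypedSearchValue(double v) {
      if (!std::isfinite(v)) return std::nullopt;
      if (v < std::numeric_limits<int32_t>::lowest() ||
          v > std::numeric_limits<int32_t>::max()) {
        return std::nullopt;
      }
      int32_t typed = static_cast<int32_t>(v);
      if (static_cast<double>(typed) != v) return std::nullopt;  // lossy
      return typed;
    }

    int64_t IndexOfValue(const std::vector<int32_t>& elements, double search) {
      std::optional<int32_t> typed = ToTypedSearchValue(search);
      if (!typed) return -1;  // cannot compare equal to any element
      for (size_t k = 0; k < elements.size(); ++k) {
        if (elements[k] == *typed) return static_cast<int64_t>(k);
      }
      return -1;
    }

    int main() {
      std::vector<int32_t> a = {1, 2, 3};
      std::printf("%lld %lld\n",
                  static_cast<long long>(IndexOfValue(a, 2.0)),    // 1
                  static_cast<long long>(IndexOfValue(a, 2.5)));   // -1
    }

Hoisting the lossless conversion out of the loop is what lets the BigInt path (BackingStore::FromHandle with a lossless flag) and the Number path share the same comparison loop at the bottom of each rewritten function.
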
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index de5aa0d878..a2b8b49c93 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -12,6 +12,8 @@
namespace v8 {
namespace internal {
+class JSTypedArray;
+
// Abstract base class for handles that can operate on objects with differing
// ElementsKinds.
class ElementsAccessor {
@@ -141,9 +143,6 @@ class ElementsAccessor {
virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
uint32_t end) = 0;
- virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
- uint32_t end, Handle<JSObject> result) = 0;
-
virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
uint32_t start, uint32_t delete_count,
Arguments* args, uint32_t add_count) = 0;
@@ -185,7 +184,7 @@ class ElementsAccessor {
ElementsKind source_kind,
Handle<FixedArrayBase> destination, int size) = 0;
- virtual Object* CopyElements(Handle<JSReceiver> source,
+ virtual Object* CopyElements(Handle<Object> source,
Handle<JSObject> destination, size_t length,
uint32_t offset = 0) = 0;
@@ -193,6 +192,10 @@ class ElementsAccessor {
Handle<JSObject> object,
uint32_t length) = 0;
+ virtual void CopyTypedArrayElementsSlice(JSTypedArray* source,
+ JSTypedArray* destination,
+ size_t start, size_t end) = 0;
+
protected:
friend class LookupIterator;
@@ -241,7 +244,6 @@ MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
Arguments* args);
// Called directly from CSA.
-class JSTypedArray;
void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
JSArray* source,
JSTypedArray* destination,
@@ -250,6 +252,9 @@ void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
JSTypedArray* destination,
uintptr_t length, uintptr_t offset);
+void CopyTypedArrayElementsSlice(JSTypedArray* source,
+ JSTypedArray* destination, uintptr_t start,
+ uintptr_t end);
} // namespace internal
} // namespace v8
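
One elements.cc hunk above also replaces a Zone-allocated temporary with std::unique_ptr when the source and destination typed arrays share a buffer. The overlap handling reduces to roughly this sketch (hypothetical helper; the real code dispatches per element kind while copying):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <memory>

    // If the two byte ranges can alias (same ArrayBuffer), snapshot the
    // source into a heap temporary first; then the copy is overlap-free.
    void CopyPossiblyOverlapping(uint8_t* dest, const uint8_t* src,
                                 size_t len) {
      std::unique_ptr<uint8_t[]> clone;
      if (dest < src + len && src < dest + len) {  // the ranges overlap
        clone.reset(new uint8_t[len]);
        std::memcpy(clone.get(), src, len);
        src = clone.get();  // copy from the stable snapshot instead
      }
      std::memcpy(dest, src, len);  // guaranteed non-overlapping now
    }

    int main() {
      uint8_t buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      CopyPossiblyOverlapping(buf + 2, buf, 6);      // overlapping ranges
      for (uint8_t b : buf) std::printf("%d ", b);   // 1 2 1 2 3 4 5 6
    }

Dropping the Zone is also why the hunk at the top of elements.cc removes the src/zone/zone.h include: the temporary's lifetime is now scoped to the copy itself rather than to a Zone.
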
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 7dd920a446..5030e261d6 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -5,11 +5,8 @@
#ifndef V8_EXECUTION_H_
#define V8_EXECUTION_H_
-#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/globals.h"
-#include "src/objects/code.h"
-#include "src/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 52157b5034..c20592dc81 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -281,6 +281,8 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
Add(ExternalReference::copy_typed_array_elements_to_typed_array(isolate)
.address(),
"copy_typed_array_elements_to_typed_array");
+ Add(ExternalReference::copy_typed_array_elements_slice(isolate).address(),
+ "copy_typed_array_elements_slice");
Add(ExternalReference::log_enter_external_function(isolate).address(),
"Logger::EnterExternal");
Add(ExternalReference::log_leave_external_function(isolate).address(),
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index fab539bf8b..6fd8e8c61e 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -17,7 +17,9 @@
#include "src/objects/bigint.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/microtask-inl.h"
#include "src/objects/module.h"
+#include "src/objects/promise-inl.h"
#include "src/objects/scope-info.h"
#include "src/unicode-cache.h"
#include "src/unicode-decoder.h"
@@ -149,13 +151,11 @@ Handle<ConstantElementsPair> Factory::NewConstantElementsPair(
}
Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
- int hash, Handle<FixedArray> raw_strings,
- Handle<FixedArray> cooked_strings) {
+ Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings) {
DCHECK_EQ(raw_strings->length(), cooked_strings->length());
DCHECK_LT(0, raw_strings->length());
Handle<TemplateObjectDescription> result =
- Handle<TemplateObjectDescription>::cast(NewStruct(TUPLE3_TYPE, TENURED));
- result->set_hash(hash);
+ Handle<TemplateObjectDescription>::cast(NewStruct(TUPLE2_TYPE, TENURED));
result->set_raw_strings(*raw_strings);
result->set_cooked_strings(*cooked_strings);
return result;
@@ -222,7 +222,8 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
FixedArray);
}
-Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
+Handle<FixedArray> Factory::NewUninitializedFixedArray(
+ int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
@@ -230,7 +231,7 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
// array. After getting canary/performance coverage, either remove the
// function or revert to returning uninitialized array.
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFixedArray(length, NOT_TENURED),
+ isolate()->heap()->AllocateFixedArray(length, pretenure),
FixedArray);
}
@@ -391,9 +392,9 @@ MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
PretenureFlag pretenure) {
// Check for ASCII first since this is the common case.
- const char* start = string.start();
+ const char* ascii_data = string.start();
int length = string.length();
- int non_ascii_start = String::NonAsciiStart(start, length);
+ int non_ascii_start = String::NonAsciiStart(ascii_data, length);
if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
@@ -401,35 +402,38 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
}
// Non-ASCII and we need to decode.
+ auto non_ascii = string.SubVector(non_ascii_start, length);
Access<UnicodeCache::Utf8Decoder>
decoder(isolate()->unicode_cache()->utf8_decoder());
- decoder->Reset(string.start() + non_ascii_start,
- length - non_ascii_start);
+ decoder->Reset(non_ascii);
+
int utf16_length = static_cast<int>(decoder->Utf16Length());
DCHECK_GT(utf16_length, 0);
+
// Allocate string.
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
NewRawTwoByteString(non_ascii_start + utf16_length, pretenure),
String);
+
// Copy ASCII portion.
uint16_t* data = result->GetChars();
- const char* ascii_data = string.start();
for (int i = 0; i < non_ascii_start; i++) {
*data++ = *ascii_data++;
}
+
// Now write the remainder.
- decoder->WriteUtf16(data, utf16_length);
+ decoder->WriteUtf16(data, utf16_length, non_ascii);
return result;
}
MaybeHandle<String> Factory::NewStringFromUtf8SubString(
Handle<SeqOneByteString> str, int begin, int length,
PretenureFlag pretenure) {
- // Check for ASCII first since this is the common case.
- const char* start = reinterpret_cast<const char*>(str->GetChars() + begin);
- int non_ascii_start = String::NonAsciiStart(start, length);
+ const char* ascii_data =
+ reinterpret_cast<const char*>(str->GetChars() + begin);
+ int non_ascii_start = String::NonAsciiStart(ascii_data, length);
if (non_ascii_start >= length) {
// If the string is ASCII, we can just make a substring.
// TODO(v8): the pretenure flag is ignored in this case.
@@ -437,28 +441,35 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
}
// Non-ASCII and we need to decode.
+ auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
Access<UnicodeCache::Utf8Decoder> decoder(
isolate()->unicode_cache()->utf8_decoder());
- decoder->Reset(start + non_ascii_start, length - non_ascii_start);
+ decoder->Reset(non_ascii);
+
int utf16_length = static_cast<int>(decoder->Utf16Length());
DCHECK_GT(utf16_length, 0);
+
// Allocate string.
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
- // Reset the decoder, because the original {str} may have moved.
- const char* ascii_data =
- reinterpret_cast<const char*>(str->GetChars() + begin);
- decoder->Reset(ascii_data + non_ascii_start, length - non_ascii_start);
+ // Update pointer references, since the original string may have moved after
+ // allocation.
+ ascii_data = reinterpret_cast<const char*>(str->GetChars() + begin);
+ non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
+
// Copy ASCII portion.
uint16_t* data = result->GetChars();
for (int i = 0; i < non_ascii_start; i++) {
*data++ = *ascii_data++;
}
+
// Now write the remainder.
- decoder->WriteUtf16(data, utf16_length);
+ decoder->WriteUtf16(data, utf16_length, non_ascii);
return result;
}
@@ -985,6 +996,12 @@ Handle<Symbol> Factory::NewPrivateSymbol() {
return symbol;
}
+Handle<Symbol> Factory::NewPrivateFieldSymbol() {
+ Handle<Symbol> symbol = NewSymbol();
+ symbol->set_is_private_field();
+ return symbol;
+}
+
Handle<Context> Factory::NewNativeContext() {
Handle<FixedArray> array =
NewFixedArray(Context::NATIVE_CONTEXT_SLOTS, TENURED);
@@ -1186,6 +1203,38 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
return script;
}
+Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
+ Handle<Context> context) {
+ DCHECK(callable->IsCallable());
+ Handle<CallableTask> microtask =
+ Handle<CallableTask>::cast(NewStruct(CALLABLE_TASK_TYPE));
+ microtask->set_callable(*callable);
+ microtask->set_context(*context);
+ return microtask;
+}
+
+Handle<CallbackTask> Factory::NewCallbackTask(Handle<Foreign> callback,
+ Handle<Foreign> data) {
+ Handle<CallbackTask> microtask =
+ Handle<CallbackTask>::cast(NewStruct(CALLBACK_TASK_TYPE));
+ microtask->set_callback(*callback);
+ microtask->set_data(*data);
+ return microtask;
+}
+
+Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
+ Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
+ Handle<JSReceiver> thenable, Handle<Context> context) {
+ DCHECK(then->IsCallable());
+ Handle<PromiseResolveThenableJobTask> microtask =
+ Handle<PromiseResolveThenableJobTask>::cast(
+ NewStruct(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE));
+ microtask->set_promise_to_resolve(*promise_to_resolve);
+ microtask->set_then(*then);
+ microtask->set_thenable(*thenable);
+ microtask->set_context(*context);
+ return microtask;
+}
Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(isolate(),
@@ -1194,11 +1243,6 @@ Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
}
-Handle<Foreign> Factory::NewForeign(const AccessorDescriptor* desc) {
- return NewForeign((Address) desc, TENURED);
-}
-
-
Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
CALL_HEAP_FUNCTION(
@@ -1247,22 +1291,28 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
Cell);
}
-Handle<Cell> Factory::NewNoClosuresCell(Handle<Object> value) {
- Handle<Cell> cell = NewCell(value);
- cell->set_map_no_write_barrier(*no_closures_cell_map());
- return cell;
+Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFeedbackCell(
+ isolate()->heap()->no_closures_cell_map(), *value),
+ FeedbackCell);
}
-Handle<Cell> Factory::NewOneClosureCell(Handle<Object> value) {
- Handle<Cell> cell = NewCell(value);
- cell->set_map_no_write_barrier(*one_closure_cell_map());
- return cell;
+Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFeedbackCell(
+ isolate()->heap()->one_closure_cell_map(), *value),
+ FeedbackCell);
}
-Handle<Cell> Factory::NewManyClosuresCell(Handle<Object> value) {
- Handle<Cell> cell = NewCell(value);
- cell->set_map_no_write_barrier(*many_closures_cell_map());
- return cell;
+Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateFeedbackCell(
+ isolate()->heap()->many_closures_cell_map(), *value),
+ FeedbackCell);
}
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name) {
@@ -1423,8 +1473,10 @@ Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
HeapNumber);
}
-Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length) {
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateBigInt(length),
+Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateBigInt(length, pretenure),
FreshlyAllocatedBigInt);
}
@@ -1519,7 +1571,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
function->set_shared(*info);
function->set_code(info->code());
function->set_context(*context_or_undefined);
- function->set_feedback_vector_cell(*undefined_cell());
+ function->set_feedback_cell(*many_closures_cell());
int header_size;
if (map->has_prototype_slot()) {
header_size = JSFunction::kSizeWithPrototype;
@@ -1664,11 +1716,11 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
- Handle<Cell> vector, PretenureFlag pretenure) {
+ Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure) {
Handle<Map> initial_map(
Map::cast(context->native_context()->get(info->function_map_index())));
- return NewFunctionFromSharedFunctionInfo(initial_map, info, context, vector,
- pretenure);
+ return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
+ feedback_cell, pretenure);
}
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
@@ -1688,29 +1740,29 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Object> context_or_undefined, Handle<Cell> vector,
+ Handle<Object> context_or_undefined, Handle<FeedbackCell> feedback_cell,
PretenureFlag pretenure) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
Handle<JSFunction> result =
NewFunction(initial_map, info, context_or_undefined, pretenure);
- // Bump the closure count that is encoded in the vector cell's map.
- if (vector->map() == *no_closures_cell_map()) {
- vector->set_map(*one_closure_cell_map());
- } else if (vector->map() == *one_closure_cell_map()) {
- vector->set_map(*many_closures_cell_map());
+ // Bump the closure count that is encoded in the feedback cell's map.
+ if (feedback_cell->map() == *no_closures_cell_map()) {
+ feedback_cell->set_map(*one_closure_cell_map());
+ } else if (feedback_cell->map() == *one_closure_cell_map()) {
+ feedback_cell->set_map(*many_closures_cell_map());
} else {
- DCHECK_EQ(vector->map(), *many_closures_cell_map());
+ DCHECK_EQ(feedback_cell->map(), *many_closures_cell_map());
}
- // Check that the optimized code in the feedback vector wasn't marked for
+ // Check that the optimized code in the feedback cell wasn't marked for
// deoptimization while not pointed to by any live JSFunction.
- if (vector->value()->IsFeedbackVector()) {
- FeedbackVector::cast(vector->value())
+ if (feedback_cell->value()->IsFeedbackVector()) {
+ FeedbackVector::cast(feedback_cell->value())
->EvictOptimizedCodeMarkedForDeoptimization(
*info, "new function from shared function info");
}
- result->set_feedback_vector_cell(*vector);
+ result->set_feedback_cell(*feedback_cell);
if (context_or_undefined->IsContext()) {
// Give compiler a chance to pre-initialize.
@@ -1759,17 +1811,13 @@ Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
Handle<Code> Factory::NewCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, MaybeHandle<HandlerTable> maybe_handler_table,
- MaybeHandle<ByteArray> maybe_source_position_table,
+ int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
uint32_t stub_key, bool is_turbofanned, int stack_slots,
- int safepoint_table_offset) {
+ int safepoint_table_offset, int handler_table_offset) {
Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
- Handle<HandlerTable> handler_table =
- maybe_handler_table.is_null() ? HandlerTable::Empty(isolate())
- : maybe_handler_table.ToHandleChecked();
Handle<ByteArray> source_position_table =
maybe_source_position_table.is_null()
? empty_byte_array()
@@ -1778,13 +1826,13 @@ Handle<Code> Factory::NewCode(
maybe_deopt_data.is_null() ? DeoptimizationData::Empty(isolate())
: maybe_deopt_data.ToHandleChecked();
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateCode(
- desc, kind, self_ref, builtin_index, *reloc_info, *data_container,
- *handler_table, *source_position_table, *deopt_data, movability,
- stub_key, is_turbofanned, stack_slots, safepoint_table_offset),
- Code);
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateCode(
+ desc, kind, self_ref, builtin_index, *reloc_info,
+ *data_container, *source_position_table, *deopt_data,
+ movability, stub_key, is_turbofanned, stack_slots,
+ safepoint_table_offset, handler_table_offset),
+ Code);
}
Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
@@ -2079,12 +2127,13 @@ Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
}
Handle<JSAsyncFromSyncIterator> Factory::NewJSAsyncFromSyncIterator(
- Handle<JSReceiver> sync_iterator) {
+ Handle<JSReceiver> sync_iterator, Handle<Object> next) {
Handle<Map> map(isolate()->native_context()->async_from_sync_iterator_map());
Handle<JSAsyncFromSyncIterator> iterator =
Handle<JSAsyncFromSyncIterator>::cast(NewJSObjectFromMap(map));
iterator->set_sync_iterator(*sync_iterator);
+ iterator->set_next(*next);
return iterator;
}
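
NewJSAsyncFromSyncIterator now also stores the iterator's next method, presumably so it is looked up once at wrapper creation and reused on every step. A trivial sketch of the caching shape (hypothetical names):

    // Illustrative: cache a method alongside the wrapped object at
    // construction time, rather than re-fetching it per iteration.
    struct AsyncFromSyncIteratorSketch {
      void* sync_iterator;  // the wrapped synchronous iterator
      void* next_method;    // its "next" method, captured once
    };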
@@ -2453,14 +2502,10 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
MaybeHandle<String> name, FunctionKind kind, Handle<Code> code,
Handle<ScopeInfo> scope_info) {
- DCHECK(IsValidFunctionKind(kind));
Handle<SharedFunctionInfo> shared =
NewSharedFunctionInfo(name, code, IsConstructable(kind), kind);
shared->set_scope_info(*scope_info);
shared->set_outer_scope_info(*the_hole_value());
- if (IsGeneratorFunction(kind)) {
- shared->set_instance_class_name(isolate()->heap()->Generator_string());
- }
return shared;
}
@@ -2530,13 +2575,10 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
: BUILTIN_CODE(isolate(), ConstructedNonConstructable);
share->SetConstructStub(*construct_stub);
- share->set_instance_class_name(*Object_string());
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(Smi::kZero, SKIP_WRITE_BARRIER);
share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
- StaticFeedbackVectorSpec empty_spec;
- Handle<FeedbackMetadata> feedback_metadata =
- FeedbackMetadata::New(isolate(), &empty_spec);
+ Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(isolate());
share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
#if V8_SFI_HAS_UNIQUE_ID
@@ -2673,7 +2715,7 @@ Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPointInfo> new_break_point_info =
Handle<BreakPointInfo>::cast(NewStruct(TUPLE2_TYPE, TENURED));
new_break_point_info->set_source_position(source_position);
- new_break_point_info->set_break_point_objects(*undefined_value());
+ new_break_point_info->set_break_points(*undefined_value());
return new_break_point_info;
}
@@ -2701,7 +2743,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
Handle<SourcePositionTableWithFrameCache>
Factory::NewSourcePositionTableWithFrameCache(
Handle<ByteArray> source_position_table,
- Handle<NumberDictionary> stack_frame_cache) {
+ Handle<SimpleNumberDictionary> stack_frame_cache) {
Handle<SourcePositionTableWithFrameCache>
source_position_table_with_frame_cache =
Handle<SourcePositionTableWithFrameCache>::cast(
@@ -3084,6 +3126,19 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
return map;
}
+Handle<JSPromise> Factory::NewJSPromiseWithoutHook(PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateJSPromise(
+ *isolate()->promise_function(), pretenure),
+ JSPromise);
+}
+
+Handle<JSPromise> Factory::NewJSPromise(PretenureFlag pretenure) {
+ Handle<JSPromise> promise = NewJSPromiseWithoutHook(pretenure);
+ isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
+ return promise;
+}
+
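
The new pair splits allocation from observation: NewJSPromiseWithoutHook only allocates, while NewJSPromise additionally fires the init promise hook. A minimal sketch of that split, assuming a hypothetical hook callback:

    #include <functional>

    struct PromiseSketch {};

    PromiseSketch NewPromiseWithoutHook() { return PromiseSketch{}; }

    PromiseSketch NewPromise(
        const std::function<void(PromiseSketch&)>& init_hook) {
      PromiseSketch promise = NewPromiseWithoutHook();
      if (init_hook) init_hook(promise);  // like PromiseHookType::kInit
      return promise;
    }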
// static
NewFunctionArgs NewFunctionArgs::ForWasm(Handle<String> name, Handle<Code> code,
Handle<Map> map) {
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index f0e9d63885..966b0602fe 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -27,6 +27,8 @@ class AliasedArgumentsEntry;
class BreakPointInfo;
class BreakPoint;
class BoilerplateDescription;
+class CallableTask;
+class CallbackTask;
class ConstantElementsPair;
class CoverageInfo;
class DebugInfo;
@@ -40,6 +42,7 @@ class JSWeakMap;
class NewFunctionArgs;
struct SourceRange;
class PreParsedScopeData;
+class PromiseResolveThenableJobTask;
class TemplateObjectDescription;
enum FunctionMode {
@@ -101,7 +104,8 @@ class V8_EXPORT_PRIVATE Factory final {
int length, PretenureFlag pretenure = NOT_TENURED);
// Allocates an uninitialized fixed array. It must be filled by the caller.
- Handle<FixedArray> NewUninitializedFixedArray(int length);
+ Handle<FixedArray> NewUninitializedFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocates a feedback vector whose slots are initialized with undefined
// values.
@@ -165,8 +169,7 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a new TemplateObjectDescription struct.
Handle<TemplateObjectDescription> NewTemplateObjectDescription(
- int hash, Handle<FixedArray> raw_strings,
- Handle<FixedArray> cooked_strings);
+ Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings);
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
@@ -326,6 +329,7 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a symbol.
Handle<Symbol> NewSymbol();
Handle<Symbol> NewPrivateSymbol();
+ Handle<Symbol> NewPrivateFieldSymbol();
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -386,16 +390,21 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<SourcePositionTableWithFrameCache>
NewSourcePositionTableWithFrameCache(
Handle<ByteArray> source_position_table,
- Handle<NumberDictionary> stack_frame_cache);
+ Handle<SimpleNumberDictionary> stack_frame_cache);
+
+ // Allocate various microtasks.
+ Handle<CallableTask> NewCallableTask(Handle<JSReceiver> callable,
+ Handle<Context> context);
+ Handle<CallbackTask> NewCallbackTask(Handle<Foreign> callback,
+ Handle<Foreign> data);
+ Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
+ Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
+ Handle<JSReceiver> thenable, Handle<Context> context);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
PretenureFlag pretenure = NOT_TENURED);
- // Allocate a new foreign object. The foreign is pretenured (allocated
- // directly in the old generation).
- Handle<Foreign> NewForeign(const AccessorDescriptor* foreign);
-
Handle<ByteArray> NewByteArray(int length,
PretenureFlag pretenure = NOT_TENURED);
@@ -417,9 +426,9 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
- Handle<Cell> NewNoClosuresCell(Handle<Object> value);
- Handle<Cell> NewOneClosureCell(Handle<Object> value);
- Handle<Cell> NewManyClosuresCell(Handle<Object> value);
+ Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
+ Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
+ Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
Handle<TransitionArray> NewTransitionArray(int capacity);
@@ -495,7 +504,8 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates a new BigInt with {length} digits. Only to be used by
// MutableBigInt::New*.
- Handle<FreshlyAllocatedBigInt> NewBigInt(int length);
+ Handle<FreshlyAllocatedBigInt> NewBigInt(
+ int length, PretenureFlag pretenure = NOT_TENURED);
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
@@ -594,7 +604,7 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
- Handle<JSReceiver> sync_iterator);
+ Handle<JSReceiver> sync_iterator, Handle<Object> next);
Handle<JSMap> NewJSMap();
Handle<JSSet> NewJSSet();
@@ -635,12 +645,12 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
- Handle<Object> context_or_undefined, Handle<Cell> vector,
+ Handle<Object> context_or_undefined, Handle<FeedbackCell> feedback_cell,
PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
- Handle<Cell> vector, PretenureFlag pretenure = TENURED);
+ Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
@@ -677,15 +687,14 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Code> NewCode(const CodeDesc& desc, Code::Kind kind,
Handle<Object> self_reference,
int32_t builtin_index = Builtins::kNoBuiltinId,
- MaybeHandle<HandlerTable> maybe_handler_table =
- MaybeHandle<HandlerTable>(),
MaybeHandle<ByteArray> maybe_source_position_table =
MaybeHandle<ByteArray>(),
MaybeHandle<DeoptimizationData> maybe_deopt_data =
MaybeHandle<DeoptimizationData>(),
Movability movability = kMovable, uint32_t stub_key = 0,
bool is_turbofanned = false, int stack_slots = 0,
- int safepoint_table_offset = 0);
+ int safepoint_table_offset = 0,
+ int handler_table_offset = 0);
// Allocates a new, empty code object for use by builtin deserialization. The
// given {size} argument specifies the size of the entire code object.
@@ -848,6 +857,8 @@ class V8_EXPORT_PRIVATE Factory final {
  // Converts the given ToPrimitive hint to its string representation.
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
+ Handle<JSPromise> NewJSPromise(PretenureFlag pretenure = NOT_TENURED);
+
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
@@ -875,6 +886,9 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a JSArray with no elements and no length.
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<JSPromise> NewJSPromiseWithoutHook(
+ PretenureFlag pretenure = NOT_TENURED);
};
// Utility class to simplify argument handling around JSFunction creation.
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/fast-dtoa.cc
index 9572c7026d..8c061dae7f 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/fast-dtoa.cc
@@ -229,7 +229,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen9;
*exponent = 9;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 29:
case 28:
case 27:
@@ -237,7 +238,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen8;
*exponent = 8;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 26:
case 25:
case 24:
@@ -245,7 +247,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen7;
*exponent = 7;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 23:
case 22:
case 21:
@@ -254,7 +257,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen6;
*exponent = 6;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 19:
case 18:
case 17:
@@ -262,7 +266,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen5;
*exponent = 5;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 16:
case 15:
case 14:
@@ -270,7 +275,8 @@ static void BiggestPowerTen(uint32_t number,
*power = kTen4;
*exponent = 4;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 13:
case 12:
case 11:
@@ -279,7 +285,8 @@ static void BiggestPowerTen(uint32_t number,
*power = 1000;
*exponent = 3;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 9:
case 8:
case 7:
@@ -287,7 +294,8 @@ static void BiggestPowerTen(uint32_t number,
*power = 100;
*exponent = 2;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 6:
case 5:
case 4:
@@ -295,7 +303,8 @@ static void BiggestPowerTen(uint32_t number,
*power = 10;
*exponent = 1;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 3:
case 2:
case 1:
@@ -303,7 +312,8 @@ static void BiggestPowerTen(uint32_t number,
*power = 1;
*exponent = 0;
break;
- } // else fallthrough
+ }
+ V8_FALLTHROUGH;
case 0:
*power = 0;
*exponent = -1;
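
The fast-dtoa hunks above replace the `// else fallthrough` comments with the V8_FALLTHROUGH macro, making the intentional case fallthrough visible to compilers that warn on it. A standalone sketch of the same pattern using the C++17 [[fallthrough]] attribute, which such macros commonly expand to (values simplified, not BiggestPowerTen's real tables):

    // Annotated fallthrough across bands, in the style of BiggestPowerTen.
    int DecimalDigits(unsigned number, int number_bits) {
      switch (number_bits) {
        case 4:
          if (number >= 10) return 2;
          [[fallthrough]];  // number < 10: same answer as the lower band
        case 3:
        case 2:
        case 1:
          if (number >= 1) return 1;
          [[fallthrough]];
        case 0:
          return 0;
        default:
          return -1;  // out of modeled range
      }
    }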
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index c3bdd82616..f5240baf1b 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -12,35 +12,23 @@
namespace v8 {
namespace internal {
-template <typename Derived>
-FeedbackSlot FeedbackVectorSpecBase<Derived>::AddSlot(FeedbackSlotKind kind) {
- int slot = This()->slots();
+FeedbackSlot FeedbackVectorSpec::AddSlot(FeedbackSlotKind kind) {
+ int slot = slots();
int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
- This()->append(kind);
+ append(kind);
for (int i = 1; i < entries_per_slot; i++) {
- This()->append(FeedbackSlotKind::kInvalid);
+ append(FeedbackSlotKind::kInvalid);
}
return FeedbackSlot(slot);
}
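
AddSlot, now a plain member of FeedbackVectorSpec, reserves GetSlotSize(kind) entries per slot: the kind itself followed by kInvalid padding. A standalone model of that layout rule (sketch types, not V8's):

    #include <vector>

    enum class KindSketch { kInvalid, kCall, kLoadProperty };

    // Returns the index of the slot's first entry; trailing entries of a
    // multi-entry slot are padded with kInvalid, as in AddSlot above.
    int AddSlotSketch(std::vector<KindSketch>& kinds, KindSketch kind,
                      int entries_per_slot) {
      int slot = static_cast<int>(kinds.size());
      kinds.push_back(kind);
      for (int i = 1; i < entries_per_slot; i++) {
        kinds.push_back(KindSketch::kInvalid);
      }
      return slot;
    }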
-template FeedbackSlot FeedbackVectorSpecBase<FeedbackVectorSpec>::AddSlot(
- FeedbackSlotKind kind);
-template FeedbackSlot FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::AddSlot(
- FeedbackSlotKind kind);
-
-template <typename Derived>
-FeedbackSlot FeedbackVectorSpecBase<Derived>::AddTypeProfileSlot() {
+FeedbackSlot FeedbackVectorSpec::AddTypeProfileSlot() {
FeedbackSlot slot = AddSlot(FeedbackSlotKind::kTypeProfile);
CHECK_EQ(FeedbackVectorSpec::kTypeProfileSlotIndex,
FeedbackVector::GetIndex(slot));
return slot;
}
-template FeedbackSlot
-FeedbackVectorSpecBase<FeedbackVectorSpec>::AddTypeProfileSlot();
-template FeedbackSlot
-FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::AddTypeProfileSlot();
-
bool FeedbackVectorSpec::HasTypeProfileSlot() const {
FeedbackSlot slot =
FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
@@ -77,18 +65,12 @@ void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
set(index, Smi::FromInt(new_data));
}
-template Handle<FeedbackMetadata> FeedbackMetadata::New(
- Isolate* isolate, const StaticFeedbackVectorSpec* spec);
-template Handle<FeedbackMetadata> FeedbackMetadata::New(
- Isolate* isolate, const FeedbackVectorSpec* spec);
-
// static
-template <typename Spec>
Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
- const Spec* spec) {
+ const FeedbackVectorSpec* spec) {
Factory* factory = isolate->factory();
- const int slot_count = spec->slots();
+ const int slot_count = spec == nullptr ? 0 : spec->slots();
const int slot_kinds_length = VectorICComputer::word_count(slot_count);
const int length = slot_kinds_length + kReservedIndexCount;
if (length == kReservedIndexCount) {
@@ -96,6 +78,7 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
}
#ifdef DEBUG
for (int i = 0; i < slot_count;) {
+ DCHECK(spec);
FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
for (int j = 1; j < entry_size; j++) {
@@ -116,6 +99,7 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
Handle<FeedbackMetadata> metadata = Handle<FeedbackMetadata>::cast(array);
for (int i = 0; i < slot_count; i++) {
+ DCHECK(spec);
FeedbackSlot slot(i);
FeedbackSlotKind kind = spec->GetKind(slot);
metadata->SetKind(slot, kind);
@@ -266,7 +250,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
vector->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
break;
case FeedbackSlotKind::kCreateClosure: {
- Handle<Cell> cell = factory->NewNoClosuresCell(undefined_value);
+ Handle<FeedbackCell> cell = factory->NewNoClosuresCell(undefined_value);
vector->set(index, *cell);
break;
}
@@ -392,110 +376,11 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackSlot slot = iter.Next();
- FeedbackSlotKind kind = iter.kind();
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- switch (kind) {
- case FeedbackSlotKind::kCall: {
- CallICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kLoadProperty: {
- LoadICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kLoadGlobalInsideTypeof:
- case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
- LoadGlobalICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kLoadKeyed: {
- KeyedLoadICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed: {
- StoreICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kStoreGlobalSloppy:
- case FeedbackSlotKind::kStoreGlobalStrict: {
- StoreGlobalICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict: {
- KeyedStoreICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kForIn:
- case FeedbackSlotKind::kBinaryOp:
- case FeedbackSlotKind::kCompareOp: {
- DCHECK(Get(slot)->IsSmi());
- // don't clear these smi slots.
- // Set(slot, Smi::kZero);
- break;
- }
- case FeedbackSlotKind::kInstanceOf: {
- InstanceOfICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kCreateClosure:
- case FeedbackSlotKind::kTypeProfile: {
- break;
- }
- case FeedbackSlotKind::kLiteral: {
- Set(slot, Smi::kZero, SKIP_WRITE_BARRIER);
- feedback_updated = true;
- break;
- }
- case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- StoreDataPropertyInLiteralICNexus nexus(this, slot);
- if (!nexus.IsCleared()) {
- nexus.Clear();
- feedback_updated = true;
- }
- break;
- }
- case FeedbackSlotKind::kInvalid:
- case FeedbackSlotKind::kKindsNumber:
- UNREACHABLE();
- break;
- }
+ FeedbackNexus nexus(this, slot);
+ feedback_updated |= nexus.Clear();
}
}
return feedback_updated;
@@ -526,10 +411,92 @@ Handle<FixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
}
void FeedbackNexus::ConfigureUninitialized() {
- SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+ Isolate* isolate = GetIsolate();
+ switch (kind()) {
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+ SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kCall: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kInstanceOf: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool FeedbackNexus::Clear() {
+ bool feedback_updated = false;
+
+ switch (kind()) {
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kTypeProfile:
+      // We never clear these kinds.
+ break;
+
+ case FeedbackSlotKind::kCompareOp:
+ case FeedbackSlotKind::kForIn:
+ case FeedbackSlotKind::kBinaryOp:
+ // We don't clear these, either.
+ break;
+
+ case FeedbackSlotKind::kLiteral:
+ SetFeedback(Smi::kZero, SKIP_WRITE_BARRIER);
+ feedback_updated = true;
+ break;
+
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
+ if (!IsCleared()) {
+ ConfigurePremonomorphic();
+ feedback_updated = true;
+ }
+ break;
+
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kInstanceOf:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ if (!IsCleared()) {
+ ConfigureUninitialized();
+ feedback_updated = true;
+ }
+ break;
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ break;
+ }
+ return feedback_updated;
}
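
The unified Clear() collapses the old per-class overrides into one policy: closure, type-profile, and hint slots are kept; map-based property ICs fall back to premonomorphic; the remaining kinds return to uninitialized. A compact standalone model of that decision table (kinds abbreviated):

    enum class SlotKindSketch { kCreateClosure, kBinaryOp,
                                kLoadProperty, kCall };
    enum class ClearAction { kKeep, kPremonomorphic, kUninitialized };

    ClearAction ClearPolicy(SlotKindSketch kind) {
      switch (kind) {
        case SlotKindSketch::kCreateClosure:
        case SlotKindSketch::kBinaryOp:
          return ClearAction::kKeep;            // never cleared
        case SlotKindSketch::kLoadProperty:
          return ClearAction::kPremonomorphic;  // map-based property ICs
        case SlotKindSketch::kCall:
          return ClearAction::kUninitialized;   // call/global/instanceof etc.
      }
      return ClearAction::kKeep;
    }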
void FeedbackNexus::ConfigurePremonomorphic() {
@@ -557,70 +524,146 @@ bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
return changed;
}
-InlineCacheState LoadICNexus::StateFromFeedback() const {
+InlineCacheState FeedbackNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
- return PREMONOMORPHIC;
- } else if (feedback->IsFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- }
+ switch (kind()) {
+ case FeedbackSlotKind::kCreateClosure:
+ case FeedbackSlotKind::kLiteral:
+ // CreateClosure and literal slots don't have a notion of state.
+ UNREACHABLE();
+ break;
- return UNINITIALIZED;
-}
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+ if (feedback->IsSmi()) return MONOMORPHIC;
-InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
+ Object* extra = GetFeedbackExtra();
+ if (!WeakCell::cast(feedback)->cleared() ||
+ extra != *FeedbackVector::UninitializedSentinel(isolate)) {
+ return MONOMORPHIC;
+ }
+ return UNINITIALIZED;
+ }
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed: {
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ }
+ if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ return MEGAMORPHIC;
+ }
+ if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
+ return PREMONOMORPHIC;
+ }
+ if (feedback->IsFixedArray()) {
+        // Determine state purely from our structure; don't check whether
+        // the maps are cleared.
+ return POLYMORPHIC;
+ }
+ if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ }
+ if (feedback->IsName()) {
+ DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()));
+ Object* extra = GetFeedbackExtra();
+ FixedArray* extra_array = FixedArray::cast(extra);
+ return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
+ }
+ UNREACHABLE();
+ }
+ case FeedbackSlotKind::kCall: {
+ if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ return GENERIC;
+ } else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
+ return MONOMORPHIC;
+ }
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
- return PREMONOMORPHIC;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- } else if (feedback->IsFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- } else if (feedback->IsName()) {
- Object* extra = GetFeedbackExtra();
- FixedArray* extra_array = FixedArray::cast(extra);
- return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
- }
+ CHECK(feedback == *FeedbackVector::UninitializedSentinel(isolate));
+ return UNINITIALIZED;
+ }
+ case FeedbackSlotKind::kBinaryOp: {
+ BinaryOperationHint hint = GetBinaryOperationFeedback();
+ if (hint == BinaryOperationHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == BinaryOperationHint::kAny) {
+ return GENERIC;
+ }
- return UNINITIALIZED;
-}
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kCompareOp: {
+ CompareOperationHint hint = GetCompareOperationFeedback();
+ if (hint == CompareOperationHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == CompareOperationHint::kAny) {
+ return GENERIC;
+ }
-void GlobalICNexus::ConfigureUninitialized() {
- Isolate* isolate = GetIsolate();
- SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kForIn: {
+ ForInHint hint = GetForInFeedback();
+ if (hint == ForInHint::kNone) {
+ return UNINITIALIZED;
+ } else if (hint == ForInHint::kAny) {
+ return GENERIC;
+ }
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kInstanceOf: {
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
+ return MEGAMORPHIC;
+ }
+ return MONOMORPHIC;
+ }
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ } else if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ }
+
+ return MEGAMORPHIC;
+ }
+ case FeedbackSlotKind::kTypeProfile: {
+ if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ }
+ return MONOMORPHIC;
+ }
+
+ case FeedbackSlotKind::kInvalid:
+ case FeedbackSlotKind::kKindsNumber:
+ UNREACHABLE();
+ break;
+ }
+ return UNINITIALIZED;
}
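
For the map-based property ICs, the merged StateFromFeedback still decodes the state purely from the feedback object's shape: sentinel values map to fixed states, a weak cell means monomorphic, a fixed array polymorphic. A standalone model of that decoding:

    enum class IcState { kUninitialized, kPremonomorphic, kMonomorphic,
                         kPolymorphic, kMegamorphic };
    enum class FeedbackShape { kUninitializedSentinel,
                               kPremonomorphicSentinel,
                               kMegamorphicSentinel, kWeakCell, kFixedArray };

    IcState DecodeState(FeedbackShape shape) {
      switch (shape) {
        case FeedbackShape::kUninitializedSentinel:
          return IcState::kUninitialized;
        case FeedbackShape::kPremonomorphicSentinel:
          return IcState::kPremonomorphic;
        case FeedbackShape::kMegamorphicSentinel:
          return IcState::kMegamorphic;
        case FeedbackShape::kWeakCell:    return IcState::kMonomorphic;
        case FeedbackShape::kFixedArray:  return IcState::kPolymorphic;
      }
      return IcState::kUninitialized;
    }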
-void GlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+void FeedbackNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+ DCHECK(IsGlobalICKind(kind()));
Isolate* isolate = GetIsolate();
SetFeedback(*isolate->factory()->NewWeakCell(cell));
SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
}
-bool GlobalICNexus::ConfigureLexicalVarMode(int script_context_index,
+bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
int context_slot_index) {
+ DCHECK(IsGlobalICKind(kind()));
DCHECK_LE(0, script_context_index);
DCHECK_LE(0, context_slot_index);
if (!ContextIndexBits::is_valid(script_context_index) ||
@@ -637,112 +680,44 @@ bool GlobalICNexus::ConfigureLexicalVarMode(int script_context_index,
return true;
}
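
ConfigureLexicalVarMode packs the script context index and the context slot index into a single Smi via the ContextIndexBits/SlotIndexBits bit fields declared further down in this patch. A standalone sketch of that packing (the 12-bit context width matches the LEXICAL_MODE_BIT_FIELDS declaration; the rest is illustrative):

    #include <cstdint>

    constexpr uint32_t kContextBits = 12;  // width of ContextIndexBits

    uint32_t EncodeLexical(uint32_t context_index, uint32_t slot_index) {
      return context_index | (slot_index << kContextBits);
    }
    uint32_t ContextIndex(uint32_t packed) {
      return packed & ((1u << kContextBits) - 1u);
    }
    uint32_t SlotIndex(uint32_t packed) { return packed >> kContextBits; }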
-void GlobalICNexus::ConfigureHandlerMode(Handle<Object> handler) {
+void FeedbackNexus::ConfigureHandlerMode(Handle<Object> handler) {
+ DCHECK(IsGlobalICKind(kind()));
SetFeedback(GetIsolate()->heap()->empty_weak_cell());
SetFeedbackExtra(*handler);
}
-InlineCacheState GlobalICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
- if (feedback->IsSmi()) return MONOMORPHIC;
-
- Object* extra = GetFeedbackExtra();
- if (!WeakCell::cast(feedback)->cleared() ||
- extra != *FeedbackVector::UninitializedSentinel(isolate)) {
- return MONOMORPHIC;
- }
- return UNINITIALIZED;
-}
-
-InlineCacheState StoreICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
- return PREMONOMORPHIC;
- } else if (feedback->IsFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- }
-
- return UNINITIALIZED;
-}
-
-InlineCacheState KeyedStoreICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
- return PREMONOMORPHIC;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- } else if (feedback->IsFixedArray()) {
- // Determine state purely by our structure, don't check if the maps are
- // cleared.
- return POLYMORPHIC;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- } else if (feedback->IsName()) {
- Object* extra = GetFeedbackExtra();
- FixedArray* extra_array = FixedArray::cast(extra);
- return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
- }
-
- return UNINITIALIZED;
-}
-
-InlineCacheState CallICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
- DCHECK(GetFeedbackExtra() ==
- *FeedbackVector::UninitializedSentinel(isolate) ||
- GetFeedbackExtra()->IsSmi());
-
- if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return GENERIC;
- } else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
- return MONOMORPHIC;
- }
+int FeedbackNexus::GetCallCount() {
+ DCHECK(IsCallICKind(kind()));
- CHECK(feedback == *FeedbackVector::UninitializedSentinel(isolate));
- return UNINITIALIZED;
-}
-
-int CallICNexus::GetCallCount() {
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return CallCountField::decode(value);
}
-void CallICNexus::SetSpeculationMode(SpeculationMode mode) {
+void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
+ DCHECK(IsCallICKind(kind()));
+
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
- uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
- int result = static_cast<int>(CallCountField::decode(value) |
- SpeculationModeField::encode(mode));
+ uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
+ uint32_t value = CallCountField::encode(CallCountField::decode(count));
+ int result = static_cast<int>(value | SpeculationModeField::encode(mode));
SetFeedbackExtra(Smi::FromInt(result), SKIP_WRITE_BARRIER);
}
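
The rewritten SetSpeculationMode re-encodes the call count before OR-ing in the mode, so a stale speculation bit cannot leak into the new value (per the bit fields added by this patch: mode in bit 0, count in bits 1..31). A standalone model of the mask-then-set:

    #include <cstdint>

    // Bit 0: speculation mode; bits 1..31: call count, mirroring the
    // SpeculationModeField/CallCountField typedefs in this patch.
    uint32_t SetSpeculationBit(uint32_t packed, uint32_t mode) {
      uint32_t count_bits = packed & ~1u;  // keep the count, drop old mode
      return count_bits | (mode & 1u);
    }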
-SpeculationMode CallICNexus::GetSpeculationMode() {
+SpeculationMode FeedbackNexus::GetSpeculationMode() {
+ DCHECK(IsCallICKind(kind()));
+
Object* call_count = GetFeedbackExtra();
CHECK(call_count->IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return SpeculationModeField::decode(value);
}
-float CallICNexus::ComputeCallFrequency() {
+
+float FeedbackNexus::ComputeCallFrequency() {
+ DCHECK(IsCallICKind(kind()));
+
double const invocation_count = vector()->invocation_count();
double const call_count = GetCallCount();
if (invocation_count == 0) {
@@ -752,25 +727,23 @@ float CallICNexus::ComputeCallFrequency() {
return static_cast<float>(call_count / invocation_count);
}
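
ComputeCallFrequency divides the call count recorded in the slot by the vector's invocation count, guarding the zero case. A sketch of that heuristic (the early-return value is not shown in this hunk; zero is assumed below):

    float CallFrequencySketch(double call_count, double invocation_count) {
      if (invocation_count == 0) return 0.0f;  // assumed early-out value
      return static_cast<float>(call_count / invocation_count);
    }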
-void CallICNexus::ConfigureUninitialized() {
- Isolate* isolate = GetIsolate();
- SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
-}
-
void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
Handle<Object> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- if (name.is_null()) {
+ if (kind() == FeedbackSlotKind::kStoreDataPropertyInLiteral) {
SetFeedback(*cell);
- SetFeedbackExtra(*handler);
+ SetFeedbackExtra(*name);
} else {
- Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
- SetFeedback(*name);
- array->set(0, *cell);
- array->set(1, *handler);
+ if (name.is_null()) {
+ SetFeedback(*cell);
+ SetFeedbackExtra(*handler);
+ } else {
+ Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
+ SetFeedback(*name);
+ array->set(0, *cell);
+ array->set(1, *handler);
+ }
}
}
@@ -798,6 +771,10 @@ void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name,
}
int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
@@ -831,6 +808,10 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
}
MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+
Object* feedback = GetFeedback();
Isolate* isolate = GetIsolate();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
@@ -868,6 +849,10 @@ MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
}
bool FeedbackNexus::FindHandlers(ObjectHandles* code_list, int length) const {
+ DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
+ IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+
Object* feedback = GetFeedback();
Isolate* isolate = GetIsolate();
int count = 0;
@@ -901,23 +886,18 @@ bool FeedbackNexus::FindHandlers(ObjectHandles* code_list, int length) const {
return count == length;
}
-Name* KeyedLoadICNexus::FindFirstName() const {
- Object* feedback = GetFeedback();
- if (IsPropertyNameFeedback(feedback)) {
- return Name::cast(feedback);
- }
- return nullptr;
-}
-
-Name* KeyedStoreICNexus::FindFirstName() const {
- Object* feedback = GetFeedback();
- if (IsPropertyNameFeedback(feedback)) {
- return Name::cast(feedback);
+Name* FeedbackNexus::FindFirstName() const {
+ if (IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind())) {
+ Object* feedback = GetFeedback();
+ if (IsPropertyNameFeedback(feedback)) {
+ return Name::cast(feedback);
+ }
}
return nullptr;
}
-KeyedAccessLoadMode KeyedLoadICNexus::GetKeyedAccessLoadMode() const {
+KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
+ DCHECK(IsKeyedLoadICKind(kind()));
MapHandles maps;
ObjectHandles handlers;
@@ -933,7 +913,8 @@ KeyedAccessLoadMode KeyedLoadICNexus::GetKeyedAccessLoadMode() const {
return STANDARD_LOAD;
}
-KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
+KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
+ DCHECK(IsKeyedStoreICKind(kind()));
KeyedAccessStoreMode mode = STANDARD_STORE;
MapHandles maps;
ObjectHandles handlers;
@@ -974,7 +955,8 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
return mode;
}
-IcCheckType KeyedLoadICNexus::GetKeyType() const {
+IcCheckType FeedbackNexus::GetKeyType() const {
+ DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()));
Object* feedback = GetFeedback();
if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()));
@@ -982,79 +964,31 @@ IcCheckType KeyedLoadICNexus::GetKeyType() const {
return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
}
-IcCheckType KeyedStoreICNexus::GetKeyType() const {
- Object* feedback = GetFeedback();
- if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
- return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()));
- }
- return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
-}
-
-InlineCacheState BinaryOpICNexus::StateFromFeedback() const {
- BinaryOperationHint hint = GetBinaryOperationFeedback();
- if (hint == BinaryOperationHint::kNone) {
- return UNINITIALIZED;
- } else if (hint == BinaryOperationHint::kAny) {
- return GENERIC;
- }
-
- return MONOMORPHIC;
-}
-
-InlineCacheState CompareICNexus::StateFromFeedback() const {
- CompareOperationHint hint = GetCompareOperationFeedback();
- if (hint == CompareOperationHint::kNone) {
- return UNINITIALIZED;
- } else if (hint == CompareOperationHint::kAny) {
- return GENERIC;
- }
-
- return MONOMORPHIC;
-}
-
-BinaryOperationHint BinaryOpICNexus::GetBinaryOperationFeedback() const {
+BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kBinaryOp);
int feedback = Smi::ToInt(GetFeedback());
return BinaryOperationHintFromFeedback(feedback);
}
-CompareOperationHint CompareICNexus::GetCompareOperationFeedback() const {
+CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kCompareOp);
int feedback = Smi::ToInt(GetFeedback());
return CompareOperationHintFromFeedback(feedback);
}
-InlineCacheState ForInICNexus::StateFromFeedback() const {
- ForInHint hint = GetForInFeedback();
- if (hint == ForInHint::kNone) {
- return UNINITIALIZED;
- } else if (hint == ForInHint::kAny) {
- return GENERIC;
- }
- return MONOMORPHIC;
-}
-
-ForInHint ForInICNexus::GetForInFeedback() const {
+ForInHint FeedbackNexus::GetForInFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kForIn);
int feedback = Smi::ToInt(GetFeedback());
return ForInHintFromFeedback(feedback);
}
-void InstanceOfICNexus::ConfigureUninitialized() {
- SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+Handle<FeedbackCell> FeedbackNexus::GetFeedbackCell() const {
+ DCHECK_EQ(FeedbackSlotKind::kCreateClosure, kind());
+ return handle(FeedbackCell::cast(GetFeedback()));
}
-InlineCacheState InstanceOfICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
- }
- return MONOMORPHIC;
-}
-
-MaybeHandle<JSObject> InstanceOfICNexus::GetConstructorFeedback() const {
+MaybeHandle<JSObject> FeedbackNexus::GetConstructorFeedback() const {
+ DCHECK_EQ(kind(), FeedbackSlotKind::kInstanceOf);
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
if (feedback->IsWeakCell() && !WeakCell::cast(feedback)->cleared()) {
@@ -1063,38 +997,6 @@ MaybeHandle<JSObject> InstanceOfICNexus::GetConstructorFeedback() const {
return MaybeHandle<JSObject>();
}
-InlineCacheState StoreDataPropertyInLiteralICNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- } else if (feedback->IsWeakCell()) {
- // Don't check if the map is cleared.
- return MONOMORPHIC;
- }
-
- return MEGAMORPHIC;
-}
-
-void StoreDataPropertyInLiteralICNexus::ConfigureMonomorphic(
- Handle<Name> name, Handle<Map> receiver_map) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-
- SetFeedback(*cell);
- SetFeedbackExtra(*name);
-}
-
-InlineCacheState CollectTypeProfileNexus::StateFromFeedback() const {
- Isolate* isolate = GetIsolate();
- Object* const feedback = GetFeedback();
-
- if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return UNINITIALIZED;
- }
- return MONOMORPHIC;
-}
-
namespace {
bool InList(Handle<ArrayList> types, Handle<String> type) {
@@ -1108,44 +1010,42 @@ bool InList(Handle<ArrayList> types, Handle<String> type) {
}
} // anonymous namespace
-void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
+void FeedbackNexus::Collect(Handle<String> type, int position) {
+ DCHECK(IsTypeProfileKind(kind()));
DCHECK_GE(position, 0);
Isolate* isolate = GetIsolate();
Object* const feedback = GetFeedback();
// Map source position to collection of types
- Handle<NumberDictionary> types;
+ Handle<SimpleNumberDictionary> types;
if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- types = NumberDictionary::New(isolate, 1);
+ types = SimpleNumberDictionary::New(isolate, 1);
} else {
- types = handle(NumberDictionary::cast(feedback));
+ types = handle(SimpleNumberDictionary::cast(feedback));
}
Handle<ArrayList> position_specific_types;
int entry = types->FindEntry(position);
- if (entry == NumberDictionary::kNotFound) {
+ if (entry == SimpleNumberDictionary::kNotFound) {
position_specific_types = ArrayList::New(isolate, 1);
- types = NumberDictionary::Set(
+ types = SimpleNumberDictionary::Set(
types, position, ArrayList::Add(position_specific_types, type));
} else {
DCHECK(types->ValueAt(entry)->IsArrayList());
position_specific_types = handle(ArrayList::cast(types->ValueAt(entry)));
if (!InList(position_specific_types, type)) { // Add type
- types = NumberDictionary::Set(
+ types = SimpleNumberDictionary::Set(
types, position, ArrayList::Add(position_specific_types, type));
}
}
SetFeedback(*types);
}
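
Collect() maintains a position-to-list-of-type-names table (now a SimpleNumberDictionary of ArrayLists), appending a type only if it is not already recorded for that source position. A standalone model using standard containers:

    #include <algorithm>
    #include <map>
    #include <string>
    #include <vector>

    void CollectSketch(std::map<int, std::vector<std::string>>& types,
                       const std::string& type, int position) {
      std::vector<std::string>& list = types[position];  // empty on first use
      if (std::find(list.begin(), list.end(), type) == list.end()) {
        list.push_back(type);  // add only unseen types, as InList() guards
      }
    }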
-void CollectTypeProfileNexus::Clear() {
- SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()));
-}
-
-std::vector<int> CollectTypeProfileNexus::GetSourcePositions() const {
+std::vector<int> FeedbackNexus::GetSourcePositions() const {
+ DCHECK(IsTypeProfileKind(kind()));
std::vector<int> source_positions;
Isolate* isolate = GetIsolate();
@@ -1155,12 +1055,12 @@ std::vector<int> CollectTypeProfileNexus::GetSourcePositions() const {
return source_positions;
}
- Handle<NumberDictionary> types =
- Handle<NumberDictionary>(NumberDictionary::cast(feedback), isolate);
+ Handle<SimpleNumberDictionary> types = Handle<SimpleNumberDictionary>(
+ SimpleNumberDictionary::cast(feedback), isolate);
- for (int index = NumberDictionary::kElementsStartIndex;
- index < types->length(); index += NumberDictionary::kEntrySize) {
- int key_index = index + NumberDictionary::kEntryKeyIndex;
+ for (int index = SimpleNumberDictionary::kElementsStartIndex;
+ index < types->length(); index += SimpleNumberDictionary::kEntrySize) {
+ int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
Object* key = types->get(key_index);
if (key->IsSmi()) {
int position = Smi::cast(key)->value();
@@ -1170,8 +1070,9 @@ std::vector<int> CollectTypeProfileNexus::GetSourcePositions() const {
return source_positions;
}
-std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
+std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
uint32_t position) const {
+ DCHECK(IsTypeProfileKind(kind()));
Isolate* isolate = GetIsolate();
Object* const feedback = GetFeedback();
@@ -1180,11 +1081,11 @@ std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
return types_for_position;
}
- Handle<NumberDictionary> types =
- Handle<NumberDictionary>(NumberDictionary::cast(feedback), isolate);
+ Handle<SimpleNumberDictionary> types = Handle<SimpleNumberDictionary>(
+ SimpleNumberDictionary::cast(feedback), isolate);
int entry = types->FindEntry(position);
- if (entry == NumberDictionary::kNotFound) {
+ if (entry == SimpleNumberDictionary::kNotFound) {
return types_for_position;
}
DCHECK(types->ValueAt(entry)->IsArrayList());
@@ -1201,16 +1102,17 @@ std::vector<Handle<String>> CollectTypeProfileNexus::GetTypesForSourcePositions(
namespace {
Handle<JSObject> ConvertToJSObject(Isolate* isolate,
- Handle<NumberDictionary> feedback) {
+ Handle<SimpleNumberDictionary> feedback) {
Handle<JSObject> type_profile =
isolate->factory()->NewJSObject(isolate->object_function());
- for (int index = NumberDictionary::kElementsStartIndex;
- index < feedback->length(); index += NumberDictionary::kEntrySize) {
- int key_index = index + NumberDictionary::kEntryKeyIndex;
+ for (int index = SimpleNumberDictionary::kElementsStartIndex;
+ index < feedback->length();
+ index += SimpleNumberDictionary::kEntrySize) {
+ int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
Object* key = feedback->get(key_index);
if (key->IsSmi()) {
- int value_index = index + NumberDictionary::kEntryValueIndex;
+ int value_index = index + SimpleNumberDictionary::kEntryValueIndex;
Handle<ArrayList> position_specific_types(
ArrayList::cast(feedback->get(value_index)));
@@ -1228,7 +1130,8 @@ Handle<JSObject> ConvertToJSObject(Isolate* isolate,
}
} // namespace
-JSObject* CollectTypeProfileNexus::GetTypeProfile() const {
+JSObject* FeedbackNexus::GetTypeProfile() const {
+ DCHECK(IsTypeProfileKind(kind()));
Isolate* isolate = GetIsolate();
Object* const feedback = GetFeedback();
@@ -1237,7 +1140,13 @@ JSObject* CollectTypeProfileNexus::GetTypeProfile() const {
return *isolate->factory()->NewJSObject(isolate->object_function());
}
- return *ConvertToJSObject(isolate, handle(NumberDictionary::cast(feedback)));
+ return *ConvertToJSObject(isolate,
+ handle(SimpleNumberDictionary::cast(feedback)));
+}
+
+void FeedbackNexus::ResetTypeProfile() {
+ DCHECK(IsTypeProfileKind(kind()));
+ SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()));
}
} // namespace internal
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 9f8096d138..8faff32649 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -85,6 +85,10 @@ inline bool IsStoreOwnICKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kStoreOwnNamed;
}
+inline bool IsStoreDataPropertyInLiteralKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreDataPropertyInLiteral;
+}
+
inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kStoreKeyedSloppy ||
kind == FeedbackSlotKind::kStoreKeyedStrict;
@@ -202,8 +206,8 @@ class FeedbackVector : public HeapObject {
FeedbackSlot GetTypeProfileSlot() const;
- static Handle<FeedbackVector> New(Isolate* isolate,
- Handle<SharedFunctionInfo> shared);
+ V8_EXPORT_PRIVATE static Handle<FeedbackVector> New(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared);
static Handle<FeedbackVector> Copy(Isolate* isolate,
Handle<FeedbackVector> vector);
@@ -293,15 +297,28 @@ class FeedbackVector : public HeapObject {
static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector);
- void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot,
- FeedbackSlotKind kind); // NOLINT
-
DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackVector);
};
-template <typename Derived>
-class V8_EXPORT_PRIVATE FeedbackVectorSpecBase {
+class V8_EXPORT_PRIVATE FeedbackVectorSpec {
public:
+ explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
+ slot_kinds_.reserve(16);
+ }
+
+ int slots() const { return static_cast<int>(slot_kinds_.size()); }
+
+ FeedbackSlotKind GetKind(FeedbackSlot slot) const {
+ return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
+ }
+
+ bool HasTypeProfileSlot() const;
+
+ // If used, the TypeProfileSlot is always added as the first slot and its
+ // index is constant. If other slots are added before the TypeProfileSlot,
+ // this number changes.
+ static const int kTypeProfileSlotIndex = 0;
+
FeedbackSlot AddCallICSlot() { return AddSlot(FeedbackSlotKind::kCall); }
FeedbackSlot AddLoadICSlot() {
@@ -379,58 +396,6 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpecBase {
private:
FeedbackSlot AddSlot(FeedbackSlotKind kind);
- Derived* This() { return static_cast<Derived*>(this); }
-};
-
-class StaticFeedbackVectorSpec
- : public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
- public:
- StaticFeedbackVectorSpec() : slot_count_(0) {}
-
- int slots() const { return slot_count_; }
-
- FeedbackSlotKind GetKind(FeedbackSlot slot) const {
- DCHECK(slot.ToInt() >= 0 && slot.ToInt() < slot_count_);
- return kinds_[slot.ToInt()];
- }
-
- private:
- friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
-
- void append(FeedbackSlotKind kind) {
- DCHECK_LT(slot_count_, kMaxLength);
- kinds_[slot_count_++] = kind;
- }
-
- static const int kMaxLength = 12;
-
- int slot_count_;
- FeedbackSlotKind kinds_[kMaxLength];
-};
-
-class V8_EXPORT_PRIVATE FeedbackVectorSpec
- : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
- public:
- explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
- slot_kinds_.reserve(16);
- }
-
- int slots() const { return static_cast<int>(slot_kinds_.size()); }
-
- FeedbackSlotKind GetKind(FeedbackSlot slot) const {
- return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
- }
-
- bool HasTypeProfileSlot() const;
-
- // If used, the TypeProfileSlot is always added as the first slot and its
- // index is constant. If other slots are added before the TypeProfileSlot,
- // this number changes.
- static const int kTypeProfileSlotIndex = 0;
-
- private:
- friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
-
void append(FeedbackSlotKind kind) {
slot_kinds_.push_back(static_cast<unsigned char>(kind));
}
@@ -465,8 +430,9 @@ class FeedbackMetadata : public FixedArray {
// Returns slot kind for given slot.
FeedbackSlotKind GetKind(FeedbackSlot slot) const;
- template <typename Spec>
- static Handle<FeedbackMetadata> New(Isolate* isolate, const Spec* spec);
+ // If {spec} is null, then it is considered empty.
+ V8_EXPORT_PRIVATE static Handle<FeedbackMetadata> New(
+ Isolate* isolate, const FeedbackVectorSpec* spec = nullptr);
#ifdef OBJECT_PRINT
// For gdb debugging.
@@ -551,14 +517,15 @@ class FeedbackMetadataIterator {
};
// A FeedbackNexus is the combination of a FeedbackVector and a slot.
-// Derived classes customize the update and retrieval of feedback.
-class FeedbackNexus {
+class FeedbackNexus final {
public:
FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_handle_(vector), vector_(nullptr), slot_(slot) {}
+ : vector_handle_(vector),
+ vector_(nullptr),
+ slot_(slot),
+ kind_(vector->GetKind(slot)) {}
FeedbackNexus(FeedbackVector* vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot) {}
- virtual ~FeedbackNexus() {}
+ : vector_(vector), slot_(slot), kind_(vector->GetKind(slot)) {}
Handle<FeedbackVector> vector_handle() const {
DCHECK_NULL(vector_);
@@ -568,12 +535,20 @@ class FeedbackNexus {
return vector_handle_.is_null() ? vector_ : *vector_handle_;
}
FeedbackSlot slot() const { return slot_; }
- FeedbackSlotKind kind() const { return vector()->GetKind(slot()); }
+ FeedbackSlotKind kind() const { return kind_; }
+
+ inline LanguageMode GetLanguageMode() const {
+ return vector()->GetLanguageMode(slot());
+ }
InlineCacheState ic_state() const { return StateFromFeedback(); }
bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
bool IsMegamorphic() const { return StateFromFeedback() == MEGAMORPHIC; }
bool IsGeneric() const { return StateFromFeedback() == GENERIC; }
+
+ void Print(std::ostream& os); // NOLINT
+
+ // For map-based ICs (load, keyed-load, store, keyed-store).
Map* FindFirstMap() const {
MapHandles maps;
ExtractMaps(&maps);
@@ -581,19 +556,19 @@ class FeedbackNexus {
return nullptr;
}
- virtual InlineCacheState StateFromFeedback() const = 0;
- virtual int ExtractMaps(MapHandles* maps) const;
- virtual MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
- virtual bool FindHandlers(ObjectHandles* code_list, int length = -1) const;
- virtual Name* FindFirstName() const { return nullptr; }
+ InlineCacheState StateFromFeedback() const;
+ int ExtractMaps(MapHandles* maps) const;
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
+ bool FindHandlers(ObjectHandles* code_list, int length = -1) const;
- bool IsCleared() {
+ bool IsCleared() const {
InlineCacheState state = StateFromFeedback();
return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
}
- virtual void Clear() { ConfigureUninitialized(); }
- virtual void ConfigureUninitialized();
+ // Clear() returns true if the state of the underlying vector was changed.
+ bool Clear();
+ void ConfigureUninitialized();
void ConfigurePremonomorphic();
bool ConfigureMegamorphic(IcCheckType property_type);
@@ -608,51 +583,21 @@ class FeedbackNexus {
void ConfigurePolymorphic(Handle<Name> name, MapHandles const& maps,
ObjectHandles* handlers);
- protected:
- inline void SetFeedback(Object* feedback,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedbackExtra(Object* feedback_extra,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- Handle<FixedArray> EnsureArrayOfSize(int length);
- Handle<FixedArray> EnsureExtraArrayOfSize(int length);
-
- private:
- // The reason for having a vector handle and a raw pointer is that we can and
- // should use handles during IC miss, but not during GC when we clear ICs. If
- // you have a handle to the vector that is better because more operations can
- // be done, like allocation.
- Handle<FeedbackVector> vector_handle_;
- FeedbackVector* vector_;
- FeedbackSlot slot_;
-};
-
-class CallICNexus final : public FeedbackNexus {
- public:
- CallICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsCallIC(slot));
- }
- CallICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsCallIC(slot));
- }
+ BinaryOperationHint GetBinaryOperationFeedback() const;
+ CompareOperationHint GetCompareOperationFeedback() const;
+ ForInHint GetForInFeedback() const;
- void ConfigureUninitialized() final;
+ // For KeyedLoad ICs.
+ KeyedAccessLoadMode GetKeyedAccessLoadMode() const;
- InlineCacheState StateFromFeedback() const final;
+ // For KeyedStore ICs.
+ KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
- int ExtractMaps(MapHandles* maps) const final {
- // CallICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
+ // For KeyedLoad and KeyedStore ICs.
+ IcCheckType GetKeyType() const;
+ Name* FindFirstName() const;
+ // For Call ICs.
int GetCallCount();
void SetSpeculationMode(SpeculationMode mode);
SpeculationMode GetSpeculationMode();
@@ -663,91 +608,20 @@ class CallICNexus final : public FeedbackNexus {
typedef BitField<SpeculationMode, 0, 1> SpeculationModeField;
typedef BitField<uint32_t, 1, 31> CallCountField;
-};
-
-class LoadICNexus : public FeedbackNexus {
- public:
- LoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsLoadIC(slot));
- }
- LoadICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsLoadIC(slot));
- }
-
- void Clear() override { ConfigurePremonomorphic(); }
-
- InlineCacheState StateFromFeedback() const override;
-};
-
-class KeyedLoadICNexus : public FeedbackNexus {
- public:
- KeyedLoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsKeyedLoadIC(slot));
- }
- KeyedLoadICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsKeyedLoadIC(slot));
- }
-
- void Clear() override { ConfigurePremonomorphic(); }
-
- KeyedAccessLoadMode GetKeyedAccessLoadMode() const;
- IcCheckType GetKeyType() const;
- InlineCacheState StateFromFeedback() const override;
- Name* FindFirstName() const override;
-};
-
-class StoreICNexus : public FeedbackNexus {
- public:
- StoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
- }
- StoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
- }
-
- void Clear() override { ConfigurePremonomorphic(); }
-
- InlineCacheState StateFromFeedback() const override;
-};
-// Base class for LoadGlobalICNexus and StoreGlobalICNexus.
-class GlobalICNexus : public FeedbackNexus {
- public:
- GlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsGlobalIC(slot));
- }
- GlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsGlobalIC(slot));
- }
+ // For CreateClosure ICs.
+ Handle<FeedbackCell> GetFeedbackCell() const;
- int ExtractMaps(MapHandles* maps) const final {
- // Load/StoreGlobalICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
+ // For InstanceOf ICs.
+ MaybeHandle<JSObject> GetConstructorFeedback() const;
- void ConfigureUninitialized() override;
+ // For Global Load and Store ICs.
void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
  // Returns false if the given combination of indices is not allowed.
bool ConfigureLexicalVarMode(int script_context_index,
int context_slot_index);
void ConfigureHandlerMode(Handle<Object> handler);
- InlineCacheState StateFromFeedback() const override;
-
// Bit positions in a smi that encodes lexical environment variable access.
#define LEXICAL_MODE_BIT_FIELDS(V, _) \
V(ContextIndexBits, unsigned, 12, _) \
@@ -758,187 +632,10 @@ class GlobalICNexus : public FeedbackNexus {
// Make sure we don't overflow the smi.
STATIC_ASSERT(LEXICAL_MODE_BIT_FIELDS_Ranges::kBitsCount <= kSmiValueSize);
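LEXICAL_MODE_BIT_FIELDS packs lexical-variable access data into a single smi via V8's BitField template. A self-contained analogue of that mechanism (ContextIndexBits mirrors the entry shown above; SlotIndexBits' name and width are assumptions, since the rest of the list falls outside this hunk):

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

using ContextIndexBits = BitField<unsigned, 0, 12>;
using SlotIndexBits = BitField<unsigned, 12, 12>;  // assumed field

int main() {
  uint32_t packed = ContextIndexBits::encode(7) | SlotIndexBits::encode(42);
  assert(ContextIndexBits::decode(packed) == 7);
  assert(SlotIndexBits::decode(packed) == 42);
}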
-};
-
-class LoadGlobalICNexus : public GlobalICNexus {
- public:
- LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : GlobalICNexus(vector, slot) {
- DCHECK(vector->IsLoadGlobalIC(slot));
- }
- LoadGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : GlobalICNexus(vector, slot) {
- DCHECK(vector->IsLoadGlobalIC(slot));
- }
-};
-
-class StoreGlobalICNexus : public GlobalICNexus {
- public:
- StoreGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : GlobalICNexus(vector, slot) {
- DCHECK(vector->IsStoreGlobalIC(slot));
- }
- StoreGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : GlobalICNexus(vector, slot) {
- DCHECK(vector->IsStoreGlobalIC(slot));
- }
-};
-// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
-// already exist in the boilerplate therefore we can use StoreIC.
-typedef StoreICNexus StoreOwnICNexus;
-
-class KeyedStoreICNexus : public FeedbackNexus {
- public:
- KeyedStoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsKeyedStoreIC(slot));
- }
- KeyedStoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK(vector->IsKeyedStoreIC(slot));
- }
-
- void Clear() override { ConfigurePremonomorphic(); }
-
- KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
- IcCheckType GetKeyType() const;
-
- InlineCacheState StateFromFeedback() const override;
- Name* FindFirstName() const override;
-};
-
-class BinaryOpICNexus final : public FeedbackNexus {
- public:
- BinaryOpICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kBinaryOp, vector->GetKind(slot));
- }
- BinaryOpICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kBinaryOp, vector->GetKind(slot));
- }
-
- InlineCacheState StateFromFeedback() const final;
- BinaryOperationHint GetBinaryOperationFeedback() const;
-
- int ExtractMaps(MapHandles* maps) const final {
- // BinaryOpICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-};
-
-class CompareICNexus final : public FeedbackNexus {
- public:
- CompareICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kCompareOp, vector->GetKind(slot));
- }
- CompareICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kCompareOp, vector->GetKind(slot));
- }
-
- InlineCacheState StateFromFeedback() const final;
- CompareOperationHint GetCompareOperationFeedback() const;
-
- int ExtractMaps(MapHandles* maps) const final {
- // CompareICs don't record map feedback.
- return 0;
- }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-};
-
-class ForInICNexus final : public FeedbackNexus {
- public:
- ForInICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kForIn, vector->GetKind(slot));
- }
- ForInICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kForIn, vector->GetKind(slot));
- }
-
- InlineCacheState StateFromFeedback() const final;
- ForInHint GetForInFeedback() const;
-
- int ExtractMaps(MapHandles* maps) const final { return 0; }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-};
-
-class InstanceOfICNexus final : public FeedbackNexus {
- public:
- InstanceOfICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kInstanceOf, vector->GetKind(slot));
- }
- InstanceOfICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kInstanceOf, vector->GetKind(slot));
- }
-
- void ConfigureUninitialized() final;
-
- InlineCacheState StateFromFeedback() const final;
- MaybeHandle<JSObject> GetConstructorFeedback() const;
-
- int ExtractMaps(MapHandles* maps) const final { return 0; }
- MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
- return MaybeHandle<Code>();
- }
- bool FindHandlers(ObjectHandles* code_list, int length = -1) const final {
- return length == 0;
- }
-};
-
-class StoreDataPropertyInLiteralICNexus : public FeedbackNexus {
- public:
- StoreDataPropertyInLiteralICNexus(Handle<FeedbackVector> vector,
- FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kStoreDataPropertyInLiteral,
- vector->GetKind(slot));
- }
- StoreDataPropertyInLiteralICNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kStoreDataPropertyInLiteral,
- vector->GetKind(slot));
- }
-
- void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map);
-
- InlineCacheState StateFromFeedback() const override;
-};
-
-// For each assignment, store the type of the value in the collection of types
-// in the feedback vector.
-class CollectTypeProfileNexus : public FeedbackNexus {
- public:
- CollectTypeProfileNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kTypeProfile, vector->GetKind(slot));
- }
- CollectTypeProfileNexus(FeedbackVector* vector, FeedbackSlot slot)
- : FeedbackNexus(vector, slot) {
- DCHECK_EQ(FeedbackSlotKind::kTypeProfile, vector->GetKind(slot));
- }
+ // For TypeProfile feedback vector slots.
+ // ResetTypeProfile will always reset type profile information.
+ void ResetTypeProfile();
// Add a type to the list of types for source position <position>.
void Collect(Handle<String> type, int position);
@@ -947,9 +644,24 @@ class CollectTypeProfileNexus : public FeedbackNexus {
std::vector<int> GetSourcePositions() const;
std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
- void Clear() override;
+ protected:
+ inline void SetFeedback(Object* feedback,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetFeedbackExtra(Object* feedback_extra,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+ Handle<FixedArray> EnsureArrayOfSize(int length);
+ Handle<FixedArray> EnsureExtraArrayOfSize(int length);
- InlineCacheState StateFromFeedback() const override;
+ private:
+  // The reason for having both a vector handle and a raw pointer is that we
+  // can and should use handles during IC miss, but not during GC when we
+  // clear ICs. If you have a handle to the vector, prefer it: more
+  // operations, such as allocation, can be performed through it.
+ Handle<FeedbackVector> vector_handle_;
+ FeedbackVector* vector_;
+ FeedbackSlot slot_;
+ FeedbackSlotKind kind_;
};
inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
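The comment above the private fields explains the dual vector representation. A minimal standalone sketch of the same idea (illustrative; MiniHandle and Vec are stand-ins, not V8 types): keep a handle when one is available so allocation-safe operations remain possible, and fall back to the raw pointer on the GC path, where handles cannot be created:

template <typename T>
class MiniHandle {  // stand-in for a GC-safe handle
 public:
  MiniHandle() : location_(nullptr) {}
  explicit MiniHandle(T** location) : location_(location) {}
  bool is_null() const { return location_ == nullptr; }
  T* get() const { return *location_; }
 private:
  T** location_;
};

struct Vec {};

class Nexus {
 public:
  // IC-miss path: a handle exists, so allocation is safe.
  explicit Nexus(MiniHandle<Vec> h)
      : handle_(h), raw_(h.is_null() ? nullptr : h.get()) {}
  // GC path: only a raw pointer is available.
  explicit Nexus(Vec* raw) : raw_(raw) {}
  Vec* vector() const { return raw_; }
  bool can_allocate() const { return !handle_.is_null(); }
 private:
  MiniHandle<Vec> handle_;  // null on the GC path
  Vec* raw_;
};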
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index 61540773db..41fddb6e0b 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -99,4 +99,4 @@ inline FieldIndex FieldIndex::ForDescriptor(const Map* map,
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_FIELD_INDEX_INL_H_
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 9e390e3d46..a1552f050e 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -135,4 +135,4 @@ class FieldIndex final {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_FIELD_INDEX_H_
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
index 40114f76d3..8eec7a5b58 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/field-type.h
@@ -7,7 +7,6 @@
#include "src/objects.h"
#include "src/objects/map.h"
-#include "src/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 8c3a08f81a..8fceed0783 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -9,6 +9,8 @@
// This include does not have a guard, because it is a template-style include,
// which can be included multiple times in different modes. It expects to have
// a mode defined before it's included. The modes are FLAG_MODE_... below:
+//
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
#define DEFINE_IMPLICATION(whenflag, thenflag) \
DEFINE_VALUE_IMPLICATION(whenflag, thenflag, true)
@@ -203,33 +205,36 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
DEFINE_IMPLICATION(harmony_class_fields, harmony_static_fields)
DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
+// Update bootstrapper.cc whenever adding a new feature flag.
+
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS(V) \
- V(harmony_import_meta, "harmony import.meta property") \
V(harmony_array_prototype_values, "harmony Array.prototype.values") \
- V(harmony_function_sent, "harmony function.sent") \
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_class_fields, "harmony fields in class literals") \
V(harmony_static_fields, "harmony static fields in class literals") \
- V(harmony_bigint, "harmony arbitrary precision integers") \
- V(harmony_private_fields, "harmony private fields in class literals")
+ V(harmony_bigint, "harmony arbitrary precision integers")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED(V) \
- V(harmony_function_tostring, "harmony Function.prototype.toString") \
V(harmony_restrict_constructor_return, \
"harmony disallow non undefined primitive return value from class " \
"constructor") \
- V(harmony_dynamic_import, "harmony dynamic import") \
V(harmony_public_fields, "harmony public fields in class literals") \
- V(harmony_optional_catch_binding, "allow omitting binding in catch blocks")
+ V(harmony_private_fields, "harmony private fields in class literals")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_regexp_named_captures, "harmony regexp named captures") \
- V(harmony_regexp_property, "harmony Unicode regexp property classes") \
- V(harmony_promise_finally, "harmony Promise.prototype.finally")
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_subsume_json, "harmony subsume JSON") \
+ V(harmony_string_trimming, "harmony String.prototype.trim{Start,End}") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_regexp_named_captures, "harmony regexp named captures") \
+ V(harmony_regexp_property, "harmony Unicode regexp property classes") \
+ V(harmony_function_tostring, "harmony Function.prototype.toString") \
+ V(harmony_promise_finally, "harmony Promise.prototype.finally") \
+ V(harmony_optional_catch_binding, "allow omitting binding in catch blocks") \
+ V(harmony_import_meta, "harmony import.meta property") \
+ V(harmony_dynamic_import, "harmony dynamic import")
#ifdef V8_INTL_SUPPORT
#define HARMONY_SHIPPING(V) \
@@ -278,7 +283,6 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_IMPLICATION(future, background_compile)
DEFINE_IMPLICATION(future, write_protect_code_memory)
// Flags for experimental implementation features.
@@ -320,6 +324,18 @@ DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
+// Flags for Ignition for no-snapshot builds.
+#undef FLAG
+#ifndef V8_USE_SNAPSHOT
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+DEFINE_INT(interrupt_budget, 144 * KB,
+ "interrupt budget which should be used for the profiler counter")
+#undef FLAG
+#define FLAG FLAG_FULL
+
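The #undef/#define sequence above re-points the FLAG macro so that the same DEFINE_INT line expands to a mutable flag in no-snapshot builds and to a read-only one otherwise. A reduced self-contained version of the technique (the MY_* names are illustrative):

// Pretend build configuration; flip to 0 to get the mutable expansion.
#define MY_USE_SNAPSHOT 1

#if !MY_USE_SNAPSHOT
#define MY_DEFINE_INT(name, value) int FLAG_##name = (value);
#else
#define MY_DEFINE_INT(name, value) constexpr int FLAG_##name = (value);
#endif

MY_DEFINE_INT(interrupt_budget, 144 * 1024)  // 147456, matching 144 * KB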
// Flags for Ignition.
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
"elide bytecodes which won't have any external effect")
@@ -470,6 +486,8 @@ DEFINE_BOOL(turbo_store_elimination, true,
DEFINE_BOOL(trace_store_elimination, false, "trace store elimination")
DEFINE_BOOL(turbo_rewrite_far_jumps, true,
"rewrite far to near jumps (ia32,x64)")
+DEFINE_BOOL(experimental_inline_promise_constructor, false,
+ "inline the Promise constructor in TurboFan")
#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
@@ -480,6 +498,11 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
"Enable mitigations for executing untrusted code")
#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
+DEFINE_BOOL(turbo_disable_switch_jump_table, false,
+ "do not emit jump-tables in Turbofan")
+DEFINE_IMPLICATION(untrusted_code_mitigations, turbo_disable_switch_jump_table)
+DEFINE_BOOL(branch_load_poisoning, false, "Mask loads with branch conditions.")
+
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
"simplifies execution model to make porting "
@@ -562,6 +585,8 @@ DEFINE_BOOL(experimental_wasm_threads, false,
"enable prototype threads for wasm")
DEFINE_BOOL(experimental_wasm_sat_f2i_conversions, false,
"enable non-trapping float-to-int conversions for wasm")
+DEFINE_BOOL(experimental_wasm_se, false,
+ "enable prototype sign extension opcodes for wasm")
DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
DEFINE_BOOL(wasm_no_bounds_checks, false,
@@ -632,6 +657,9 @@ DEFINE_BOOL(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
+DEFINE_INT(trace_duplicate_threshold_kb, 0,
+ "print duplicate objects in the heap if their size is more than "
+ "given threshold")
DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
DEFINE_BOOL(trace_fragmentation_verbose, false,
"report fragmentation for old space (detailed)")
@@ -641,6 +669,7 @@ DEFINE_BOOL(trace_mutator_utilization, false,
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
+DEFINE_BOOL(trace_unmapper, false, "Trace the unmapping")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
DEFINE_BOOL(write_protect_code_memory, false, "write protect code memory")
@@ -733,6 +762,8 @@ DEFINE_INT(stress_scavenge, 0,
DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_marking)
DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_scavenge)
+DEFINE_BOOL(disable_abortjs, false, "disables the AbortJS runtime function")
+
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
@@ -894,7 +925,10 @@ DEFINE_INT(histogram_interval, 600000,
// heap-snapshot-generator.cc
DEFINE_BOOL(heap_profiler_trace_objects, false,
"Dump heap object allocations/movements/size_updates")
-
+DEFINE_BOOL(heap_profiler_use_embedder_graph, true,
+ "Use the new EmbedderGraph API to get embedder nodes")
+DEFINE_INT(heap_snapshot_string_limit, 1024,
+ "truncate strings to this length in the heap snapshot")
// sampling-heap-profiler.cc
DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
@@ -941,7 +975,7 @@ DEFINE_BOOL(preparser_scope_analysis, true,
DEFINE_IMPLICATION(preparser_scope_analysis, aggressive_lazy_inner_functions)
// compiler.cc
-DEFINE_BOOL(background_compile, false, "enable background compilation")
+DEFINE_BOOL(background_compile, true, "enable background compilation")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -995,6 +1029,13 @@ DEFINE_INT(fuzzer_random_seed, 0,
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
DEFINE_BOOL(print_all_exceptions, false,
"print exception object and stack trace on each thrown exception")
+#ifdef V8_EMBEDDED_BUILTINS
+DEFINE_BOOL(stress_off_heap_code, false,
+ "Move code objects off-heap for testing.")
+#else
+FLAG_READONLY(BOOL, bool, stress_off_heap_code, false,
+ "Move code objects off-heap for testing.")
+#endif
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
@@ -1277,6 +1318,7 @@ DEFINE_BOOL(predictable, false, "enable predictable mode")
DEFINE_IMPLICATION(predictable, single_threaded)
DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
+DEFINE_NEG_IMPLICATION(single_threaded, wasm_async_compilation)
//
// Threading related flags.
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index d5a04ad933..a63a85e7fc 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -177,18 +177,32 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
Code* interpreter_bytecode_dispatch =
isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- return (pc >= interpreter_entry_trampoline->instruction_start() &&
- pc < interpreter_entry_trampoline->instruction_end()) ||
- (pc >= interpreter_bytecode_advance->instruction_start() &&
- pc < interpreter_bytecode_advance->instruction_end()) ||
- (pc >= interpreter_bytecode_dispatch->instruction_start() &&
- pc < interpreter_bytecode_dispatch->instruction_end());
+ return (pc >= interpreter_entry_trampoline->InstructionStart() &&
+ pc < interpreter_entry_trampoline->InstructionEnd()) ||
+ (pc >= interpreter_bytecode_advance->InstructionStart() &&
+ pc < interpreter_bytecode_advance->InstructionEnd()) ||
+ (pc >= interpreter_bytecode_dispatch->InstructionStart() &&
+ pc < interpreter_bytecode_dispatch->InstructionEnd());
}
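The InstructionStart()/InstructionEnd() comparisons above are plain half-open range tests. An equivalent standalone helper, for reference:

#include <cstdint>

// pc lies in [start, end): the same half-open test the builtin checks use.
inline bool PcInRange(uintptr_t pc, uintptr_t start, uintptr_t end) {
  return pc >= start && pc < end;
}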
DISABLE_ASAN Address ReadMemoryAt(Address address) {
return Memory::Address_at(address);
}
+WasmInstanceObject* LookupWasmInstanceObjectFromStandardFrame(
+ const StandardFrame* frame) {
+  // TODO(titzer): in the future, WASM instances will not be findable from
+  // the code.
+ WasmInstanceObject* ret =
+ FLAG_wasm_jit_to_native
+ ? WasmInstanceObject::GetOwningInstance(
+ frame->isolate()->wasm_engine()->code_manager()->LookupCode(
+ frame->pc()))
+ : WasmInstanceObject::GetOwningInstanceGC(frame->LookupCode());
+  // This is a live stack frame; there must be a live wasm instance available.
+ DCHECK_NOT_NULL(ret);
+ return ret;
+}
+
} // namespace
SafeStackFrameIterator::SafeStackFrameIterator(
@@ -376,8 +390,8 @@ Code* GetContainingCode(Isolate* isolate, Address pc) {
Code* StackFrame::LookupCode() const {
Code* result = GetContainingCode(isolate(), pc());
- DCHECK_GE(pc(), result->instruction_start());
- DCHECK_LT(pc(), result->instruction_end());
+ DCHECK_GE(pc(), result->InstructionStart());
+ DCHECK_LT(pc(), result->InstructionEnd());
return result;
}
@@ -385,12 +399,12 @@ void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address, Code* holder) {
Address pc = *pc_address;
DCHECK(holder->GetHeap()->GcSafeCodeContains(holder, pc));
- unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
+ unsigned pc_offset = static_cast<unsigned>(pc - holder->InstructionStart());
Object* code = holder;
- v->VisitRootPointer(Root::kTop, &code);
+ v->VisitRootPointer(Root::kTop, nullptr, &code);
if (code == holder) return;
holder = reinterpret_cast<Code*>(code);
- pc = holder->instruction_start() + pc_offset;
+ pc = holder->InstructionStart() + pc_offset;
*pc_address = pc;
if (FLAG_enable_embedded_constant_pool && constant_pool_address) {
*constant_pool_address = holder->constant_pool();
@@ -600,7 +614,7 @@ void ExitFrame::Iterate(RootVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
- v->VisitRootPointer(Root::kTop, &code_slot());
+ v->VisitRootPointer(Root::kTop, nullptr, &code_slot());
}
@@ -688,8 +702,28 @@ void PrintIndex(StringStream* accumulator, StackFrame::PrintMode mode,
int index) {
accumulator->Add((mode == StackFrame::OVERVIEW) ? "%5d: " : "[%d]: ", index);
}
+
+const char* StringForStackFrameType(StackFrame::Type type) {
+ switch (type) {
+#define CASE(value, name) \
+ case StackFrame::value: \
+ return #name;
+ STACK_FRAME_TYPE_LIST(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ }
+}
} // namespace
+void StackFrame::Print(StringStream* accumulator, PrintMode mode,
+ int index) const {
+ DisallowHeapAllocation no_gc;
+ PrintIndex(accumulator, mode, index);
+ accumulator->Add(StringForStackFrameType(type()));
+ accumulator->Add(" [pc: %p]\n", pc());
+}
+
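StringForStackFrameType uses the X-macro idiom: STACK_FRAME_TYPE_LIST (defined elsewhere in V8) is invoked with a locally defined CASE macro to stamp out one switch case per frame type. A self-contained miniature of the idiom:

#define FRUIT_LIST(V) \
  V(kApple, apple)    \
  V(kPear, pear)

enum Fruit {
#define DECLARE(value, name) value,
  FRUIT_LIST(DECLARE)
#undef DECLARE
};

const char* FruitName(Fruit f) {
  switch (f) {
#define CASE(value, name) \
  case value:             \
    return #name;
    FRUIT_LIST(CASE)
#undef CASE
  }
  return "unknown";
}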
void BuiltinExitFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
DisallowHeapAllocation no_gc;
@@ -868,7 +902,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// Visit the parameters that may be on top of the saved registers.
if (safepoint_entry.argument_count() > 0) {
- v->VisitRootPointers(Root::kTop, parameters_base,
+ v->VisitRootPointers(Root::kTop, nullptr, parameters_base,
parameters_base + safepoint_entry.argument_count());
parameters_base += safepoint_entry.argument_count();
}
@@ -887,7 +921,8 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
for (int i = kNumSafepointRegisters - 1; i >=0; i--) {
if (safepoint_entry.HasRegisterAt(i)) {
int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
- v->VisitRootPointer(Root::kTop, parameters_base + reg_stack_index);
+ v->VisitRootPointer(Root::kTop, nullptr,
+ parameters_base + reg_stack_index);
}
}
// Skip the words containing the register values.
@@ -900,7 +935,8 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// Visit the rest of the parameters if they are tagged.
if (has_tagged_params) {
- v->VisitRootPointers(Root::kTop, parameters_base, parameters_limit);
+ v->VisitRootPointers(Root::kTop, nullptr, parameters_base,
+ parameters_limit);
}
// Visit pointer spill slots and locals.
@@ -908,7 +944,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
- v->VisitRootPointer(Root::kTop, parameters_limit + index);
+ v->VisitRootPointer(Root::kTop, nullptr, parameters_limit + index);
}
}
@@ -921,7 +957,8 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
if (!is_wasm() && !is_wasm_to_js()) {
// If this frame has JavaScript ABI, visit the context (in stub and JS
// frames) and the function (in JS frames).
- v->VisitRootPointers(Root::kTop, frame_header_base, frame_header_limit);
+ v->VisitRootPointers(Root::kTop, nullptr, frame_header_base,
+ frame_header_limit);
}
}
@@ -945,10 +982,10 @@ int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
Code* code = LookupCode();
DCHECK(code->is_turbofanned());
DCHECK_EQ(code->kind(), Code::BUILTIN);
- HandlerTable* table = HandlerTable::cast(code->handler_table());
- int pc_offset = static_cast<int>(pc() - code->entry());
+ HandlerTable table(code);
+ int pc_offset = static_cast<int>(pc() - code->InstructionStart());
*stack_slots = code->stack_slots();
- return table->LookupReturn(pc_offset);
+ return table.LookupReturn(pc_offset);
}
void OptimizedFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
@@ -1011,7 +1048,7 @@ void JavaScriptFrame::GetFunctions(
void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Code* code = LookupCode();
- int offset = static_cast<int>(pc() - code->instruction_start());
+ int offset = static_cast<int>(pc() - code->InstructionStart());
AbstractCode* abstract_code = AbstractCode::cast(code);
FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver(),
function(), abstract_code,
@@ -1047,7 +1084,7 @@ Script* JavaScriptFrame::script() const {
int JavaScriptFrame::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
- DCHECK_EQ(0, LookupCode()->handler_table()->length());
+ DCHECK_EQ(0, LookupCode()->handler_table_offset());
DCHECK(!LookupCode()->is_optimized_code());
return -1;
}
@@ -1097,7 +1134,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
code_offset = iframe->GetBytecodeOffset();
} else {
Code* code = frame->unchecked_code();
- code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+ code_offset = static_cast<int>(frame->pc() - code->InstructionStart());
}
PrintFunctionAndOffset(function, function->abstract_code(), code_offset,
file, print_line_number);
@@ -1155,7 +1192,7 @@ void JavaScriptFrame::CollectTopFrameForICStats(Isolate* isolate) {
code_offset = iframe->GetBytecodeOffset();
} else {
Code* code = frame->unchecked_code();
- code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+ code_offset = static_cast<int>(frame->pc() - code->InstructionStart());
}
CollectFunctionAndOffsetForICStats(function, function->abstract_code(),
code_offset);
@@ -1467,8 +1504,8 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
// code to perform prediction there.
DCHECK_NULL(prediction);
Code* code = LookupCode();
- HandlerTable* table = HandlerTable::cast(code->handler_table());
- int pc_offset = static_cast<int>(pc() - code->entry());
+ HandlerTable table(code);
+ int pc_offset = static_cast<int>(pc() - code->InstructionStart());
if (stack_slots) *stack_slots = code->stack_slots();
// When the return pc has been replaced by a trampoline there won't be
@@ -1479,7 +1516,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
SafepointTable safepoints(code);
pc_offset = safepoints.find_return_pc(pc_offset);
}
- return table->LookupReturn(pc_offset);
+ return table.LookupReturn(pc_offset);
}
DeoptimizationData* OptimizedFrame::GetDeoptimizationData(
@@ -1588,9 +1625,8 @@ int InterpretedFrame::position() const {
int InterpretedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
- BytecodeArray* bytecode = function()->shared()->bytecode_array();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
- return table->LookupRange(GetBytecodeOffset(), context_register, prediction);
+ HandlerTable table(function()->shared()->bytecode_array());
+ return table.LookupRange(GetBytecodeOffset(), context_register, prediction);
}
int InterpretedFrame::GetBytecodeOffset() const {
@@ -1711,9 +1747,8 @@ void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
.start()
: LookupCode()->instruction_start();
int pc = static_cast<int>(this->pc() - instruction_start);
- WasmSharedModuleData* shared = wasm_instance()->compiled_module()->shared();
Vector<const uint8_t> raw_func_name =
- shared->GetRawFunctionName(this->function_index());
+ shared()->GetRawFunctionName(this->function_index());
const int kMaxPrintedFunctionName = 64;
char func_name[kMaxPrintedFunctionName + 1];
int func_name_len = std::min(kMaxPrintedFunctionName, raw_func_name.length());
@@ -1744,23 +1779,24 @@ WasmCodeWrapper WasmCompiledFrame::wasm_code() const {
}
WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
- WasmInstanceObject* obj =
- FLAG_wasm_jit_to_native
- ? WasmInstanceObject::GetOwningInstance(
- isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
- : WasmInstanceObject::GetOwningInstanceGC(LookupCode());
- // This is a live stack frame; it must have a live instance.
- DCHECK_NOT_NULL(obj);
- return obj;
+ return LookupWasmInstanceObjectFromStandardFrame(this);
+}
+
+WasmSharedModuleData* WasmCompiledFrame::shared() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)
+ ->compiled_module()
+ ->shared();
+}
+
+WasmCompiledModule* WasmCompiledFrame::compiled_module() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)->compiled_module();
}
uint32_t WasmCompiledFrame::function_index() const {
return FrameSummary::GetSingle(this).AsWasmCompiled().function_index();
}
-Script* WasmCompiledFrame::script() const {
- return wasm_instance()->compiled_module()->shared()->script();
-}
+Script* WasmCompiledFrame::script() const { return shared()->script(); }
int WasmCompiledFrame::position() const {
return FrameSummary::GetSingle(this).SourcePosition();
@@ -1770,7 +1806,8 @@ void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
WasmCodeWrapper code = wasm_code();
int offset = static_cast<int>(pc() - code.instructions().start());
- Handle<WasmInstanceObject> instance = code.wasm_instance();
+ Handle<WasmInstanceObject> instance(
+ LookupWasmInstanceObjectFromStandardFrame(this), isolate());
FrameSummary::WasmCompiledFrameSummary summary(
isolate(), instance, code, offset, at_to_number_conversion());
functions->push_back(summary);
@@ -1805,22 +1842,19 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
DCHECK_NOT_NULL(stack_slots);
if (!FLAG_wasm_jit_to_native) {
Code* code = LookupCode();
- HandlerTable* table = HandlerTable::cast(code->handler_table());
+ HandlerTable table(code);
int pc_offset = static_cast<int>(pc() - code->entry());
*stack_slots = code->stack_slots();
- return table->LookupReturn(pc_offset);
+ return table.LookupReturn(pc_offset);
}
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
- if (!code->IsAnonymous()) {
- Object* table_entry =
- code->owner()->compiled_module()->handler_table()->get(code->index());
- if (table_entry->IsHandlerTable()) {
- HandlerTable* table = HandlerTable::cast(table_entry);
- int pc_offset = static_cast<int>(pc() - code->instructions().start());
- *stack_slots = static_cast<int>(code->stack_slots());
- return table->LookupReturn(pc_offset);
- }
+ if (!code->IsAnonymous() && code->handler_table_offset() > 0) {
+ HandlerTable table(code->instructions().start(),
+ code->handler_table_offset());
+ int pc_offset = static_cast<int>(pc() - code->instructions().start());
+ *stack_slots = static_cast<int>(code->stack_slots());
+ return table.LookupReturn(pc_offset);
}
return -1;
}
@@ -1841,7 +1875,8 @@ void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
void WasmInterpreterEntryFrame::Summarize(
std::vector<FrameSummary>* functions) const {
- Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
+ Handle<WasmInstanceObject> instance(
+ LookupWasmInstanceObjectFromStandardFrame(this), isolate());
std::vector<std::pair<uint32_t, int>> interpreted_stack =
instance->debug_info()->GetInterpretedStack(fp());
@@ -1860,27 +1895,33 @@ Code* WasmInterpreterEntryFrame::unchecked_code() const {
}
}
+// TODO(titzer): deprecate this method.
WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
- WasmInstanceObject* ret =
- FLAG_wasm_jit_to_native
- ? WasmInstanceObject::GetOwningInstance(
- isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
- : WasmInstanceObject::GetOwningInstanceGC(LookupCode());
- // This is a live stack frame, there must be a live wasm instance available.
- DCHECK_NOT_NULL(ret);
- return ret;
+ return LookupWasmInstanceObjectFromStandardFrame(this);
}
-Script* WasmInterpreterEntryFrame::script() const {
- return wasm_instance()->compiled_module()->shared()->script();
+WasmDebugInfo* WasmInterpreterEntryFrame::debug_info() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)->debug_info();
}
+WasmSharedModuleData* WasmInterpreterEntryFrame::shared() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)
+ ->compiled_module()
+ ->shared();
+}
+
+WasmCompiledModule* WasmInterpreterEntryFrame::compiled_module() const {
+ return LookupWasmInstanceObjectFromStandardFrame(this)->compiled_module();
+}
+
+Script* WasmInterpreterEntryFrame::script() const { return shared()->script(); }
+
int WasmInterpreterEntryFrame::position() const {
return FrameSummary::GetBottom(this).AsWasmInterpreted().SourcePosition();
}
Object* WasmInterpreterEntryFrame::context() const {
- return wasm_instance()->compiled_module()->native_context();
+ return compiled_module()->native_context();
}
Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
@@ -2081,7 +2122,7 @@ void StandardFrame::IterateExpressions(RootVisitor* v) const {
const int offset = StandardFrameConstants::kLastObjectOffset;
Object** base = &Memory::Object_at(sp());
Object** limit = &Memory::Object_at(fp() + offset) + 1;
- v->VisitRootPointers(Root::kTop, base, limit);
+ v->VisitRootPointers(Root::kTop, nullptr, base, limit);
}
void JavaScriptFrame::Iterate(RootVisitor* v) const {
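Throughout frames.cc, the RootVisitor calls gain an extra description argument (nullptr here; global-handles.cc below passes a retainer label). The visitor interface implied by these call sites looks roughly like this (a sketch reconstructed from the callers; the real declaration is V8-internal):

class Object;
enum class Root { kTop, kGlobalHandles, kEternalHandles /* ... */ };

class RootVisitor {
 public:
  virtual ~RootVisitor() = default;
  // |description| is an optional human-readable retainer label for heap
  // tooling; callers with nothing useful to report pass nullptr.
  virtual void VisitRootPointer(Root root, const char* description,
                                Object** p) = 0;
  virtual void VisitRootPointers(Root root, const char* description,
                                 Object** start, Object** end) = 0;
};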
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 0c988770f6..2bea6a3ca3 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -5,8 +5,6 @@
#ifndef V8_FRAMES_H_
#define V8_FRAMES_H_
-#include "src/allocation.h"
-#include "src/flags.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/objects/code.h"
@@ -18,18 +16,20 @@ namespace wasm {
class WasmCode;
}
+// Forward declarations.
class AbstractCode;
class Debug;
-class ObjectVisitor;
-class StringStream;
-
-// Forward declarations.
class ExternalCallbackScope;
class Isolate;
+class ObjectVisitor;
class RootVisitor;
class StackFrameIteratorBase;
+class StringStream;
class ThreadLocalTop;
+class WasmCompiledModule;
+class WasmDebugInfo;
class WasmInstanceObject;
+class WasmSharedModuleData;
class InnerPointerToCodeCache {
public:
@@ -286,9 +286,8 @@ class StackFrame BASE_EMBEDDED {
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const { }
+ virtual void Print(StringStream* accumulator, PrintMode mode,
+ int index) const;
Isolate* isolate() const { return isolate_; }
@@ -890,6 +889,11 @@ class InterpretedFrame : public JavaScriptFrame {
static int GetBytecodeOffset(Address fp);
+ static InterpretedFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_interpreted());
+ return static_cast<InterpretedFrame*>(frame);
+ }
+
protected:
inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
@@ -968,7 +972,7 @@ class WasmCompiledFrame final : public StandardFrame {
Code* unchecked_code() const override;
// Accessors.
- WasmInstanceObject* wasm_instance() const;
+ WasmInstanceObject* wasm_instance() const; // TODO(titzer): deprecate.
WasmCodeWrapper wasm_code() const;
uint32_t function_index() const;
Script* script() const override;
@@ -989,6 +993,8 @@ class WasmCompiledFrame final : public StandardFrame {
private:
friend class StackFrameIteratorBase;
+ WasmCompiledModule* compiled_module() const;
+ WasmSharedModuleData* shared() const;
};
class WasmInterpreterEntryFrame final : public StandardFrame {
@@ -1008,7 +1014,9 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
Code* unchecked_code() const override;
// Accessors.
- WasmInstanceObject* wasm_instance() const;
+ WasmDebugInfo* debug_info() const;
+ WasmInstanceObject* wasm_instance() const; // TODO(titzer): deprecate.
+
Script* script() const override;
int position() const override;
Object* context() const override;
@@ -1025,6 +1033,8 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
private:
friend class StackFrameIteratorBase;
+ WasmCompiledModule* compiled_module() const;
+ WasmSharedModuleData* shared() const;
};
class WasmToJsFrame : public StubFrame {
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index b915e71ebe..7ffc6459fb 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -36,4 +36,4 @@ void EventHandler(const v8::JitCodeEvent* event);
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_GDB_JIT_H_
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 7845d71fb1..fe87060fb0 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -54,7 +54,7 @@ class GlobalHandles::Node {
index_ = 0;
set_active(false);
set_in_new_space_list(false);
- parameter_or_next_free_.next_free = nullptr;
+ data_.next_free = nullptr;
weak_callback_ = nullptr;
}
#endif
@@ -65,7 +65,7 @@ class GlobalHandles::Node {
DCHECK(static_cast<int>(index_) == index);
set_state(FREE);
set_in_new_space_list(false);
- parameter_or_next_free_.next_free = *first_free;
+ data_.next_free = *first_free;
*first_free = this;
}
@@ -75,7 +75,7 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_active(false);
set_state(NORMAL);
- parameter_or_next_free_.parameter = nullptr;
+ data_.parameter = nullptr;
weak_callback_ = nullptr;
IncreaseBlockUses();
}
@@ -100,6 +100,7 @@ class GlobalHandles::Node {
// Object slot accessors.
Object* object() const { return object_; }
Object** location() { return &object_; }
+ const char* label() { return state() == NORMAL ? data_.label : nullptr; }
Handle<Object> handle() { return Handle<Object>(location()); }
// Wrapper class ID accessors.
@@ -185,21 +186,21 @@ class GlobalHandles::Node {
// Callback parameter accessors.
void set_parameter(void* parameter) {
DCHECK(IsInUse());
- parameter_or_next_free_.parameter = parameter;
+ data_.parameter = parameter;
}
void* parameter() const {
DCHECK(IsInUse());
- return parameter_or_next_free_.parameter;
+ return data_.parameter;
}
// Accessors for next free node in the free list.
Node* next_free() {
DCHECK(state() == FREE);
- return parameter_or_next_free_.next_free;
+ return data_.next_free;
}
void set_next_free(Node* value) {
DCHECK(state() == FREE);
- parameter_or_next_free_.next_free = value;
+ data_.next_free = value;
}
void MakeWeak(void* parameter,
@@ -241,6 +242,11 @@ class GlobalHandles::Node {
return p;
}
+ void AnnotateStrongRetainer(const char* label) {
+ DCHECK_EQ(state(), NORMAL);
+ data_.label = label;
+ }
+
void CollectPhantomCallbackData(
Isolate* isolate,
std::vector<PendingPhantomCallback>* pending_phantom_callbacks) {
@@ -346,12 +352,15 @@ class GlobalHandles::Node {
// Handle specific callback - might be a weak reference in disguise.
WeakCallbackInfo<void>::Callback weak_callback_;
- // Provided data for callback. In FREE state, this is used for
- // the free list link.
+ // The meaning of this field depends on node state:
+ // state == FREE: it stores the next free node pointer.
+ // state == NORMAL: it stores the strong retainer label.
+ // otherwise: it stores the parameter for the weak callback.
union {
- void* parameter;
Node* next_free;
- } parameter_or_next_free_;
+ const char* label;
+ void* parameter;
+ } data_;
DISALLOW_COPY_AND_ASSIGN(Node);
};
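The data_ union above is discriminated by the node's state rather than by an adjacent tag, so each accessor checks the state before touching a member. A compact standalone analogue (illustrative, with simplified states):

#include <cassert>

struct Slot {
  enum State { kFree, kNormal, kOther };
  State state = kFree;
  union Data {
    Slot* next_free;    // state == kFree
    const char* label;  // state == kNormal
    void* parameter;    // otherwise
  } data;

  Slot() { data.next_free = nullptr; }
  const char* label() const {
    return state == kNormal ? data.label : nullptr;  // mirrors Node::label()
  }
  void set_label(const char* l) {
    assert(state == kNormal);
    data.label = l;
  }
};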
@@ -447,7 +456,7 @@ void GlobalHandles::Node::IncreaseBlockUses() {
void GlobalHandles::Node::DecreaseBlockUses() {
NodeBlock* node_block = FindBlock();
GlobalHandles* global_handles = node_block->global_handles();
- parameter_or_next_free_.next_free = global_handles->first_free_;
+ data_.next_free = global_handles->first_free_;
global_handles->first_free_ = this;
node_block->DecreaseUses();
global_handles->isolate()->counters()->global_handles()->Decrement();
@@ -579,6 +588,11 @@ void* GlobalHandles::ClearWeakness(Object** location) {
return Node::FromLocation(location)->ClearWeakness();
}
+void GlobalHandles::AnnotateStrongRetainer(Object** location,
+ const char* label) {
+ Node::FromLocation(location)->AnnotateStrongRetainer(label);
+}
+
bool GlobalHandles::IsNearDeath(Object** location) {
return Node::FromLocation(location)->IsNearDeath();
}
@@ -596,7 +610,8 @@ void GlobalHandles::IterateWeakRootsForFinalizers(RootVisitor* v) {
DCHECK(!node->IsPhantomCallback());
DCHECK(!node->IsPhantomResetHandle());
// Finalizers need to survive.
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -635,7 +650,8 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
for (Node* node : new_space_nodes_) {
if (node->IsStrongRetainer() ||
(node->IsWeakRetainer() && node->is_active())) {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -649,7 +665,8 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
}
if (node->IsStrongRetainer() ||
(node->IsWeakRetainer() && node->is_active())) {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -685,7 +702,8 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
DCHECK(!node->IsPhantomCallback());
DCHECK(!node->IsPhantomResetHandle());
// Finalizers need to survive.
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -712,7 +730,8 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
}
} else {
// Node survived and needs to be visited.
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -902,17 +921,27 @@ int GlobalHandles::PostGarbageCollectionProcessing(
void GlobalHandles::IterateStrongRoots(RootVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsStrongRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, it.node()->location());
+ v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
+ it.node()->location());
}
}
}
+void GlobalHandles::IterateWeakRoots(RootVisitor* v) {
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeak()) {
+ v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
+ it.node()->location());
+ }
+ }
+}
DISABLE_CFI_PERF
void GlobalHandles::IterateAllRoots(RootVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, it.node()->location());
+ v->VisitRootPointer(Root::kGlobalHandles, it.node()->label(),
+ it.node()->location());
}
}
}
@@ -921,7 +950,8 @@ DISABLE_CFI_PERF
void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
for (Node* node : new_space_nodes_) {
if (node->IsRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -932,7 +962,8 @@ void GlobalHandles::IterateNewSpaceRoots(RootVisitor* v, size_t start,
for (size_t i = start; i < end; ++i) {
Node* node = new_space_nodes_[i];
if (node->IsRetainer()) {
- v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->label(),
+ node->location());
}
}
}
@@ -1054,7 +1085,7 @@ void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
int limit = size_;
for (Object** block : blocks_) {
DCHECK_GT(limit, 0);
- visitor->VisitRootPointers(Root::kEternalHandles, block,
+ visitor->VisitRootPointers(Root::kEternalHandles, nullptr, block,
block + Min(limit, kSize));
limit -= kSize;
}
@@ -1062,7 +1093,8 @@ void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
void EternalHandles::IterateNewSpaceRoots(RootVisitor* visitor) {
for (int index : new_space_indices_) {
- visitor->VisitRootPointer(Root::kEternalHandles, GetLocation(index));
+ visitor->VisitRootPointer(Root::kEternalHandles, nullptr,
+ GetLocation(index));
}
}
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 59b94e371b..e96b74b883 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -77,6 +77,8 @@ class GlobalHandles {
static void MakeWeak(Object*** location_addr);
+ static void AnnotateStrongRetainer(Object** location, const char* label);
+
void RecordStats(HeapStats* stats);
// Returns the current number of handles to global objects.
@@ -108,10 +110,10 @@ class GlobalHandles {
int PostGarbageCollectionProcessing(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags);
- // Iterates over all strong handles.
void IterateStrongRoots(RootVisitor* v);
- // Iterates over all handles.
+ void IterateWeakRoots(RootVisitor* v);
+
void IterateAllRoots(RootVisitor* v);
void IterateAllNewSpaceRoots(RootVisitor* v);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index bc28181db1..7ffbf99d61 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -470,6 +470,7 @@ class FreeStoreAllocationPolicy;
class FunctionTemplateInfo;
class MemoryChunk;
class NumberDictionary;
+class SimpleNumberDictionary;
class NameDictionary;
class GlobalDictionary;
template <typename T> class MaybeHandle;
@@ -538,25 +539,6 @@ enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
enum class AccessMode { ATOMIC, NON_ATOMIC };
-// Possible outcomes for decisions.
-enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
-
-inline size_t hash_value(Decision decision) {
- return static_cast<uint8_t>(decision);
-}
-
-inline std::ostream& operator<<(std::ostream& os, Decision decision) {
- switch (decision) {
- case Decision::kUnknown:
- return os << "Unknown";
- case Decision::kTrue:
- return os << "True";
- case Decision::kFalse:
- return os << "False";
- }
- UNREACHABLE();
-}
-
// Supported write barrier modes.
enum WriteBarrierKind : uint8_t {
kNoWriteBarrier,
@@ -628,9 +610,6 @@ enum NativesFlag {
INSPECTOR_CODE
};
-// JavaScript defines two kinds of 'nil'.
-enum NilValue { kNullValue, kUndefinedValue };
-
// ParseRestriction is used to restrict the set of valid statements in a
// unit of compilation. Restriction violations cause a syntax error.
enum ParseRestriction {
@@ -747,15 +726,6 @@ constexpr int kIeeeDoubleMantissaWordOffset = 4;
constexpr int kIeeeDoubleExponentWordOffset = 0;
#endif
-// AccessorCallback
-struct AccessorDescriptor {
- Object* (*getter)(Isolate* isolate, Object* object, void* data);
- Object* (*setter)(
- Isolate* isolate, JSObject* object, Object* value, void* data);
- void* data;
-};
-
-
// -----------------------------------------------------------------------------
// Macros
@@ -811,8 +781,6 @@ enum CpuFeature {
MIPSr2,
MIPSr6,
MIPS_SIMD, // MSA instructions
- // ARM64
- ALWAYS_ALIGN_CSP,
// PPC
FPR_GPR_MOV,
LWSYNC,
@@ -889,13 +857,6 @@ inline std::ostream& operator<<(std::ostream& os, CreateArgumentsType type) {
UNREACHABLE();
}
-// Used to specify if a macro instruction must perform a smi check on tagged
-// values.
-enum SmiCheckType {
- DONT_DO_SMI_CHECK,
- DO_SMI_CHECK
-};
-
enum ScopeType : uint8_t {
EVAL_SCOPE, // The top-level scope for an eval source.
FUNCTION_SCOPE, // The top-level scope for a function.
@@ -1060,99 +1021,60 @@ enum VariableLocation : uint8_t {
// immediately initialized upon creation (kCreatedInitialized).
enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
-enum class HoleCheckMode { kRequired, kElided };
-
enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
// Serialized in PreparseData, so numeric values should not be changed.
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
-
-enum MinusZeroMode {
- TREAT_MINUS_ZERO_AS_ZERO,
- FAIL_ON_MINUS_ZERO
-};
-
-
-enum Signedness { kSigned, kUnsigned };
-
-enum FunctionKind : uint16_t {
- kNormalFunction = 0,
- kArrowFunction = 1 << 0,
- kGeneratorFunction = 1 << 1,
- kConciseMethod = 1 << 2,
- kDefaultConstructor = 1 << 3,
- kDerivedConstructor = 1 << 4,
- kBaseConstructor = 1 << 5,
- kGetterFunction = 1 << 6,
- kSetterFunction = 1 << 7,
- kAsyncFunction = 1 << 8,
- kModule = 1 << 9,
- kClassFieldsInitializerFunction = 1 << 10 | kConciseMethod,
- kLastFunctionKind = kClassFieldsInitializerFunction,
-
- kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
- kAccessorFunction = kGetterFunction | kSetterFunction,
- kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
- kDefaultDerivedConstructor = kDefaultConstructor | kDerivedConstructor,
- kClassConstructor =
- kBaseConstructor | kDerivedConstructor | kDefaultConstructor,
- kAsyncArrowFunction = kArrowFunction | kAsyncFunction,
- kAsyncConciseMethod = kAsyncFunction | kConciseMethod,
-
- // https://tc39.github.io/proposal-async-iteration/
- kAsyncConciseGeneratorMethod = kAsyncFunction | kConciseGeneratorMethod,
- kAsyncGeneratorFunction = kAsyncFunction | kGeneratorFunction
+enum FunctionKind : uint8_t {
+ kNormalFunction,
+ kArrowFunction,
+ kGeneratorFunction,
+ kConciseMethod,
+ kDerivedConstructor,
+ kBaseConstructor,
+ kGetterFunction,
+ kSetterFunction,
+ kAsyncFunction,
+ kModule,
+ kClassFieldsInitializerFunction,
+
+ kDefaultBaseConstructor,
+ kDefaultDerivedConstructor,
+ kAsyncArrowFunction,
+ kAsyncConciseMethod,
+
+ kConciseGeneratorMethod,
+ kAsyncConciseGeneratorMethod,
+ kAsyncGeneratorFunction,
+ kLastFunctionKind = kAsyncGeneratorFunction,
};
-inline bool IsValidFunctionKind(FunctionKind kind) {
- return kind == FunctionKind::kNormalFunction ||
- kind == FunctionKind::kArrowFunction ||
- kind == FunctionKind::kGeneratorFunction ||
- kind == FunctionKind::kModule ||
- kind == FunctionKind::kConciseMethod ||
- kind == FunctionKind::kConciseGeneratorMethod ||
- kind == FunctionKind::kGetterFunction ||
- kind == FunctionKind::kSetterFunction ||
- kind == FunctionKind::kAccessorFunction ||
- kind == FunctionKind::kDefaultBaseConstructor ||
- kind == FunctionKind::kDefaultDerivedConstructor ||
- kind == FunctionKind::kBaseConstructor ||
- kind == FunctionKind::kDerivedConstructor ||
- kind == FunctionKind::kAsyncFunction ||
- kind == FunctionKind::kAsyncArrowFunction ||
- kind == FunctionKind::kAsyncConciseMethod ||
- kind == FunctionKind::kAsyncConciseGeneratorMethod ||
- kind == FunctionKind::kAsyncGeneratorFunction ||
- kind == FunctionKind::kClassFieldsInitializerFunction;
-}
-
-
inline bool IsArrowFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kArrowFunction) != 0;
+ return kind == FunctionKind::kArrowFunction ||
+ kind == FunctionKind::kAsyncArrowFunction;
}
-
-inline bool IsGeneratorFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kGeneratorFunction) != 0;
+inline bool IsModule(FunctionKind kind) {
+ return kind == FunctionKind::kModule;
}
-inline bool IsModule(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kModule) != 0;
+inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
+ return kind == FunctionKind::kAsyncGeneratorFunction ||
+ kind == FunctionKind::kAsyncConciseGeneratorMethod;
}
-inline bool IsAsyncFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kAsyncFunction) != 0;
+inline bool IsGeneratorFunction(FunctionKind kind) {
+ return kind == FunctionKind::kGeneratorFunction ||
+ kind == FunctionKind::kConciseGeneratorMethod ||
+ IsAsyncGeneratorFunction(kind);
}
-inline bool IsAsyncGeneratorFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- const FunctionKind kMask = FunctionKind::kAsyncGeneratorFunction;
- return (kind & kMask) == kMask;
+inline bool IsAsyncFunction(FunctionKind kind) {
+ return kind == FunctionKind::kAsyncFunction ||
+ kind == FunctionKind::kAsyncArrowFunction ||
+ kind == FunctionKind::kAsyncConciseMethod ||
+ IsAsyncGeneratorFunction(kind);
}
inline bool IsResumableFunction(FunctionKind kind) {
@@ -1160,50 +1082,47 @@ inline bool IsResumableFunction(FunctionKind kind) {
}
inline bool IsConciseMethod(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kConciseMethod) != 0;
+ return kind == FunctionKind::kConciseMethod ||
+ kind == FunctionKind::kConciseGeneratorMethod ||
+ kind == FunctionKind::kAsyncConciseMethod ||
+ kind == FunctionKind::kAsyncConciseGeneratorMethod ||
+ kind == FunctionKind::kClassFieldsInitializerFunction;
}
inline bool IsGetterFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kGetterFunction) != 0;
+ return kind == FunctionKind::kGetterFunction;
}
inline bool IsSetterFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kSetterFunction) != 0;
+ return kind == FunctionKind::kSetterFunction;
}
inline bool IsAccessorFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kAccessorFunction) != 0;
+ return kind == FunctionKind::kGetterFunction ||
+ kind == FunctionKind::kSetterFunction;
}
-
inline bool IsDefaultConstructor(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kDefaultConstructor) != 0;
+ return kind == FunctionKind::kDefaultBaseConstructor ||
+ kind == FunctionKind::kDefaultDerivedConstructor;
}
-
inline bool IsBaseConstructor(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kBaseConstructor) != 0;
+ return kind == FunctionKind::kBaseConstructor ||
+ kind == FunctionKind::kDefaultBaseConstructor;
}
inline bool IsDerivedConstructor(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kDerivedConstructor) != 0;
+ return kind == FunctionKind::kDerivedConstructor ||
+ kind == FunctionKind::kDefaultDerivedConstructor;
}
inline bool IsClassConstructor(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return (kind & FunctionKind::kClassConstructor) != 0;
+ return IsBaseConstructor(kind) || IsDerivedConstructor(kind);
}
inline bool IsClassFieldsInitializerFunction(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
return kind == FunctionKind::kClassFieldsInitializerFunction;
}
@@ -1216,6 +1135,48 @@ inline bool IsConstructable(FunctionKind kind) {
return true;
}
+inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
+ switch (kind) {
+ case FunctionKind::kNormalFunction:
+ return os << "NormalFunction";
+ case FunctionKind::kArrowFunction:
+ return os << "ArrowFunction";
+ case FunctionKind::kGeneratorFunction:
+ return os << "GeneratorFunction";
+ case FunctionKind::kConciseMethod:
+ return os << "ConciseMethod";
+ case FunctionKind::kDerivedConstructor:
+ return os << "DerivedConstructor";
+ case FunctionKind::kBaseConstructor:
+ return os << "BaseConstructor";
+ case FunctionKind::kGetterFunction:
+ return os << "GetterFunction";
+ case FunctionKind::kSetterFunction:
+ return os << "SetterFunction";
+ case FunctionKind::kAsyncFunction:
+ return os << "AsyncFunction";
+ case FunctionKind::kModule:
+ return os << "Module";
+ case FunctionKind::kClassFieldsInitializerFunction:
+ return os << "ClassFieldsInitializerFunction";
+ case FunctionKind::kDefaultBaseConstructor:
+ return os << "DefaultBaseConstructor";
+ case FunctionKind::kDefaultDerivedConstructor:
+ return os << "DefaultDerivedConstructor";
+ case FunctionKind::kAsyncArrowFunction:
+ return os << "AsyncArrowFunction";
+ case FunctionKind::kAsyncConciseMethod:
+ return os << "AsyncConciseMethod";
+ case FunctionKind::kConciseGeneratorMethod:
+ return os << "ConciseGeneratorMethod";
+ case FunctionKind::kAsyncConciseGeneratorMethod:
+ return os << "AsyncConciseGeneratorMethod";
+ case FunctionKind::kAsyncGeneratorFunction:
+ return os << "AsyncGeneratorFunction";
+ }
+ UNREACHABLE();
+}
+
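The switch above deliberately has no default case: with -Wswitch-style warnings enabled, adding a FunctionKind enumerator without extending the switch fails loudly, and the trailing UNREACHABLE() catches out-of-range values at runtime. A minimal illustration of the idiom:

enum class Color { kRed, kGreen };

const char* Name(Color c) {
  switch (c) {
    case Color::kRed:
      return "Red";
    case Color::kGreen:
      return "Green";
  }
  // No default case: the compiler then flags any enumerator missing above,
  // and control reaches here only on an out-of-range value.
  return "unreachable";
}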
enum class InterpreterPushArgsMode : unsigned {
kJSFunction,
kWithFinalSpread,
@@ -1406,6 +1367,8 @@ enum ExternalArrayType {
kExternalFloat32Array,
kExternalFloat64Array,
kExternalUint8ClampedArray,
+ kExternalBigInt64Array,
+ kExternalBigUint64Array,
};
struct AssemblerDebugInfo {
@@ -1462,6 +1425,8 @@ inline std::ostream& operator<<(std::ostream& os,
return os;
}
+enum class BlockingBehavior { kBlock, kDontBlock };
+
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
@@ -1476,9 +1441,7 @@ enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
C(PendingHandlerFP, pending_handler_fp) \
C(PendingHandlerSP, pending_handler_sp) \
C(ExternalCaughtException, external_caught_exception) \
- C(JSEntrySP, js_entry_sp) \
- C(MicrotaskQueueBailoutIndex, microtask_queue_bailout_index) \
- C(MicrotaskQueueBailoutCount, microtask_queue_bailout_count)
+ C(JSEntrySP, js_entry_sp)
enum IsolateAddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
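
The FunctionKind predicates above move from bit-mask tests to exact
enumerator comparisons, and the new operator<< returns from every case of an
exhaustive switch with no default, so a compiler building with -Wswitch can
flag any enumerator that is later added but not printed. A minimal standalone
sketch of that pattern (the enum and names below are illustrative, not V8's):

    #include <cstdlib>
    #include <iostream>

    enum class Kind { kNormal, kArrow, kGenerator };  // illustrative enum

    // No default case: adding an enumerator to Kind produces a -Wswitch
    // warning here until a matching case is written.
    std::ostream& operator<<(std::ostream& os, Kind kind) {
      switch (kind) {
        case Kind::kNormal:    return os << "Normal";
        case Kind::kArrow:     return os << "Arrow";
        case Kind::kGenerator: return os << "Generator";
      }
      std::abort();  // stand-in for V8's UNREACHABLE()
    }

    int main() { std::cout << Kind::kArrow << "\n"; }  // prints "Arrow"
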
diff --git a/deps/v8/src/handler-table.cc b/deps/v8/src/handler-table.cc
new file mode 100644
index 0000000000..72e0e6caf8
--- /dev/null
+++ b/deps/v8/src/handler-table.cc
@@ -0,0 +1,220 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/handler-table.h"
+
+#include <iomanip>
+
+#include "src/assembler-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/code-inl.h"
+
+namespace v8 {
+namespace internal {
+
+HandlerTable::HandlerTable(Code* code)
+ : HandlerTable(code->InstructionStart(), code->handler_table_offset()) {}
+
+HandlerTable::HandlerTable(BytecodeArray* bytecode_array)
+ : HandlerTable(bytecode_array->handler_table()) {}
+
+HandlerTable::HandlerTable(ByteArray* byte_array)
+ : number_of_entries_(byte_array->length() / kRangeEntrySize /
+ sizeof(int32_t)),
+#ifdef DEBUG
+ mode_(kRangeBasedEncoding),
+#endif
+ raw_encoded_data_(byte_array->GetDataStartAddress()) {
+}
+
+HandlerTable::HandlerTable(Address instruction_start,
+ size_t handler_table_offset)
+ : number_of_entries_(0),
+#ifdef DEBUG
+ mode_(kReturnAddressBasedEncoding),
+#endif
+ raw_encoded_data_(instruction_start + handler_table_offset) {
+ if (handler_table_offset > 0) {
+ number_of_entries_ = Memory::int32_at(raw_encoded_data_);
+ raw_encoded_data_ += sizeof(int32_t);
+ }
+}
+
+int HandlerTable::GetRangeStart(int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeStartIndex;
+ return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+}
+
+int HandlerTable::GetRangeEnd(int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeEndIndex;
+ return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+}
+
+int HandlerTable::GetRangeHandler(int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeHandlerIndex;
+ return HandlerOffsetField::decode(
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+}
+
+int HandlerTable::GetRangeData(int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeDataIndex;
+ return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+}
+
+HandlerTable::CatchPrediction HandlerTable::GetRangePrediction(
+ int index) const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfRangeEntries());
+ int offset = index * kRangeEntrySize + kRangeHandlerIndex;
+ return HandlerPredictionField::decode(
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+}
+
+int HandlerTable::GetReturnOffset(int index) const {
+ DCHECK_EQ(kReturnAddressBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfReturnEntries());
+ int offset = index * kReturnEntrySize + kReturnOffsetIndex;
+ return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+}
+
+int HandlerTable::GetReturnHandler(int index) const {
+ DCHECK_EQ(kReturnAddressBasedEncoding, mode_);
+ DCHECK_LT(index, NumberOfReturnEntries());
+ int offset = index * kReturnEntrySize + kReturnHandlerIndex;
+ return HandlerOffsetField::decode(
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+}
+
+void HandlerTable::SetRangeStart(int index, int value) {
+ int offset = index * kRangeEntrySize + kRangeStartIndex;
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+}
+
+void HandlerTable::SetRangeEnd(int index, int value) {
+ int offset = index * kRangeEntrySize + kRangeEndIndex;
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+}
+
+void HandlerTable::SetRangeHandler(int index, int handler_offset,
+ CatchPrediction prediction) {
+ int value = HandlerOffsetField::encode(handler_offset) |
+ HandlerPredictionField::encode(prediction);
+ int offset = index * kRangeEntrySize + kRangeHandlerIndex;
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+}
+
+void HandlerTable::SetRangeData(int index, int value) {
+ int offset = index * kRangeEntrySize + kRangeDataIndex;
+ Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+}
+
+// static
+int HandlerTable::LengthForRange(int entries) {
+ return entries * kRangeEntrySize * sizeof(int32_t);
+}
+
+// static
+int HandlerTable::EmitReturnTableStart(Assembler* masm, int entries) {
+ masm->DataAlign(sizeof(int32_t)); // Make sure entries are aligned.
+ masm->RecordComment(";;; Exception handler table.");
+ int table_start = masm->pc_offset();
+ masm->dd(entries);
+ return table_start;
+}
+
+// static
+void HandlerTable::EmitReturnEntry(Assembler* masm, int offset, int handler) {
+ masm->dd(offset);
+ masm->dd(HandlerOffsetField::encode(handler));
+}
+
+int HandlerTable::NumberOfRangeEntries() const {
+ DCHECK_EQ(kRangeBasedEncoding, mode_);
+ return number_of_entries_;
+}
+
+int HandlerTable::NumberOfReturnEntries() const {
+ DCHECK_EQ(kReturnAddressBasedEncoding, mode_);
+ return number_of_entries_;
+}
+
+int HandlerTable::LookupRange(int pc_offset, int* data_out,
+ CatchPrediction* prediction_out) {
+ int innermost_handler = -1;
+#ifdef DEBUG
+ // Assuming that ranges are well nested, we don't need to track the innermost
+ // offsets. This is just to verify that the table is actually well nested.
+ int innermost_start = std::numeric_limits<int>::min();
+ int innermost_end = std::numeric_limits<int>::max();
+#endif
+ for (int i = 0; i < NumberOfRangeEntries(); ++i) {
+ int start_offset = GetRangeStart(i);
+ int end_offset = GetRangeEnd(i);
+ int handler_offset = GetRangeHandler(i);
+ int handler_data = GetRangeData(i);
+ CatchPrediction prediction = GetRangePrediction(i);
+ if (pc_offset >= start_offset && pc_offset < end_offset) {
+ DCHECK_GE(start_offset, innermost_start);
+ DCHECK_LT(end_offset, innermost_end);
+ innermost_handler = handler_offset;
+#ifdef DEBUG
+ innermost_start = start_offset;
+ innermost_end = end_offset;
+#endif
+ if (data_out) *data_out = handler_data;
+ if (prediction_out) *prediction_out = prediction;
+ }
+ }
+ return innermost_handler;
+}
+
+// TODO(turbofan): Make sure table is sorted and use binary search.
+int HandlerTable::LookupReturn(int pc_offset) {
+ for (int i = 0; i < NumberOfReturnEntries(); ++i) {
+ int return_offset = GetReturnOffset(i);
+ if (pc_offset == return_offset) {
+ return GetReturnHandler(i);
+ }
+ }
+ return -1;
+}
+
+#ifdef ENABLE_DISASSEMBLER
+
+void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
+ os << " from to hdlr (prediction, data)\n";
+ for (int i = 0; i < NumberOfRangeEntries(); ++i) {
+ int pc_start = GetRangeStart(i);
+ int pc_end = GetRangeEnd(i);
+ int handler_offset = GetRangeHandler(i);
+ int handler_data = GetRangeData(i);
+ CatchPrediction prediction = GetRangePrediction(i);
+ os << " (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
+ << ") -> " << std::setw(4) << handler_offset
+ << " (prediction=" << prediction << ", data=" << handler_data << ")\n";
+ }
+}
+
+void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
+ os << " off hdlr\n";
+ for (int i = 0; i < NumberOfReturnEntries(); ++i) {
+ int pc_offset = GetReturnOffset(i);
+ int handler_offset = GetReturnHandler(i);
+ os << " " << std::setw(4) << pc_offset << " -> " << std::setw(4)
+ << handler_offset << "\n";
+ }
+}
+
+#endif // ENABLE_DISASSEMBLER
+
+} // namespace internal
+} // namespace v8
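
LookupRange above depends on the try ranges being well nested and listed
outermost-first (the DCHECKs verify the nesting), so a full linear scan that
keeps the last matching entry yields the innermost handler. A self-contained
sketch of that search, using plain structs instead of the packed encoding:

    #include <cassert>
    #include <vector>

    struct Range { int start, end, handler; };  // [start, end), illustrative

    // Outer ranges precede inner ones, so the last match is the innermost.
    int LookupInnermost(const std::vector<Range>& table, int pc_offset) {
      int handler = -1;
      for (const Range& r : table) {
        if (pc_offset >= r.start && pc_offset < r.end) handler = r.handler;
      }
      return handler;
    }

    int main() {
      // Outer try at [0,100) -> handler 100; nested try at [10,20) -> 200.
      std::vector<Range> table = {{0, 100, 100}, {10, 20, 200}};
      assert(LookupInnermost(table, 15) == 200);   // inner handler wins
      assert(LookupInnermost(table, 50) == 100);
      assert(LookupInnermost(table, 100) == -1);   // end is exclusive
    }
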
diff --git a/deps/v8/src/handler-table.h b/deps/v8/src/handler-table.h
new file mode 100644
index 0000000000..c2e282001c
--- /dev/null
+++ b/deps/v8/src/handler-table.h
@@ -0,0 +1,135 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLER_TABLE_H_
+#define V8_HANDLER_TABLE_H_
+
+#include "src/assert-scope.h"
+#include "src/globals.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Assembler;
+class ByteArray;
+class BytecodeArray;
+
+// HandlerTable is a byte array containing entries for exception handlers in
+// the code object it is associated with. The tables come in two flavors:
+// 1) Based on ranges: Used for unoptimized code. Stored in a {ByteArray} that
+// is attached to each {BytecodeArray}. Contains one entry per exception
+// handler and a range representing the try-block covered by that handler.
+// Layout looks as follows:
+// [ range-start , range-end , handler-offset , handler-data ]
+// 2) Based on return addresses: Used for turbofanned code. Stored directly in
+// the instruction stream of the {Code} object. Contains one entry per
+// call-site that could throw an exception. Layout looks as follows:
+// [ return-address-offset , handler-offset ]
+class V8_EXPORT_PRIVATE HandlerTable {
+ public:
+ // Conservative prediction whether a given handler will locally catch an
+ // exception or cause a re-throw to outside the code boundary. Since this
+ // is undecidable, it is merely an approximation (e.g. useful for the
+ // debugger).
+ enum CatchPrediction {
+ UNCAUGHT, // The handler will (likely) rethrow the exception.
+ CAUGHT, // The exception will be caught by the handler.
+ PROMISE, // The exception will be caught and cause a promise rejection.
+ DESUGARING, // The exception will be caught, but both the exception and
+ // the catching are part of a desugaring and should therefore
+ // not be visible to the user (we won't notify the debugger of
+ // such exceptions).
+ ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
+ // in the desugaring of an async function, so special
+ // async/await handling in the debugger can take place.
+ };
+
+ // Constructors for the various encodings.
+ explicit HandlerTable(Code* code);
+ explicit HandlerTable(ByteArray* byte_array);
+ explicit HandlerTable(BytecodeArray* bytecode_array);
+ explicit HandlerTable(Address instruction_start, size_t handler_table_offset);
+
+ // Getters for handler table based on ranges.
+ int GetRangeStart(int index) const;
+ int GetRangeEnd(int index) const;
+ int GetRangeHandler(int index) const;
+ int GetRangeData(int index) const;
+
+ // Setters for handler table based on ranges.
+ void SetRangeStart(int index, int value);
+ void SetRangeEnd(int index, int value);
+ void SetRangeHandler(int index, int offset, CatchPrediction pred);
+ void SetRangeData(int index, int value);
+
+ // Returns the required length of the underlying byte array.
+ static int LengthForRange(int entries);
+
+ // Emitters for handler table based on return addresses.
+ static int EmitReturnTableStart(Assembler* masm, int entries);
+ static void EmitReturnEntry(Assembler* masm, int offset, int handler);
+
+ // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
+ // the start of the potentially throwing instruction (using return addresses
+ // for this value would be invalid).
+ int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
+
+ // Lookup handler in a table based on return addresses.
+ int LookupReturn(int pc_offset);
+
+ // Returns the number of entries in the table.
+ int NumberOfRangeEntries() const;
+ int NumberOfReturnEntries() const;
+
+#ifdef ENABLE_DISASSEMBLER
+ void HandlerTableRangePrint(std::ostream& os); // NOLINT
+ void HandlerTableReturnPrint(std::ostream& os); // NOLINT
+#endif
+
+ private:
+ enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
+
+ // Getters for handler table based on ranges.
+ CatchPrediction GetRangePrediction(int index) const;
+
+ // Getters for handler table based on return addresses.
+ int GetReturnOffset(int index) const;
+ int GetReturnHandler(int index) const;
+
+ // Number of entries in the loaded handler table.
+ int number_of_entries_;
+
+#ifdef DEBUG
+ // The encoding mode of the table. Mostly useful for debugging, to check
+ // that the accessors and constructor used fit together.
+ EncodingMode mode_;
+#endif
+
+ // Direct pointer into the encoded data. This pointer points into an object
+ // on the GC heap (either {ByteArray} or {Code}) and hence would become
+ // stale during a collection, so we disallow any allocation.
+ Address raw_encoded_data_;
+ DisallowHeapAllocation no_gc_;
+
+ // Layout description for handler table based on ranges.
+ static const int kRangeStartIndex = 0;
+ static const int kRangeEndIndex = 1;
+ static const int kRangeHandlerIndex = 2;
+ static const int kRangeDataIndex = 3;
+ static const int kRangeEntrySize = 4;
+
+ // Layout description for handler table based on return addresses.
+ static const int kReturnOffsetIndex = 0;
+ static const int kReturnHandlerIndex = 1;
+ static const int kReturnEntrySize = 2;
+
+ // Encoding of the {handler} field.
+ class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
+ class HandlerOffsetField : public BitField<int, 3, 29> {};
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLER_TABLE_H_
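
The handler field declared at the bottom of this header packs a
CatchPrediction into bits 0..2 and the handler offset into bits 3..31. The
same packing written with plain shifts and masks (field widths taken from the
BitField declarations; the helper names are invented for the sketch):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kPredictionBits = 3;
    constexpr uint32_t kPredictionMask = (1u << kPredictionBits) - 1;

    uint32_t EncodeHandlerField(int handler_offset, int prediction) {
      return (static_cast<uint32_t>(handler_offset) << kPredictionBits) |
             (static_cast<uint32_t>(prediction) & kPredictionMask);
    }

    int DecodeHandlerOffset(uint32_t field) {
      return static_cast<int>(field >> kPredictionBits);
    }

    int DecodePrediction(uint32_t field) {
      return static_cast<int>(field & kPredictionMask);
    }

    int main() {
      uint32_t field = EncodeHandlerField(0x1234, 2);  // 2 == PROMISE
      assert(DecodeHandlerOffset(field) == 0x1234);
      assert(DecodePrediction(field) == 2);
    }
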
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index e747ba2720..ae06892675 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -5,245 +5,258 @@
#ifndef V8_HEAP_SYMBOLS_H_
#define V8_HEAP_SYMBOLS_H_
-#define INTERNALIZED_STRING_LIST(V) \
- V(anonymous_function_string, "(anonymous function)") \
- V(anonymous_string, "anonymous") \
- V(add_string, "add") \
- V(apply_string, "apply") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(arguments_to_string, "[object Arguments]") \
- V(Array_string, "Array") \
- V(ArrayIterator_string, "Array Iterator") \
- V(assign_string, "assign") \
- V(async_string, "async") \
- V(await_string, "await") \
- V(array_to_string, "[object Array]") \
- V(boolean_to_string, "[object Boolean]") \
- V(date_to_string, "[object Date]") \
- V(error_to_string, "[object Error]") \
- V(function_to_string, "[object Function]") \
- V(number_to_string, "[object Number]") \
- V(object_to_string, "[object Object]") \
- V(regexp_to_string, "[object RegExp]") \
- V(string_to_string, "[object String]") \
- V(bigint_string, "bigint") \
- V(BigInt_string, "BigInt") \
- V(bind_string, "bind") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(bound__string, "bound ") \
- V(buffer_string, "buffer") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(call_string, "call") \
- V(callee_string, "callee") \
- V(caller_string, "caller") \
- V(cell_value_string, "%cell_value") \
- V(char_at_string, "CharAt") \
- V(closure_string, "(closure)") \
- V(column_string, "column") \
- V(configurable_string, "configurable") \
- V(constructor_string, "constructor") \
- V(construct_string, "construct") \
- V(create_string, "create") \
- V(currency_string, "currency") \
- V(Date_string, "Date") \
- V(dayperiod_string, "dayperiod") \
- V(day_string, "day") \
- V(decimal_string, "decimal") \
- V(default_string, "default") \
- V(defineProperty_string, "defineProperty") \
- V(deleteProperty_string, "deleteProperty") \
- V(did_handle_string, "didHandle") \
- V(display_name_string, "displayName") \
- V(done_string, "done") \
- V(dotAll_string, "dotAll") \
- V(dot_catch_string, ".catch") \
- V(dot_for_string, ".for") \
- V(dot_generator_object_string, ".generator_object") \
- V(dot_iterator_string, ".iterator") \
- V(dot_result_string, ".result") \
- V(dot_switch_tag_string, ".switch_tag") \
- V(dot_string, ".") \
- V(exec_string, "exec") \
- V(entries_string, "entries") \
- V(enqueue_string, "enqueue") \
- V(enumerable_string, "enumerable") \
- V(era_string, "era") \
- V(Error_string, "Error") \
- V(eval_string, "eval") \
- V(EvalError_string, "EvalError") \
- V(false_string, "false") \
- V(flags_string, "flags") \
- V(fraction_string, "fraction") \
- V(function_string, "function") \
- V(Function_string, "Function") \
- V(Generator_string, "Generator") \
- V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
- V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
- V(getPrototypeOf_string, "getPrototypeOf") \
- V(get_string, "get") \
- V(get_space_string, "get ") \
- V(global_string, "global") \
- V(group_string, "group") \
- V(groups_string, "groups") \
- V(has_string, "has") \
- V(hour_string, "hour") \
- V(ignoreCase_string, "ignoreCase") \
- V(illegal_access_string, "illegal access") \
- V(illegal_argument_string, "illegal argument") \
- V(index_string, "index") \
- V(infinity_string, "infinity") \
- V(Infinity_string, "Infinity") \
- V(integer_string, "integer") \
- V(input_string, "input") \
- V(isExtensible_string, "isExtensible") \
- V(isView_string, "isView") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(keys_string, "keys") \
- V(lastIndex_string, "lastIndex") \
- V(length_string, "length") \
- V(let_string, "let") \
- V(line_string, "line") \
- V(literal_string, "literal") \
- V(Map_string, "Map") \
- V(message_string, "message") \
- V(minus_Infinity_string, "-Infinity") \
- V(minus_zero_string, "-0") \
- V(minusSign_string, "minusSign") \
- V(minute_string, "minute") \
- V(Module_string, "Module") \
- V(month_string, "month") \
- V(multiline_string, "multiline") \
- V(name_string, "name") \
- V(native_string, "native") \
- V(nan_string, "nan") \
- V(NaN_string, "NaN") \
- V(new_target_string, ".new.target") \
- V(next_string, "next") \
- V(NFC_string, "NFC") \
- V(NFD_string, "NFD") \
- V(NFKC_string, "NFKC") \
- V(NFKD_string, "NFKD") \
- V(not_equal, "not-equal") \
- V(null_string, "null") \
- V(null_to_string, "[object Null]") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(object_string, "object") \
- V(Object_string, "Object") \
- V(ok, "ok") \
- V(one_string, "1") \
- V(ownKeys_string, "ownKeys") \
- V(percentSign_string, "percentSign") \
- V(plusSign_string, "plusSign") \
- V(position_string, "position") \
- V(preventExtensions_string, "preventExtensions") \
- V(Promise_string, "Promise") \
- V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
- V(promise_string, "promise") \
- V(proto_string, "__proto__") \
- V(prototype_string, "prototype") \
- V(proxy_string, "proxy") \
- V(Proxy_string, "Proxy") \
- V(query_colon_string, "(?:)") \
- V(RangeError_string, "RangeError") \
- V(raw_string, "raw") \
- V(ReferenceError_string, "ReferenceError") \
- V(RegExp_string, "RegExp") \
- V(reject_string, "reject") \
- V(resolve_string, "resolve") \
- V(return_string, "return") \
- V(revoke_string, "revoke") \
- V(script_string, "script") \
- V(second_string, "second") \
- V(setPrototypeOf_string, "setPrototypeOf") \
- V(set_space_string, "set ") \
- V(set_string, "set") \
- V(Set_string, "Set") \
- V(source_string, "source") \
- V(sourceText_string, "sourceText") \
- V(stack_string, "stack") \
- V(stackTraceLimit_string, "stackTraceLimit") \
- V(star_default_star_string, "*default*") \
- V(sticky_string, "sticky") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(symbol_string, "symbol") \
- V(Symbol_string, "Symbol") \
- V(symbol_species_string, "[Symbol.species]") \
- V(SyntaxError_string, "SyntaxError") \
- V(then_string, "then") \
- V(this_function_string, ".this_function") \
- V(this_string, "this") \
- V(throw_string, "throw") \
- V(timed_out, "timed-out") \
- V(timeZoneName_string, "timeZoneName") \
- V(toJSON_string, "toJSON") \
- V(toString_string, "toString") \
- V(true_string, "true") \
- V(TypeError_string, "TypeError") \
- V(type_string, "type") \
- V(CompileError_string, "CompileError") \
- V(LinkError_string, "LinkError") \
- V(RuntimeError_string, "RuntimeError") \
- V(undefined_string, "undefined") \
- V(undefined_to_string, "[object Undefined]") \
- V(unicode_string, "unicode") \
- V(use_asm_string, "use asm") \
- V(use_strict_string, "use strict") \
- V(URIError_string, "URIError") \
- V(valueOf_string, "valueOf") \
- V(values_string, "values") \
- V(value_string, "value") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
- V(weekday_string, "weekday") \
- V(will_handle_string, "willHandle") \
- V(writable_string, "writable") \
- V(year_string, "year") \
+#define INTERNALIZED_STRING_LIST(V) \
+ V(add_string, "add") \
+ V(anonymous_function_string, "(anonymous function)") \
+ V(anonymous_string, "anonymous") \
+ V(apply_string, "apply") \
+ V(Arguments_string, "Arguments") \
+ V(arguments_string, "arguments") \
+ V(arguments_to_string, "[object Arguments]") \
+ V(Array_string, "Array") \
+ V(array_to_string, "[object Array]") \
+ V(ArrayBuffer_string, "ArrayBuffer") \
+ V(ArrayIterator_string, "Array Iterator") \
+ V(assign_string, "assign") \
+ V(async_string, "async") \
+ V(await_string, "await") \
+ V(BigInt_string, "BigInt") \
+ V(bigint_string, "bigint") \
+ V(BigInt64Array_string, "BigInt64Array") \
+ V(BigUint64Array_string, "BigUint64Array") \
+ V(bind_string, "bind") \
+ V(Boolean_string, "Boolean") \
+ V(boolean_string, "boolean") \
+ V(boolean_to_string, "[object Boolean]") \
+ V(bound__string, "bound ") \
+ V(buffer_string, "buffer") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(call_string, "call") \
+ V(callee_string, "callee") \
+ V(caller_string, "caller") \
+ V(cell_value_string, "%cell_value") \
+ V(char_at_string, "CharAt") \
+ V(closure_string, "(closure)") \
+ V(column_string, "column") \
+ V(CompileError_string, "CompileError") \
+ V(configurable_string, "configurable") \
+ V(construct_string, "construct") \
+ V(constructor_string, "constructor") \
+ V(create_string, "create") \
+ V(currency_string, "currency") \
+ V(Date_string, "Date") \
+ V(date_to_string, "[object Date]") \
+ V(day_string, "day") \
+ V(dayperiod_string, "dayperiod") \
+ V(decimal_string, "decimal") \
+ V(default_string, "default") \
+ V(defineProperty_string, "defineProperty") \
+ V(deleteProperty_string, "deleteProperty") \
+ V(did_handle_string, "didHandle") \
+ V(display_name_string, "displayName") \
+ V(done_string, "done") \
+ V(dot_catch_string, ".catch") \
+ V(dot_for_string, ".for") \
+ V(dot_generator_object_string, ".generator_object") \
+ V(dot_iterator_string, ".iterator") \
+ V(dot_result_string, ".result") \
+ V(dot_string, ".") \
+ V(dot_switch_tag_string, ".switch_tag") \
+ V(dotAll_string, "dotAll") \
+ V(enqueue_string, "enqueue") \
+ V(entries_string, "entries") \
+ V(enumerable_string, "enumerable") \
+ V(era_string, "era") \
+ V(Error_string, "Error") \
+ V(error_to_string, "[object Error]") \
+ V(eval_string, "eval") \
+ V(EvalError_string, "EvalError") \
+ V(exec_string, "exec") \
+ V(false_string, "false") \
+ V(flags_string, "flags") \
+ V(Float32Array_string, "Float32Array") \
+ V(Float64Array_string, "Float64Array") \
+ V(fraction_string, "fraction") \
+ V(Function_string, "Function") \
+ V(function_native_code_string, "function () { [native code] }") \
+ V(function_string, "function") \
+ V(function_to_string, "[object Function]") \
+ V(Generator_string, "Generator") \
+ V(get_space_string, "get ") \
+ V(get_string, "get") \
+ V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
+ V(getPrototypeOf_string, "getPrototypeOf") \
+ V(global_string, "global") \
+ V(group_string, "group") \
+ V(groups_string, "groups") \
+ V(has_string, "has") \
+ V(hour_string, "hour") \
+ V(ignoreCase_string, "ignoreCase") \
+ V(illegal_access_string, "illegal access") \
+ V(illegal_argument_string, "illegal argument") \
+ V(index_string, "index") \
+ V(Infinity_string, "Infinity") \
+ V(infinity_string, "infinity") \
+ V(input_string, "input") \
+ V(Int16Array_string, "Int16Array") \
+ V(Int32Array_string, "Int32Array") \
+ V(Int8Array_string, "Int8Array") \
+ V(integer_string, "integer") \
+ V(isExtensible_string, "isExtensible") \
+ V(isView_string, "isView") \
+ V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(keys_string, "keys") \
+ V(lastIndex_string, "lastIndex") \
+ V(length_string, "length") \
+ V(let_string, "let") \
+ V(line_string, "line") \
+ V(LinkError_string, "LinkError") \
+ V(literal_string, "literal") \
+ V(Map_string, "Map") \
+ V(MapIterator_string, "Map Iterator") \
+ V(message_string, "message") \
+ V(minus_Infinity_string, "-Infinity") \
+ V(minus_zero_string, "-0") \
+ V(minusSign_string, "minusSign") \
+ V(minute_string, "minute") \
+ V(Module_string, "Module") \
+ V(month_string, "month") \
+ V(multiline_string, "multiline") \
+ V(name_string, "name") \
+ V(NaN_string, "NaN") \
+ V(nan_string, "nan") \
+ V(native_string, "native") \
+ V(new_target_string, ".new.target") \
+ V(next_string, "next") \
+ V(NFC_string, "NFC") \
+ V(NFD_string, "NFD") \
+ V(NFKC_string, "NFKC") \
+ V(NFKD_string, "NFKD") \
+ V(not_equal, "not-equal") \
+ V(null_string, "null") \
+ V(null_to_string, "[object Null]") \
+ V(Number_string, "Number") \
+ V(number_string, "number") \
+ V(number_to_string, "[object Number]") \
+ V(Object_string, "Object") \
+ V(object_string, "object") \
+ V(object_to_string, "[object Object]") \
+ V(ok, "ok") \
+ V(one_string, "1") \
+ V(ownKeys_string, "ownKeys") \
+ V(percentSign_string, "percentSign") \
+ V(plusSign_string, "plusSign") \
+ V(position_string, "position") \
+ V(preventExtensions_string, "preventExtensions") \
+ V(Promise_string, "Promise") \
+ V(promise_string, "promise") \
+ V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
+ V(proto_string, "__proto__") \
+ V(prototype_string, "prototype") \
+ V(proxy_string, "proxy") \
+ V(Proxy_string, "Proxy") \
+ V(query_colon_string, "(?:)") \
+ V(RangeError_string, "RangeError") \
+ V(raw_string, "raw") \
+ V(ReferenceError_string, "ReferenceError") \
+ V(RegExp_string, "RegExp") \
+ V(regexp_to_string, "[object RegExp]") \
+ V(reject_string, "reject") \
+ V(resolve_string, "resolve") \
+ V(return_string, "return") \
+ V(revoke_string, "revoke") \
+ V(RuntimeError_string, "RuntimeError") \
+ V(Script_string, "Script") \
+ V(script_string, "script") \
+ V(second_string, "second") \
+ V(set_space_string, "set ") \
+ V(Set_string, "Set") \
+ V(set_string, "set") \
+ V(SetIterator_string, "Set Iterator") \
+ V(setPrototypeOf_string, "setPrototypeOf") \
+ V(SharedArrayBuffer_string, "SharedArrayBuffer") \
+ V(source_string, "source") \
+ V(sourceText_string, "sourceText") \
+ V(stack_string, "stack") \
+ V(stackTraceLimit_string, "stackTraceLimit") \
+ V(star_default_star_string, "*default*") \
+ V(sticky_string, "sticky") \
+ V(String_string, "String") \
+ V(string_string, "string") \
+ V(string_to_string, "[object String]") \
+ V(symbol_species_string, "[Symbol.species]") \
+ V(Symbol_string, "Symbol") \
+ V(symbol_string, "symbol") \
+ V(SyntaxError_string, "SyntaxError") \
+ V(then_string, "then") \
+ V(this_function_string, ".this_function") \
+ V(this_string, "this") \
+ V(throw_string, "throw") \
+ V(timed_out, "timed-out") \
+ V(timeZoneName_string, "timeZoneName") \
+ V(toJSON_string, "toJSON") \
+ V(toString_string, "toString") \
+ V(true_string, "true") \
+ V(type_string, "type") \
+ V(TypeError_string, "TypeError") \
+ V(Uint16Array_string, "Uint16Array") \
+ V(Uint32Array_string, "Uint32Array") \
+ V(Uint8Array_string, "Uint8Array") \
+ V(Uint8ClampedArray_string, "Uint8ClampedArray") \
+ V(undefined_string, "undefined") \
+ V(undefined_to_string, "[object Undefined]") \
+ V(unicode_string, "unicode") \
+ V(URIError_string, "URIError") \
+ V(use_asm_string, "use asm") \
+ V(use_strict_string, "use strict") \
+ V(value_string, "value") \
+ V(valueOf_string, "valueOf") \
+ V(values_string, "values") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
+ V(weekday_string, "weekday") \
+ V(will_handle_string, "willHandle") \
+ V(writable_string, "writable") \
+ V(year_string, "year") \
V(zero_string, "0")
-#define PRIVATE_SYMBOL_LIST(V) \
- V(array_iteration_kind_symbol) \
- V(array_iterator_next_symbol) \
- V(array_iterator_object_symbol) \
- V(call_site_frame_array_symbol) \
- V(call_site_frame_index_symbol) \
- V(console_context_id_symbol) \
- V(console_context_name_symbol) \
- V(class_fields_symbol) \
- V(class_positions_symbol) \
- V(detailed_stack_trace_symbol) \
- V(elements_transition_symbol) \
- V(error_end_pos_symbol) \
- V(error_script_symbol) \
- V(error_start_pos_symbol) \
- V(frozen_symbol) \
- V(generic_symbol) \
- V(home_object_symbol) \
- V(intl_initialized_marker_symbol) \
- V(intl_pattern_symbol) \
- V(intl_resolved_symbol) \
- V(megamorphic_symbol) \
- V(native_context_index_symbol) \
- V(nonextensible_symbol) \
- V(not_mapped_symbol) \
- V(premonomorphic_symbol) \
- V(promise_async_stack_id_symbol) \
- V(promise_debug_marker_symbol) \
- V(promise_forwarding_handler_symbol) \
- V(promise_handled_by_symbol) \
- V(promise_async_id_symbol) \
- V(promise_default_resolve_handler_symbol) \
- V(promise_default_reject_handler_symbol) \
- V(sealed_symbol) \
- V(stack_trace_symbol) \
- V(strict_function_transition_symbol) \
- V(wasm_function_index_symbol) \
- V(wasm_instance_symbol) \
+#define PRIVATE_SYMBOL_LIST(V) \
+ V(call_site_frame_array_symbol) \
+ V(call_site_frame_index_symbol) \
+ V(console_context_id_symbol) \
+ V(console_context_name_symbol) \
+ V(class_fields_symbol) \
+ V(class_positions_symbol) \
+ V(detailed_stack_trace_symbol) \
+ V(elements_transition_symbol) \
+ V(error_end_pos_symbol) \
+ V(error_script_symbol) \
+ V(error_start_pos_symbol) \
+ V(frozen_symbol) \
+ V(generator_outer_promise_symbol) \
+ V(generic_symbol) \
+ V(home_object_symbol) \
+ V(intl_initialized_marker_symbol) \
+ V(intl_pattern_symbol) \
+ V(intl_resolved_symbol) \
+ V(megamorphic_symbol) \
+ V(native_context_index_symbol) \
+ V(nonextensible_symbol) \
+ V(not_mapped_symbol) \
+ V(premonomorphic_symbol) \
+ V(promise_async_stack_id_symbol) \
+ V(promise_debug_marker_symbol) \
+ V(promise_forwarding_handler_symbol) \
+ V(promise_handled_by_symbol) \
+ V(promise_async_id_symbol) \
+ V(sealed_symbol) \
+ V(stack_trace_symbol) \
+ V(strict_function_transition_symbol) \
+ V(wasm_function_index_symbol) \
+ V(wasm_instance_symbol) \
V(uninitialized_symbol)
#define PUBLIC_SYMBOL_LIST(V) \
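
The V(...) tables in this header are the X-macro pattern: a single list that
each call site expands with its own definition of V, so declarations,
initializers, and debug printers all stay in sync with one source of truth. A
minimal illustration (the list entries here are invented for the sketch):

    #include <cstdio>

    #define STRING_LIST(V) \
      V(add_string, "add") \
      V(apply_string, "apply")

    // Expansion 1: declare one const char* per entry.
    #define DECLARE(name, value) const char* name = value;
    STRING_LIST(DECLARE)
    #undef DECLARE

    int main() {
      // Expansion 2: print every entry from the same list.
    #define PRINT(name, value) std::printf("%s -> %s\n", #name, name);
      STRING_LIST(PRINT)
    #undef PRINT
    }
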
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 44ab099ba8..3aafd191cc 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -424,18 +424,11 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
shared_(shared),
bailout_(bailout),
on_hold_(on_hold),
- weak_objects_(weak_objects),
- total_marked_bytes_(0),
- pending_task_count_(0),
- task_count_(0) {
+ weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
CHECK(!FLAG_concurrent_marking);
#endif
- for (int i = 0; i <= kMaxTasks; i++) {
- is_pending_[i] = false;
- task_state_[i].marked_bytes = 0;
- }
}
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
@@ -443,13 +436,8 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
- LiveBytesMap* live_bytes = nullptr;
- {
- base::LockGuard<base::Mutex> guard(&task_state->lock);
- live_bytes = &task_state->live_bytes;
- }
- ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
- task_id);
+ ConcurrentMarkingVisitor visitor(shared_, bailout_, &task_state->live_bytes,
+ weak_objects_, task_id);
double time_ms;
size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
@@ -458,9 +446,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
{
TimedScope scope(&time_ms);
+
bool done = false;
while (!done) {
- base::LockGuard<base::Mutex> guard(&task_state->lock);
size_t current_marked_bytes = 0;
int objects_processed = 0;
while (current_marked_bytes < kBytesUntilInterruptCheck &&
@@ -484,17 +472,16 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
- if (task_state->interrupt_request.Value()) {
- task_state->interrupt_condition.Wait(&task_state->lock);
+ if (task_state->preemption_request.Value()) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ConcurrentMarking::Run Preempted");
+ break;
}
}
- {
- // Take the lock to synchronize with worklist update after
- // young generation GC.
- base::LockGuard<base::Mutex> guard(&task_state->lock);
- bailout_->FlushToGlobal(task_id);
- on_hold_->FlushToGlobal(task_id);
- }
+ shared_->FlushToGlobal(task_id);
+ bailout_->FlushToGlobal(task_id);
+ on_hold_->FlushToGlobal(task_id);
+
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
@@ -517,21 +504,21 @@ void ConcurrentMarking::ScheduleTasks() {
DCHECK(heap_->use_tasks());
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
+ DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
- // TODO(ulan): Increase the number of tasks for platforms that benefit
- // from it.
- task_count_ = static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() / 2);
- task_count_ = Max(Min(task_count_, kMaxTasks), 1);
+ task_count_ = Max(
+ 1, Min(kMaxTasks,
+ static_cast<int>(V8::GetCurrentPlatform()
+ ->NumberOfAvailableBackgroundThreads())));
}
// Task id 0 is for the main thread.
- for (int i = 1; i <= task_count_ && pending_task_count_ < task_count_; i++) {
+ for (int i = 1; i <= task_count_; i++) {
if (!is_pending_[i]) {
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Scheduling concurrent marking task %d\n", i);
}
- task_state_[i].interrupt_request.SetValue(false);
+ task_state_[i].preemption_request.SetValue(false);
is_pending_[i] = true;
++pending_task_count_;
Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
@@ -540,6 +527,7 @@ void ConcurrentMarking::ScheduleTasks() {
task, v8::Platform::kShortRunningTask);
}
}
+ DCHECK_EQ(task_count_, pending_task_count_);
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
@@ -553,25 +541,24 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
}
}
-void ConcurrentMarking::WaitForTasks() {
- if (!FLAG_concurrent_marking) return;
+bool ConcurrentMarking::Stop(StopRequest stop_request) {
+ if (!FLAG_concurrent_marking) return false;
base::LockGuard<base::Mutex> guard(&pending_lock_);
- while (pending_task_count_ > 0) {
- pending_condition_.Wait(&pending_lock_);
- }
-}
-void ConcurrentMarking::EnsureCompleted() {
- if (!FLAG_concurrent_marking) return;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
- CancelableTaskManager* task_manager =
- heap_->isolate()->cancelable_task_manager();
- for (int i = 1; i <= task_count_; i++) {
- if (is_pending_[i]) {
- if (task_manager->TryAbort(cancelable_id_[i]) ==
- CancelableTaskManager::kTaskAborted) {
- is_pending_[i] = false;
- --pending_task_count_;
+ if (pending_task_count_ == 0) return false;
+
+ if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
+ CancelableTaskManager* task_manager =
+ heap_->isolate()->cancelable_task_manager();
+ for (int i = 1; i <= task_count_; i++) {
+ if (is_pending_[i]) {
+ if (task_manager->TryAbort(cancelable_id_[i]) ==
+ CancelableTaskManager::kTaskAborted) {
+ is_pending_[i] = false;
+ --pending_task_count_;
+ } else if (stop_request == StopRequest::PREEMPT_TASKS) {
+ task_state_[i].preemption_request.SetValue(true);
+ }
}
}
}
@@ -581,6 +568,7 @@ void ConcurrentMarking::EnsureCompleted() {
for (int i = 1; i <= task_count_; i++) {
DCHECK(!is_pending_[i]);
}
+ return true;
}
void ConcurrentMarking::FlushLiveBytes(
@@ -620,25 +608,14 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
}
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
- : concurrent_marking_(concurrent_marking) {
- if (!FLAG_concurrent_marking) return;
- // Request task_state for all tasks.
- for (int i = 1; i <= kMaxTasks; i++) {
- concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
- }
- // Now take a lock to ensure that the tasks are waiting.
- for (int i = 1; i <= kMaxTasks; i++) {
- concurrent_marking_->task_state_[i].lock.Lock();
- }
+ : concurrent_marking_(concurrent_marking),
+ resume_on_exit_(concurrent_marking_->Stop(
+ ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+ DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}
ConcurrentMarking::PauseScope::~PauseScope() {
- if (!FLAG_concurrent_marking) return;
- for (int i = kMaxTasks; i >= 1; i--) {
- concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
- concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
- concurrent_marking_->task_state_[i].lock.Unlock();
- }
+ if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}
} // namespace internal
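
The mutex and condition-variable handshake removed above is replaced by a
single preemption_request flag that each marking task polls between work
batches; when the flag is set the task flushes its local worklists and
returns instead of blocking. The shape of that protocol, with std::atomic
standing in for base::AtomicValue and invented batch sizes:

    #include <atomic>
    #include <cstdio>

    std::atomic<bool> preemption_request{false};

    // Worker: process items in small batches, checking the flag in between.
    void RunMarkingTask() {
      int processed = 0;
      for (int batch = 0; batch < 1000; batch++) {
        processed += 10;  // stand-in for one bounded chunk of marking work
        if (preemption_request.load(std::memory_order_relaxed)) {
          std::printf("preempted after %d items\n", processed);
          break;  // flush local worklists to the global ones, then return
        }
      }
    }

    int main() {
      preemption_request.store(true);  // main thread requests preemption
      RunMarkingTask();
    }
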
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 0f0c8bf992..c5af406e45 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -2,10 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_CONCURRENT_MARKING_
-#define V8_HEAP_CONCURRENT_MARKING_
+#ifndef V8_HEAP_CONCURRENT_MARKING_H_
+#define V8_HEAP_CONCURRENT_MARKING_H_
+#include "include/v8-platform.h"
#include "src/allocation.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
@@ -26,26 +30,48 @@ using LiveBytesMap =
class ConcurrentMarking {
public:
// When the scope is entered, the concurrent marking tasks
- // are paused and are not looking at the heap objects.
+ // are preempted and are not looking at the heap objects; concurrent
+ // marking is resumed when the scope is exited.
class PauseScope {
public:
explicit PauseScope(ConcurrentMarking* concurrent_marking);
~PauseScope();
private:
- ConcurrentMarking* concurrent_marking_;
+ ConcurrentMarking* const concurrent_marking_;
+ const bool resume_on_exit_;
};
- static const int kMaxTasks = 4;
+ enum class StopRequest {
+ // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
+ PREEMPT_TASKS,
+ // Wait for ongoing tasks to complete (and cancel unstarted tasks).
+ COMPLETE_ONGOING_TASKS,
+ // Wait for all scheduled tasks to complete (only use this in tests that
+ // control the full stack -- otherwise tasks cancelled by the platform can
+ // make this call hang).
+ COMPLETE_TASKS_FOR_TESTING,
+ };
+
+ // TODO(gab): The only thing that prevents this being above 7 is
+ // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
+ // task 0, reserved for the main thread).
+ static constexpr int kMaxTasks = 7;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
MarkingWorklist* bailout, MarkingWorklist* on_hold,
WeakObjects* weak_objects);
+ // Schedules asynchronous tasks to perform concurrent marking. Objects in
+ // the heap should not be moved while these tasks are active; they can be
+ // stopped safely via Stop() or PauseScope.
void ScheduleTasks();
- void WaitForTasks();
- void EnsureCompleted();
+
+ // Stops concurrent marking per |stop_request|'s semantics. Returns true
+ // if concurrent marking was in progress, false otherwise.
+ bool Stop(StopRequest stop_request);
+
void RescheduleTasksIfNeeded();
// Flushes the local live bytes into the given marking state.
void FlushLiveBytes(MajorNonAtomicMarkingState* marking_state);
@@ -59,37 +85,32 @@ class ConcurrentMarking {
private:
struct TaskState {
- // When the concurrent marking task has this lock, then objects in the
- // heap are guaranteed to not move.
- base::Mutex lock;
- // The main thread sets this flag to true, when it wants the concurrent
- // maker to give up the lock.
- base::AtomicValue<bool> interrupt_request;
- // The concurrent marker waits on this condition until the request
- // flag is cleared by the main thread.
- base::ConditionVariable interrupt_condition;
+ // The main thread sets this flag to true when it wants the concurrent
+ // marker to give up the worker thread.
+ base::AtomicValue<bool> preemption_request;
+
LiveBytesMap live_bytes;
- size_t marked_bytes;
+ size_t marked_bytes = 0;
char cache_line_padding[64];
};
class Task;
void Run(int task_id, TaskState* task_state);
- Heap* heap_;
- MarkingWorklist* shared_;
- MarkingWorklist* bailout_;
- MarkingWorklist* on_hold_;
- WeakObjects* weak_objects_;
+ Heap* const heap_;
+ MarkingWorklist* const shared_;
+ MarkingWorklist* const bailout_;
+ MarkingWorklist* const on_hold_;
+ WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
- base::AtomicNumber<size_t> total_marked_bytes_;
+ base::AtomicNumber<size_t> total_marked_bytes_{0};
base::Mutex pending_lock_;
base::ConditionVariable pending_condition_;
- int pending_task_count_;
- bool is_pending_[kMaxTasks + 1];
- CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1];
- int task_count_;
+ int pending_task_count_ = 0;
+ bool is_pending_[kMaxTasks + 1] = {};
+ CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
+ int task_count_ = 0;
};
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_PAGE_PARALLEL_JOB_
+#endif // V8_HEAP_CONCURRENT_MARKING_H_
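
PauseScope captures Stop()'s return value so its destructor reschedules tasks
only when marking was actually in progress when the scope was entered. The
RAII shape reduced to its essentials (ConcurrentMarking here is a stub, not
the real class):

    #include <cstdio>

    struct ConcurrentMarking {  // illustrative stub
      bool running = true;
      bool Stop() { bool was = running; running = false; return was; }
      void Resume() { running = true; }
    };

    class PauseScope {
     public:
      explicit PauseScope(ConcurrentMarking* cm)
          : cm_(cm), resume_on_exit_(cm->Stop()) {}
      ~PauseScope() {
        if (resume_on_exit_) cm_->Resume();  // only if we actually stopped it
      }

     private:
      ConcurrentMarking* const cm_;
      const bool resume_on_exit_;
    };

    int main() {
      ConcurrentMarking cm;
      {
        PauseScope pause(&cm);  // marking preempted for this scope
        std::printf("paused: running=%d\n", cm.running);
      }
      std::printf("resumed: running=%d\n", cm.running);
    }
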
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index f4e5c1fe13..41af95fa44 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -626,6 +626,7 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
LargePage* page = heap_->lo_space()->first_page();
while (page != nullptr) {
if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndWritable();
}
page = page->next_page();
@@ -640,6 +641,7 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
LargePage* page = heap_->lo_space()->first_page();
while (page != nullptr) {
if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
page = page->next_page();
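
The new CHECKs guard a scope that flips executable pages writable on entry
and back to read+execute on exit. A POSIX-only sketch of that W^X discipline,
with mprotect in place of V8's page primitives (addr and len assumed
page-aligned; all names invented):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstdlib>

    class WritableCodeScope {
     public:
      WritableCodeScope(void* addr, size_t len) : addr_(addr), len_(len) {
        Protect(PROT_READ | PROT_WRITE);  // like SetReadAndWritable()
      }
      ~WritableCodeScope() {
        Protect(PROT_READ | PROT_EXEC);   // like SetReadAndExecutable()
      }

     private:
      void Protect(int prot) {
        if (mprotect(addr_, len_, prot) != 0) {
          std::perror("mprotect");
          std::abort();  // rough analogue of the CHECK in the scope
        }
      }
      void* const addr_;
      const size_t len_;
    };

    int main() {
      size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      void* p = mmap(nullptr, page, PROT_READ | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      {
        WritableCodeScope scope(p, page);  // patch code here
      }
      munmap(p, page);
    }
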
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 7f965602b8..9a83c0d172 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -17,7 +17,6 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
-#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -46,6 +45,7 @@
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
+#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/data-handler.h"
#include "src/objects/shared-function-info.h"
@@ -56,6 +56,7 @@
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
@@ -177,6 +178,7 @@ Heap::Heap()
raw_allocations_hash_(0),
stress_marking_observer_(nullptr),
stress_scavenge_observer_(nullptr),
+ allocation_step_in_progress_(false),
max_marking_limit_reached_(0.0),
ms_count_(0),
gc_count_(0),
@@ -461,30 +463,6 @@ bool Heap::IsRetainingPathTarget(HeapObject* object,
return false;
}
-namespace {
-const char* RootToString(Root root) {
- switch (root) {
-#define ROOT_CASE(root_id, ignore, description) \
- case Root::root_id: \
- return description;
- ROOT_ID_LIST(ROOT_CASE)
-#undef ROOT_CASE
- case Root::kCodeFlusher:
- return "(Code flusher)";
- case Root::kPartialSnapshotCache:
- return "(Partial snapshot cache)";
- case Root::kWeakCollections:
- return "(Weak collections)";
- case Root::kWrapperTracing:
- return "(Wrapper tracing)";
- case Root::kUnknown:
- return "(Unknown)";
- }
- UNREACHABLE();
- return nullptr;
-}
-} // namespace
-
void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
PrintF("\n\n\n");
PrintF("#################################################\n");
@@ -527,7 +505,7 @@ void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
}
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
- PrintF("Root: %s\n", RootToString(root));
+ PrintF("Root: %s\n", RootVisitor::RootName(root));
PrintF("-------------------------------------------------\n");
}
@@ -644,7 +622,7 @@ const char* Heap::GetSpaceName(int idx) {
return nullptr;
}
-void Heap::SetRootCodeStubs(NumberDictionary* value) {
+void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
@@ -1112,6 +1090,66 @@ void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
set_current_gc_flags(kNoGCFlags);
}
+namespace {
+
+intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
+ int words = size / kPointerSize;
+ DCHECK_EQ(a->Size(), size);
+ DCHECK_EQ(b->Size(), size);
+ intptr_t* slot_a = reinterpret_cast<intptr_t*>(a->address());
+ intptr_t* slot_b = reinterpret_cast<intptr_t*>(b->address());
+ for (int i = 0; i < words; i++) {
+ if (*slot_a != *slot_b) {
+ return *slot_a - *slot_b;
+ }
+ slot_a++;
+ slot_b++;
+ }
+ return 0;
+}
+
+void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
+ if (objects.size() == 0) return;
+
+ sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
+ intptr_t c = CompareWords(size, a, b);
+ if (c != 0) return c < 0;
+ return a < b;
+ });
+
+ std::vector<std::pair<int, HeapObject*>> duplicates;
+ HeapObject* current = objects[0];
+ int count = 1;
+ for (size_t i = 1; i < objects.size(); i++) {
+ if (CompareWords(size, current, objects[i]) == 0) {
+ count++;
+ } else {
+ if (count > 1) {
+ duplicates.push_back(std::make_pair(count - 1, current));
+ }
+ count = 1;
+ current = objects[i];
+ }
+ }
+ if (count > 1) {
+ duplicates.push_back(std::make_pair(count - 1, current));
+ }
+
+ int threshold = FLAG_trace_duplicate_threshold_kb * KB;
+
+ sort(duplicates.begin(), duplicates.end());
+ for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
+ int duplicate_bytes = it->first * size;
+ if (duplicate_bytes < threshold) break;
+ PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
+ duplicate_bytes / KB);
+ PrintF("Sample object: ");
+ it->second->Print();
+ PrintF("============================\n");
+ }
+}
+} // anonymous namespace
+
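
ReportDuplicates above finds duplicate objects in O(n log n): it sorts the
same-size objects by raw content so equal objects become adjacent, then
counts runs of equal neighbors. The same idea on plain strings (illustrative;
the size threshold is omitted):

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> objects = {"b", "a", "b", "b", "a", "c"};
      std::sort(objects.begin(), objects.end());  // equal items now adjacent

      size_t i = 0;
      while (i < objects.size()) {
        size_t j = i;
        while (j < objects.size() && objects[j] == objects[i]) j++;
        if (j - i > 1) {  // count - 1 duplicates, as in ReportDuplicates
          std::printf("%zu duplicate(s) of \"%s\"\n", j - i - 1,
                      objects[i].c_str());
        }
        i = j;
      }
    }
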
void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
@@ -1129,12 +1167,9 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
RuntimeCallTimerScope runtime_timer(
isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
- if (isolate()->concurrent_recompilation_enabled()) {
- // The optimizing compiler may be unnecessarily holding on to memory.
- DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
@@ -1151,6 +1186,28 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
UncommitFromSpace();
+
+ if (FLAG_trace_duplicate_threshold_kb) {
+ std::map<int, std::vector<HeapObject*>> objects_by_size;
+ PagedSpaces spaces(this);
+ for (PagedSpace* space = spaces.next(); space != nullptr;
+ space = spaces.next()) {
+ HeapObjectIterator it(space);
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ objects_by_size[obj->Size()].push_back(obj);
+ }
+ }
+ {
+ LargeObjectIterator it(lo_space());
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ objects_by_size[obj->Size()].push_back(obj);
+ }
+ }
+ for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
+ ++it) {
+ ReportDuplicates(it->first, it->second);
+ }
+ }
}
void Heap::ReportExternalMemoryPressure() {
@@ -1316,11 +1373,8 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer_->NotifyPossibleGarbage(event);
}
- if (isolate()->concurrent_recompilation_enabled()) {
- // Flush the queued recompilation tasks.
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
+
number_of_disposed_maps_ = retained_maps()->Length();
tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
@@ -1733,12 +1787,12 @@ void Heap::MarkCompact() {
void Heap::MinorMarkCompact() {
DCHECK(FLAG_minor_mc);
+ PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
AlwaysAllocateScope always_allocate(isolate());
- PauseAllocationObserversScope pause_observers(this);
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
CodeSpaceMemoryModificationScope code_modifcation(this);
@@ -1924,11 +1978,10 @@ int Heap::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
static_cast<int>(new_space()->TotalCapacity()) / MB;
- return Max(
- 1,
- Min(Min(num_scavenge_tasks, kMaxScavengerTasks),
- static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())));
+ static int num_cores =
+ 1 + static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+ return Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
}
void Heap::Scavenge() {
@@ -2015,7 +2068,7 @@ void Heap::Scavenge() {
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run();
+ job.Run(isolate()->async_counters());
DCHECK(copied_list.IsGlobalEmpty());
DCHECK(promotion_list.IsGlobalEmpty());
}
@@ -2187,7 +2240,8 @@ void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
if (!new_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(),
+ v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
+ new_space_strings_.data(),
new_space_strings_.data() + new_space_strings_.size());
}
}
@@ -2195,7 +2249,8 @@ void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
IterateNewSpaceStrings(v);
if (!old_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(),
+ v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
+ old_space_strings_.data(),
old_space_strings_.data() + old_space_strings_.size());
}
}
@@ -2301,7 +2356,8 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
explicit ExternalStringTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor)
: visitor_(visitor) {}
- virtual void VisitRootPointers(Root root, Object** start, Object** end) {
+ virtual void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
@@ -2512,12 +2568,12 @@ AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
return result;
}
-AllocationResult Heap::AllocateBigInt(int length) {
+AllocationResult Heap::AllocateBigInt(int length, PretenureFlag pretenure) {
if (length < 0 || length > BigInt::kMaxLength) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
}
int size = BigInt::SizeFor(length);
- AllocationSpace space = SelectSpace(NOT_TENURED);
+ AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, space);
@@ -2541,6 +2597,20 @@ AllocationResult Heap::AllocateCell(Object* value) {
return result;
}
+AllocationResult Heap::AllocateFeedbackCell(Map* map, HeapObject* value) {
+ int size = FeedbackCell::kSize;
+ STATIC_ASSERT(FeedbackCell::kSize <= kMaxRegularHeapObjectSize);
+
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ }
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ FeedbackCell::cast(result)->set_value(value);
+ return result;
+}
+
AllocationResult Heap::AllocatePropertyCell(Name* name) {
DCHECK(name->IsUniqueName());
int size = PropertyCell::kSize;
@@ -2849,11 +2919,11 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_parameter_count(parameter_count);
instance->set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
- instance->set_interrupt_budget(interpreter::Interpreter::kInterruptBudget);
+ instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_osr_loop_nesting_level(0);
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(constant_pool);
- instance->set_handler_table(empty_fixed_array());
+ instance->set_handler_table(empty_byte_array());
instance->set_source_position_table(empty_byte_array());
CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
instance->clear_padding();
@@ -3145,10 +3215,10 @@ AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
AllocationResult Heap::AllocateCode(
const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, HandlerTable* handler_table,
- ByteArray* source_position_table, DeoptimizationData* deopt_data,
- Movability movability, uint32_t stub_key, bool is_turbofanned,
- int stack_slots, int safepoint_table_offset) {
+ CodeDataContainer* data_container, ByteArray* source_position_table,
+ DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
+ bool is_turbofanned, int stack_slots, int safepoint_table_offset,
+ int handler_table_offset) {
bool has_unwinding_info = desc.unwinding_info != nullptr;
DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
(!has_unwinding_info && desc.unwinding_info_size == 0));
@@ -3174,11 +3244,11 @@ AllocationResult Heap::AllocateCode(
code->set_relocation_info(reloc_info);
code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
code->set_safepoint_table_offset(safepoint_table_offset);
+ code->set_handler_table_offset(handler_table_offset);
code->set_code_data_container(data_container);
code->set_has_tagged_params(true);
code->set_deoptimization_data(deopt_data);
code->set_stub_key(stub_key);
- code->set_handler_table(handler_table);
code->set_source_position_table(source_position_table);
code->set_protected_instructions(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
@@ -3322,6 +3392,21 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
return result;
}
+AllocationResult Heap::AllocateJSPromise(JSFunction* constructor,
+ PretenureFlag pretenure) {
+ AllocationResult allocation = AllocateJSObject(constructor, pretenure);
+ JSPromise* promise = nullptr;
+ if (!allocation.To(&promise)) return allocation;
+
+ // Set up JSPromise fields.
+ promise->set_reactions_or_result(Smi::kZero);
+ promise->set_flags(0);
+ for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
+ promise->SetEmbedderField(i, Smi::kZero);
+ }
+ return promise;
+}
+
void Heap::InitializeJSObjectFromMap(JSObject* obj, Object* properties,
Map* map) {
obj->set_raw_properties_or_hash(properties);
@@ -3503,28 +3588,17 @@ static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
int len) {
- const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
- size_t stream_length = vector.length();
- while (stream_length != 0) {
- size_t consumed = 0;
- uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(vector);
+ while (!it.Done()) {
+ DCHECK_GT(len, 0);
+ len -= 1;
+
+ uint16_t c = *it;
+ ++it;
DCHECK_NE(unibrow::Utf8::kBadChar, c);
- DCHECK(consumed <= stream_length);
- stream_length -= consumed;
- stream += consumed;
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- len -= 2;
- if (len < 0) break;
- *chars++ = unibrow::Utf16::LeadSurrogate(c);
- *chars++ = unibrow::Utf16::TrailSurrogate(c);
- } else {
- len -= 1;
- if (len < 0) break;
- *chars++ = c;
- }
+ *chars++ = c;
}
- DCHECK_EQ(0, stream_length);
- DCHECK_EQ(0, len);
+ DCHECK_EQ(len, 0);
}
@@ -4447,12 +4521,8 @@ class MemoryPressureInterruptTask : public CancelableTask {
void Heap::CheckMemoryPressure() {
if (HighMemoryPressure()) {
- if (isolate()->concurrent_recompilation_enabled()) {
- // The optimizing compiler may be unnecessarily holding on to memory.
- DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush(
- OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
- }
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
}
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
CollectGarbageOnMemoryPressure();
@@ -4877,8 +4947,9 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
- &roots_[kStringTableRootIndex]));
+ v->VisitRootPointer(
+ Root::kStringTable, nullptr,
+ reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
mode != VISIT_FOR_SERIALIZATION) {
@@ -4893,13 +4964,13 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
void Heap::IterateSmiRoots(RootVisitor* v) {
// Acquire execution access since we are going to read stack limit values.
ExecutionAccess access(isolate());
- v->VisitRootPointers(Root::kSmiRootList, &roots_[kSmiRootsStart],
+ v->VisitRootPointers(Root::kSmiRootList, nullptr, &roots_[kSmiRootsStart],
&roots_[kRootListLength]);
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
void Heap::IterateEncounteredWeakCollections(RootVisitor* visitor) {
- visitor->VisitRootPointer(Root::kWeakCollections,
+ visitor->VisitRootPointer(Root::kWeakCollections, nullptr,
&encountered_weak_collections_);
}
@@ -4913,9 +4984,13 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
USE(heap_);
}
- void VisitRootPointer(Root root, Object** p) override { FixHandle(p); }
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
+ FixHandle(p);
+ }
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) FixHandle(p);
}
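
Every RootVisitor override in this patch gains a `const char* description` parameter; call sites with no label pass nullptr, as the hunks above show. A minimal sketch of a visitor conforming to the widened interface (hypothetical class, modeled on the counting and printing visitors later in this file):

    // Sketch: a root-counting visitor under the new signature.
    // |description| is informational and may be nullptr.
    class CountingRootVisitor : public RootVisitor {
     public:
      void VisitRootPointer(Root root, const char* description,
                            Object** p) override {
        count_ += 1;
      }
      void VisitRootPointers(Root root, const char* description,
                             Object** start, Object** end) override {
        count_ += end - start;
      }
      intptr_t count_ = 0;
    };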
@@ -4951,7 +5026,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
+ v->VisitRootPointers(Root::kStrongRootList, nullptr, &roots_[0],
&roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -5026,7 +5101,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over other strong roots (currently only identity maps).
for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
- v->VisitRootPointers(Root::kStrongRoots, list->start, list->end);
+ v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
}
v->Synchronize(VisitorSynchronization::kStrongRoots);
@@ -5038,6 +5113,9 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
}
}
+void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
+ isolate_->global_handles()->IterateWeakRoots(v);
+}
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
@@ -5745,7 +5823,8 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
void Heap::TearDown() {
- use_tasks_ = false;
+ SetGCState(TEAR_DOWN);
+ DCHECK(!use_tasks_);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -6035,7 +6114,8 @@ void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
class PrintHandleVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++)
PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
reinterpret_cast<void*>(*p));
@@ -6057,7 +6137,8 @@ class CheckHandleCountVisitor : public RootVisitor {
~CheckHandleCountVisitor() override {
CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
handle_count_ += end - start;
}
@@ -6207,7 +6288,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
MarkPointers(start, end);
}
@@ -6449,6 +6531,10 @@ void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
set_deserialize_lazy_handler_extra_wide(code);
}
+void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
+ set_builtins_constants_table(cache);
+}
+
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;
}
@@ -6480,19 +6566,13 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
return true;
INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
- *object_type = "CODE_TYPE"; \
- *object_sub_type = "CODE_KIND/" #name; \
- return true;
- CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
-#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
- *object_type = "FIXED_ARRAY_TYPE"; \
- *object_sub_type = #name; \
+
+#define COMPARE_AND_RETURN_NAME(name) \
+ case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
+ *object_type = #name; \
+ *object_sub_type = ""; \
return true;
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
+ VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
}
return false;
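
For one entry of VIRTUAL_INSTANCE_TYPE_LIST, the reworked macro expands to a plain case label. A sketch of the expansion for a hypothetical FOO_TYPE entry:

    // Sketch: COMPARE_AND_RETURN_NAME(FOO_TYPE) expands to
    case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::FOO_TYPE:
      *object_type = "FOO_TYPE";
      *object_sub_type = "";
      return true;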
@@ -6537,8 +6617,9 @@ void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
VerifyPointers(start, end);
}
-void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void VerifyPointersVisitor::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
VerifyPointers(start, end);
}
@@ -6554,8 +6635,8 @@ void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
}
}
-void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
CHECK((*current)->IsSmi());
}
@@ -6580,12 +6661,11 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
AllocationSpace src = chunk->owner()->identity();
switch (src) {
case NEW_SPACE:
- return dst == src || dst == OLD_SPACE;
+ return dst == NEW_SPACE || dst == OLD_SPACE;
case OLD_SPACE:
- return dst == src &&
- (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
+ return dst == OLD_SPACE;
case CODE_SPACE:
- return dst == src && type == CODE_TYPE;
+ return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
return false;
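
After this simplification the predicate no longer carries the redundant `dst == src` conjunctions; it is a pure function of source space, destination space, and (for code space) instance type. A condensed sketch equivalent to the switch above:

    // Sketch: migration policy after the cleanup.
    bool AllowedSketch(AllocationSpace src, AllocationSpace dst,
                       InstanceType type) {
      if (src == NEW_SPACE) return dst == NEW_SPACE || dst == OLD_SPACE;
      if (src == OLD_SPACE) return dst == OLD_SPACE;
      if (src == CODE_SPACE) return dst == CODE_SPACE && type == CODE_TYPE;
      return false;  // MAP_SPACE, LO_SPACE: never migrated
    }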
@@ -6612,6 +6692,7 @@ void AllocationObserver::AllocationStep(int bytes_allocated,
step_size_ = GetNextStepSize();
bytes_to_next_step_ = step_size_;
}
+ DCHECK_GE(bytes_to_next_step_, 0);
}
namespace {
@@ -6638,12 +6719,24 @@ Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
Map* map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == code->GetHeap()->code_map());
+#ifdef V8_EMBEDDED_BUILTINS
+ if (FLAG_stress_off_heap_code) {
+ if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
+ }
+#endif
Address start = code->address();
Address end = code->address() + code->SizeFromMap(map);
return start <= addr && addr < end;
}
Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (FLAG_stress_off_heap_code) {
+ Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (code != nullptr) return code;
+ }
+#endif
+
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = lo_space()->FindPage(inner_pointer);
if (large_page != nullptr) {
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 7cc65479ca..63bcfb2990 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -114,6 +114,7 @@ using v8::MemoryPressureLevel;
V(Map, name_dictionary_map, NameDictionaryMap) \
V(Map, global_dictionary_map, GlobalDictionaryMap) \
V(Map, number_dictionary_map, NumberDictionaryMap) \
+ V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap) \
V(Map, string_table_map, StringTableMap) \
V(Map, weak_hash_table_map, WeakHashTableMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
@@ -168,6 +169,8 @@ using v8::MemoryPressureLevel;
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
+ V(Map, fixed_biguint64_array_map, FixedBigUint64ArrayMap) \
+ V(Map, fixed_bigint64_array_map, FixedBigInt64ArrayMap) \
/* Oddball maps */ \
V(Map, undefined_map, UndefinedMap) \
V(Map, the_hole_map, TheHoleMap) \
@@ -193,8 +196,11 @@ using v8::MemoryPressureLevel;
V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
EmptyFixedUint8ClampedArray) \
+ V(FixedTypedArrayBase, empty_fixed_biguint64_array, \
+ EmptyFixedBigUint64Array) \
+ V(FixedTypedArrayBase, empty_fixed_bigint64_array, EmptyFixedBigInt64Array) \
V(Script, empty_script, EmptyScript) \
- V(Cell, undefined_cell, UndefinedCell) \
+ V(FeedbackCell, many_closures_cell, ManyClosuresCell) \
V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
V(NumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
@@ -213,6 +219,8 @@ using v8::MemoryPressureLevel;
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
V(PropertyCell, array_buffer_neutering_protector, \
ArrayBufferNeuteringProtector) \
+ V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
+ V(PropertyCell, promise_then_protector, PromiseThenProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, hole_nan_value, HoleNanValue) \
@@ -230,7 +238,7 @@ using v8::MemoryPressureLevel;
V(NameDictionary, api_symbol_table, ApiSymbolTable) \
V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
V(Object, script_list, ScriptList) \
- V(NumberDictionary, code_stubs, CodeStubs) \
+ V(SimpleNumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
V(FixedArray, detached_contexts, DetachedContexts) \
@@ -242,6 +250,8 @@ using v8::MemoryPressureLevel;
/* slots refer to the code with the reference to the weak object. */ \
V(ArrayList, weak_new_space_object_to_code_list, \
WeakNewSpaceObjectToCodeList) \
+ /* Indirection lists for isolate-independent builtins */ \
+ V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
/* Feedback vectors that we need for code coverage or type profile */ \
V(Object, feedback_vectors_for_profiling_tools, \
FeedbackVectorsForProfilingTools) \
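
Rows like the ones added above are consumed by X-macro expansion: each V(type, name, CamelName) entry can be turned into a typed accessor over roots_. A simplified sketch of that pattern (the real expansions in heap.h also generate the root-index enum and setters):

    // Sketch: X-macro expansion of a root-list row into an accessor.
    #define ROOT_ACCESSOR(type, name, CamelName) \
      type* name() { return type::cast(roots_[k##CamelName##RootIndex]); }
    // e.g. V(FeedbackCell, many_closures_cell, ManyClosuresCell) yields:
    //   FeedbackCell* many_closures_cell() {
    //     return FeedbackCell::cast(roots_[kManyClosuresCellRootIndex]);
    //   }
    #undef ROOT_ACCESSOR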
@@ -340,6 +350,7 @@ using v8::MemoryPressureLevel;
V(JsConstructEntryCode) \
V(JsEntryCode) \
V(JSMessageObjectMap) \
+ V(ManyClosuresCell) \
V(ManyClosuresCellMap) \
V(MetaMap) \
V(MinusInfinityValue) \
@@ -363,6 +374,7 @@ using v8::MemoryPressureLevel;
V(ScopeInfoMap) \
V(ScriptContextMap) \
V(SharedFunctionInfoMap) \
+ V(SimpleNumberDictionaryMap) \
V(SloppyArgumentsElementsMap) \
V(SmallOrderedHashMapMap) \
V(SmallOrderedHashSetMap) \
@@ -377,7 +389,6 @@ using v8::MemoryPressureLevel;
V(TransitionArrayMap) \
V(TrueValue) \
V(TwoPointerFillerMap) \
- V(UndefinedCell) \
V(UndefinedMap) \
V(UndefinedValue) \
V(UninitializedMap) \
@@ -575,7 +586,13 @@ class Heap {
enum FindMementoMode { kForRuntime, kForGC };
- enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT, MINOR_MARK_COMPACT };
+ enum HeapState {
+ NOT_IN_GC,
+ SCAVENGE,
+ MARK_COMPACT,
+ MINOR_MARK_COMPACT,
+ TEAR_DOWN
+ };
using PretenuringFeedbackMap = std::unordered_map<AllocationSite*, size_t>;
@@ -966,6 +983,8 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp();
+ void stop_using_tasks() { use_tasks_ = false; }
+
bool use_tasks() const { return use_tasks_; }
// ===========================================================================
@@ -1062,7 +1081,7 @@ class Heap {
Object** roots_array_start() { return roots_; }
// Sets the stub_cache_ (only used when expanding the dictionary).
- void SetRootCodeStubs(NumberDictionary* value);
+ void SetRootCodeStubs(SimpleNumberDictionary* value);
void SetRootMaterializedObjects(FixedArray* objects) {
roots_[kMaterializedObjectsRootIndex] = objects;
@@ -1110,6 +1129,8 @@ class Heap {
void SetDeserializeLazyHandlerWide(Code* code);
void SetDeserializeLazyHandlerExtraWide(Code* code);
+ void SetBuiltinsConstantsTable(FixedArray* cache);
+
// ===========================================================================
// Inline allocation. ========================================================
// ===========================================================================
@@ -1161,15 +1182,15 @@ class Heap {
// Iterators. ================================================================
// ===========================================================================
- // Iterates over all roots in the heap.
void IterateRoots(RootVisitor* v, VisitMode mode);
- // Iterates over all strong roots in the heap.
void IterateStrongRoots(RootVisitor* v, VisitMode mode);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
void IterateSmiRoots(RootVisitor* v);
- // Iterates over all the other roots in the heap.
+ // Iterates over weak string tables.
void IterateWeakRoots(RootVisitor* v, VisitMode mode);
+ // Iterates over weak global handles.
+ void IterateWeakGlobalHandles(RootVisitor* v);
// ===========================================================================
// Store buffer API. =========================================================
@@ -1571,6 +1592,11 @@ class Heap {
void RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer);
+ bool allocation_step_in_progress() { return allocation_step_in_progress_; }
+ void set_allocation_step_in_progress(bool val) {
+ allocation_step_in_progress_ = val;
+ }
+
// ===========================================================================
// Retaining path tracking. ==================================================
// ===========================================================================
@@ -2076,7 +2102,8 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateHeapNumber(
MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT AllocationResult AllocateBigInt(int length);
+ MUST_USE_RESULT AllocationResult
+ AllocateBigInt(int length, PretenureFlag pretenure = NOT_TENURED);
// Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
@@ -2265,6 +2292,10 @@ class Heap {
// Allocate a tenured simple cell.
MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
+ // Allocate a tenured simple feedback cell.
+ MUST_USE_RESULT AllocationResult AllocateFeedbackCell(Map* map,
+ HeapObject* value);
+
// Allocate a tenured JS global property cell initialized with the hole.
MUST_USE_RESULT AllocationResult AllocatePropertyCell(Name* name);
@@ -2287,13 +2318,16 @@ class Heap {
// Allocates a new code object (fully initialized). All header fields of the
// returned object are immutable and the code object is write protected.
- MUST_USE_RESULT AllocationResult
- AllocateCode(const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, HandlerTable* handler_table,
- ByteArray* source_position_table, DeoptimizationData* deopt_data,
- Movability movability, uint32_t stub_key, bool is_turbofanned,
- int stack_slots, int safepoint_table_offset);
+ MUST_USE_RESULT AllocationResult AllocateCode(
+ const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
+ int32_t builtin_index, ByteArray* reloc_info,
+ CodeDataContainer* data_container, ByteArray* source_position_table,
+ DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
+ bool is_turbofanned, int stack_slots, int safepoint_table_offset,
+ int handler_table_offset);
+
+ MUST_USE_RESULT AllocationResult AllocateJSPromise(
+ JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
void set_force_oom(bool value) { force_oom_ = value; }
@@ -2400,6 +2434,8 @@ class Heap {
// Observer that can cause early scavenge start.
StressScavengeObserver* stress_scavenge_observer_;
+ bool allocation_step_in_progress_;
+
// The maximum percent of the marking limit reached without causing marking.
// This is tracked when specifying --fuzzer-gc-analysis.
double max_marking_limit_reached_;
@@ -2658,6 +2694,7 @@ class AlwaysAllocateScope {
Heap* heap_;
};
+// The CodeSpaceMemoryModificationScope can only be used by the main thread.
class CodeSpaceMemoryModificationScope {
public:
explicit inline CodeSpaceMemoryModificationScope(Heap* heap);
@@ -2667,6 +2704,9 @@ class CodeSpaceMemoryModificationScope {
Heap* heap_;
};
+// The CodePageMemoryModificationScope does not check whether transitions to
+// writable and back to executable are actually allowed, i.e., whether the
+// MemoryChunk was registered as executable. It can be used by concurrent
+// threads.
class CodePageMemoryModificationScope {
public:
explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
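
A minimal usage sketch of the scope described above (RAII over a MemoryChunk; the patching body is hypothetical):

    // Sketch: temporarily make |chunk| writable, even off the main thread.
    void PatchCodeOn(MemoryChunk* chunk) {
      CodePageMemoryModificationScope scope(chunk);
      // ... write to code objects on |chunk| ...
    }  // scope exit flips the page back, per the class comment above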
@@ -2689,7 +2729,8 @@ class CodePageMemoryModificationScope {
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
void VisitPointers(HeapObject* host, Object** start, Object** end) override;
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
private:
void VerifyPointers(Object** start, Object** end);
@@ -2699,7 +2740,8 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
};
// Space iterator for iterating over all the paged spaces of the heap: Map
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 4868adc26e..a7b56e4315 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -203,11 +203,13 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
IncrementalMarking* incremental_marking)
: heap_(incremental_marking->heap()) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -653,15 +655,17 @@ bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
- // The object can already be black in two cases:
- // 1. The object is a fixed array with the progress bar.
- // 2. The object is a JSObject that was colored black before
- // unsafe layout change.
- // 3. The object is a string that was colored black before
- // unsafe layout change.
if (!marking_state()->GreyToBlack(obj)) {
- DCHECK(IsFixedArrayWithProgressBar(obj) || obj->IsJSObject() ||
- obj->IsString());
+ // The object can already be black in these cases:
+ // 1. The object is a fixed array with the progress bar.
+ // 2. The object is a JSObject that was colored black before
+ // unsafe layout change.
+ // 3. The object is a string that was colored black before
+ // unsafe layout change.
+ // 4. The object is materialized by the deoptimizer.
+ DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
+ obj->IsContextExtension() || obj->IsFixedArray() ||
+ obj->IsJSObject() || obj->IsString());
}
DCHECK(marking_state()->IsBlack(obj));
WhiteToGreyAndPush(map);
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index b62aa93cde..8ca289cf1a 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INVALIDATED_SLOTS_INL_H
-#define V8_INVALIDATED_SLOTS_INL_H
+#ifndef V8_HEAP_INVALIDATED_SLOTS_INL_H_
+#define V8_HEAP_INVALIDATED_SLOTS_INL_H_
#include <map>
@@ -67,4 +67,4 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
} // namespace internal
} // namespace v8
-#endif // V8_INVALIDATED_SLOTS_INL_H
+#endif // V8_HEAP_INVALIDATED_SLOTS_INL_H_
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 78ac03bc79..e9410575a3 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INVALIDATED_SLOTS_H
-#define V8_INVALIDATED_SLOTS_H
+#ifndef V8_HEAP_INVALIDATED_SLOTS_H_
+#define V8_HEAP_INVALIDATED_SLOTS_H_
#include <map>
#include <stack>
@@ -51,4 +51,4 @@ class InvalidatedSlotsFilter {
} // namespace internal
} // namespace v8
-#endif // V8_INVALIDATED_SLOTS_H
+#endif // V8_HEAP_INVALIDATED_SLOTS_H_
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
new file mode 100644
index 0000000000..1c8d4c8ac4
--- /dev/null
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -0,0 +1,130 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/item-parallel-job.h"
+
+#include "src/base/platform/semaphore.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+ItemParallelJob::Task::Task(Isolate* isolate) : CancelableTask(isolate) {}
+
+ItemParallelJob::Task::~Task() {
+ // The histogram is reset in RunInternal(). If it is still set here, the
+ // task was cancelled before it was ever scheduled.
+ if (gc_parallel_task_latency_histogram_)
+ gc_parallel_task_latency_histogram_->RecordAbandon();
+}
+
+void ItemParallelJob::Task::SetupInternal(
+ base::Semaphore* on_finish, std::vector<Item*>* items, size_t start_index,
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram) {
+ on_finish_ = on_finish;
+ items_ = items;
+
+ if (start_index < items->size()) {
+ cur_index_ = start_index;
+ } else {
+ items_considered_ = items_->size();
+ }
+
+ gc_parallel_task_latency_histogram_ =
+ std::move(gc_parallel_task_latency_histogram);
+}
+
+void ItemParallelJob::Task::RunInternal() {
+ if (gc_parallel_task_latency_histogram_) {
+ gc_parallel_task_latency_histogram_->RecordDone();
+ gc_parallel_task_latency_histogram_.reset();
+ }
+
+ RunInParallel();
+ on_finish_->Signal();
+}
+
+ItemParallelJob::ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
+ base::Semaphore* pending_tasks)
+ : cancelable_task_manager_(cancelable_task_manager),
+ pending_tasks_(pending_tasks) {}
+
+ItemParallelJob::~ItemParallelJob() {
+ for (size_t i = 0; i < items_.size(); i++) {
+ Item* item = items_[i];
+ CHECK(item->IsFinished());
+ delete item;
+ }
+}
+
+void ItemParallelJob::Run(std::shared_ptr<Counters> async_counters) {
+ DCHECK_GT(tasks_.size(), 0);
+ const size_t num_items = items_.size();
+ const size_t num_tasks = tasks_.size();
+
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ItemParallelJob::Run", TRACE_EVENT_SCOPE_THREAD,
+ "num_tasks", static_cast<int>(num_tasks), "num_items",
+ static_cast<int>(num_items));
+
+ AsyncTimedHistogram gc_parallel_task_latency_histogram(
+ async_counters->gc_parallel_task_latency(), async_counters);
+
+ // Some jobs have more tasks than items (when the items are merely
+ // coarse-grained tasks that generate work dynamically for a second
+ // phase in which all tasks participate). Some jobs even have 0 items
+ // to preprocess but still have multiple tasks.
+ // TODO(gab): Figure out a cleaner scheme for this.
+ const size_t num_tasks_processing_items = Min(num_items, tasks_.size());
+
+ // In the event of an uneven workload, distribute an extra item to the first
+ // |items_remainder| tasks.
+ const size_t items_remainder = num_tasks_processing_items > 0
+ ? num_items % num_tasks_processing_items
+ : 0;
+ // Base |items_per_task|; it will be bumped by 1 for the first
+ // |items_remainder| tasks.
+ const size_t items_per_task = num_tasks_processing_items > 0
+ ? num_items / num_tasks_processing_items
+ : 0;
+ CancelableTaskManager::Id* task_ids =
+ new CancelableTaskManager::Id[num_tasks];
+ Task* main_task = nullptr;
+ for (size_t i = 0, start_index = 0; i < num_tasks;
+ i++, start_index += items_per_task + (i < items_remainder ? 1 : 0)) {
+ Task* task = tasks_[i];
+
+ // By definition there are fewer |items_remainder| to distribute than
+ // there are tasks processing items, so this cannot overflow while we
+ // are assigning work items.
+ DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
+
+ task->SetupInternal(pending_tasks_, &items_, start_index,
+ i > 0 ? gc_parallel_task_latency_histogram
+ : base::Optional<AsyncTimedHistogram>());
+ task_ids[i] = task->id();
+ if (i > 0) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ } else {
+ main_task = task;
+ }
+ }
+
+ // Contribute on main thread.
+ main_task->Run();
+ delete main_task;
+
+ // Wait for background tasks.
+ for (size_t i = 0; i < num_tasks; i++) {
+ if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ pending_tasks_->Wait();
+ }
+ }
+ delete[] task_ids;
+}
+
+} // namespace internal
+} // namespace v8
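
The remainder-spreading arithmetic in Run() is easiest to check with concrete numbers: per the comments above, the first |items_remainder| tasks each take one extra item, and start indices only serve as claiming hints since tasks take items atomically. A standalone sketch of that intended split:

    // Sketch of the intended split: the first |remainder| tasks each
    // get one extra item.
    #include <cstddef>
    #include <vector>

    std::vector<size_t> StartIndices(size_t num_items, size_t num_tasks) {
      const size_t per_task = num_tasks ? num_items / num_tasks : 0;
      const size_t remainder = num_tasks ? num_items % num_tasks : 0;
      std::vector<size_t> starts(num_tasks);
      for (size_t i = 0, start = 0; i < num_tasks; ++i) {
        starts[i] = start;
        start += per_task + (i < remainder ? 1 : 0);
      }
      return starts;
    }
    // StartIndices(10, 4) -> {0, 3, 6, 8}: chunks of 3, 3, 2, 2.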
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 23c709f87b..4c21f69ca9 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -2,18 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_ITEM_PARALLEL_JOB_
-#define V8_HEAP_ITEM_PARALLEL_JOB_
+#ifndef V8_HEAP_ITEM_PARALLEL_JOB_H_
+#define V8_HEAP_ITEM_PARALLEL_JOB_H_
+#include <memory>
#include <vector>
-#include "src/base/platform/semaphore.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/cancelable-task.h"
-#include "src/v8.h"
+#include "src/counters.h"
+#include "src/globals.h"
namespace v8 {
+
+namespace base {
+class Semaphore;
+}
+
namespace internal {
+class Counters;
class Isolate;
// This class manages background tasks that process a set of items in parallel.
@@ -25,14 +36,17 @@ class Isolate;
//
// Items need to be marked as finished after processing them. Task and Item
// ownership is transferred to the job.
-class ItemParallelJob {
+//
+// Each parallel (non-main-thread) task reports the time between job creation
+// and the task being scheduled to |gc_parallel_task_latency_histogram|.
+class V8_EXPORT_PRIVATE ItemParallelJob {
public:
class Task;
- class Item {
+ class V8_EXPORT_PRIVATE Item {
public:
- Item() : state_(kAvailable) {}
- virtual ~Item() {}
+ Item() = default;
+ virtual ~Item() = default;
// Marks an item as being finished.
void MarkFinished() { CHECK(state_.TrySetValue(kProcessing, kFinished)); }
@@ -45,7 +59,7 @@ class ItemParallelJob {
}
bool IsFinished() { return state_.Value() == kFinished; }
- base::AtomicValue<ProcessingState> state_;
+ base::AtomicValue<ProcessingState> state_{kAvailable};
friend class ItemParallelJob;
friend class ItemParallelJob::Task;
@@ -53,15 +67,10 @@ class ItemParallelJob {
DISALLOW_COPY_AND_ASSIGN(Item);
};
- class Task : public CancelableTask {
+ class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
- explicit Task(Isolate* isolate)
- : CancelableTask(isolate),
- items_(nullptr),
- cur_index_(0),
- items_considered_(0),
- on_finish_(nullptr) {}
- virtual ~Task() {}
+ explicit Task(Isolate* isolate);
+ virtual ~Task();
virtual void RunInParallel() = 0;
@@ -85,42 +94,36 @@ class ItemParallelJob {
}
private:
- void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
- size_t start_index) {
- on_finish_ = on_finish;
- items_ = items;
- cur_index_ = start_index;
- }
+ friend class ItemParallelJob;
+ friend class Item;
- // We don't allow overriding this method any further.
- void RunInternal() final {
- RunInParallel();
- on_finish_->Signal();
- }
+ // Sets up state required before invoking Run(). If
+ // |start_index| >= |items_.size()|, this task will not process work items
+ // (some jobs have more tasks than work items in order to parallelize post-
+ // processing, e.g. scavenging). If |gc_parallel_task_latency_histogram| is
+ // provided, it will be used to report histograms on the latency between
+ // posting the task and it being scheduled.
+ void SetupInternal(
+ base::Semaphore* on_finish, std::vector<Item*>* items,
+ size_t start_index,
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram);
- std::vector<Item*>* items_;
- size_t cur_index_;
- size_t items_considered_;
- base::Semaphore* on_finish_;
+ // We don't allow overriding this method any further.
+ void RunInternal() final;
- friend class ItemParallelJob;
- friend class Item;
+ std::vector<Item*>* items_ = nullptr;
+ size_t cur_index_ = 0;
+ size_t items_considered_ = 0;
+ base::Semaphore* on_finish_ = nullptr;
+ base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
- base::Semaphore* pending_tasks)
- : cancelable_task_manager_(cancelable_task_manager),
- pending_tasks_(pending_tasks) {}
-
- ~ItemParallelJob() {
- for (size_t i = 0; i < items_.size(); i++) {
- Item* item = items_[i];
- CHECK(item->IsFinished());
- delete item;
- }
- }
+ base::Semaphore* pending_tasks);
+
+ ~ItemParallelJob();
// Adds a task to the job. Transfers ownership to the job.
void AddTask(Task* task) { tasks_.push_back(task); }
@@ -131,42 +134,9 @@ class ItemParallelJob {
int NumberOfItems() const { return static_cast<int>(items_.size()); }
int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
- void Run() {
- DCHECK_GE(tasks_.size(), 0);
- const size_t num_tasks = tasks_.size();
- const size_t num_items = items_.size();
- const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
- CancelableTaskManager::Id* task_ids =
- new CancelableTaskManager::Id[num_tasks];
- size_t start_index = 0;
- Task* main_task = nullptr;
- Task* task = nullptr;
- for (size_t i = 0; i < num_tasks; i++, start_index += items_per_task) {
- task = tasks_[i];
- if (start_index >= num_items) {
- start_index -= num_items;
- }
- task->SetupInternal(pending_tasks_, &items_, start_index);
- task_ids[i] = task->id();
- if (i > 0) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- } else {
- main_task = task;
- }
- }
- // Contribute on main thread.
- main_task->Run();
- delete main_task;
- // Wait for background tasks.
- for (size_t i = 0; i < num_tasks; i++) {
- if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
- pending_tasks_->Wait();
- }
- }
- delete[] task_ids;
- }
+ // Runs this job, reporting metrics in a thread-safe manner to
+ // |async_counters|.
+ void Run(std::shared_ptr<Counters> async_counters);
private:
std::vector<Item*> items_;
@@ -179,4 +149,4 @@ class ItemParallelJob {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_ITEM_PARALLEL_JOB_
+#endif // V8_HEAP_ITEM_PARALLEL_JOB_H_
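
Tying the reworked header together, a hedged usage sketch (SweepItem and SweepTask are hypothetical; the real callers are the marking and evacuation jobs in mark-compact.cc, and GetItem<T>() is assumed to be the existing item-claiming helper on Task):

    // Usage sketch for the reworked interface (hypothetical types).
    class SweepItem : public ItemParallelJob::Item {
      // per-item payload lives here
    };

    class SweepTask : public ItemParallelJob::Task {
     public:
      explicit SweepTask(Isolate* isolate) : Task(isolate) {}
      void RunInParallel() override {
        while (SweepItem* item = GetItem<SweepItem>()) {  // atomic claim
          // ... process |item| ...
          item->MarkFinished();
        }
      }
    };

    // ItemParallelJob job(task_manager, &pending_tasks);
    // job.AddTask(new SweepTask(isolate));  // ownership -> job
    // job.AddItem(new SweepItem());         // ownership -> job
    // job.Run(isolate->async_counters());   // metrics to |async_counters|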
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 30a7e55d6b..c6c8c29962 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -16,7 +16,6 @@
#include "src/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
-#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
@@ -72,7 +71,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
VerifyPointers(start, end);
}
@@ -240,7 +240,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
VerifyPointers(start, end);
}
@@ -369,12 +370,14 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
buffered_objects_.reserve(kBufferSize);
}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
if (!(*p)->IsHeapObject()) return;
AddObject(*p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
AddObject(*p);
@@ -404,14 +407,22 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
std::vector<Object*> buffered_objects_;
};
-} // namespace
-
-static int NumberOfAvailableCores() {
- return Max(
- 1, static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+int NumberOfAvailableCores() {
+ static int num_cores =
+ static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) +
+ 1;
+ // This number of cores should be greater than zero and never change.
+ DCHECK_GE(num_cores, 1);
+ DCHECK_EQ(
+ num_cores,
+ 1 + static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+ return num_cores;
}
+} // namespace
+
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
DCHECK_GT(pages, 0);
return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
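
The memoized core count above relies on the C++11 guarantee that a function-local static is initialized exactly once, thread-safely. A minimal sketch of the idiom, with the platform query stubbed out as a hypothetical helper:

    // Sketch: once-only, thread-safe memoization via a local static.
    int NumberOfAvailableCoresSketch() {
      static int num_cores = QueryBackgroundThreads() + 1;  // hypothetical
      return num_cores;  // frozen for the process lifetime
    }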
@@ -855,7 +866,7 @@ void MarkCompactCollector::Prepare() {
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
heap()->incremental_marking()->Stop();
heap()->incremental_marking()->AbortBlackAllocation();
- FinishConcurrentMarking();
+ FinishConcurrentMarking(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
heap()->incremental_marking()->Deactivate();
ClearMarkbits();
AbortWeakCollections();
@@ -891,9 +902,10 @@ void MarkCompactCollector::Prepare() {
#endif
}
-void MarkCompactCollector::FinishConcurrentMarking() {
+void MarkCompactCollector::FinishConcurrentMarking(
+ ConcurrentMarking::StopRequest stop_request) {
if (FLAG_concurrent_marking) {
- heap()->concurrent_marking()->EnsureCompleted();
+ heap()->concurrent_marking()->Stop(stop_request);
heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
}
}
@@ -965,11 +977,12 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
explicit RootMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, Object** p) final {
+ void VisitRootPointer(Root root, const char* description, Object** p) final {
MarkObjectByPointer(root, p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) final {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) final {
for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
}
@@ -1058,7 +1071,8 @@ class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
@@ -1093,7 +1107,8 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
: heap_(collector->heap()),
marking_state_(collector->non_atomic_marking_state()) {}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
DCHECK_EQ(static_cast<int>(root),
static_cast<int>(Root::kExternalStringsTable));
// Visit all HeapObject pointers in [start, end).
@@ -1391,7 +1406,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment =
+ HeapObject::RequiredAlignment(object->map());
AllocationResult allocation =
local_allocator_->Allocate(target_space, size, alignment);
if (allocation.To(target_object)) {
@@ -1496,7 +1512,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
HeapObject** target_object) {
- AllocationAlignment alignment = old_object->RequiredAlignment();
+ AllocationAlignment alignment =
+ HeapObject::RequiredAlignment(old_object->map());
AllocationSpace space_allocated_in = NEW_SPACE;
AllocationResult allocation =
local_allocator_->Allocate(NEW_SPACE, size, alignment);
@@ -1758,11 +1775,13 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
: collector_(collector),
marking_state_(collector_->non_atomic_marking_state()) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -1883,6 +1902,8 @@ class BatchedRootMarkingItem : public MarkingItem {
virtual ~BatchedRootMarkingItem() {}
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "BatchedRootMarkingItem::Process");
for (Object* object : objects_) {
task->MarkObject(object);
}
@@ -1900,6 +1921,8 @@ class PageMarkingItem : public MarkingItem {
virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "PageMarkingItem::Process");
base::LockGuard<base::Mutex> guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
@@ -1956,6 +1979,8 @@ class GlobalHandlesMarkingItem : public MarkingItem {
virtual ~GlobalHandlesMarkingItem() {}
void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "GlobalHandlesMarkingItem::Process");
GlobalHandlesRootMarkingVisitor visitor(task);
global_handles_
->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
@@ -1968,12 +1993,14 @@ class GlobalHandlesMarkingItem : public MarkingItem {
explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
: task_(task) {}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
DCHECK_EQ(Root::kGlobalHandles, root);
task_->MarkObject(*p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
DCHECK_EQ(Root::kGlobalHandles, root);
for (Object** p = start; p < end; p++) {
task_->MarkObject(*p);
@@ -2061,7 +2088,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel() {
job.AddTask(
new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
}
- job.Run();
+ job.Run(isolate()->async_counters());
DCHECK(worklist()->IsGlobalEmpty());
}
}
@@ -2336,7 +2363,8 @@ void MarkCompactCollector::MarkLiveObjects() {
}
ProcessMarkingWorklist();
- FinishConcurrentMarking();
+ FinishConcurrentMarking(
+ ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
ProcessMarkingWorklist();
}
@@ -2849,11 +2877,13 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
- void VisitRootPointer(Root root, Object** p) override {
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
UpdateSlotInternal(p);
}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
}
@@ -3009,6 +3039,7 @@ class Evacuator : public Malloced {
};
void Evacuator::EvacuatePage(Page* page) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
DCHECK(page->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
@@ -3066,11 +3097,15 @@ class FullEvacuator : public Evacuator {
};
void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
+ const EvacuationMode evacuation_mode = ComputeEvacuationMode(page);
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "FullEvacuator::RawEvacuatePage", "evacuation_mode",
+ evacuation_mode);
MarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(page);
HeapObject* failed_object = nullptr;
- switch (ComputeEvacuationMode(page)) {
+ switch (evacuation_mode) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
page, marking_state, &new_space_visitor_,
@@ -3127,6 +3162,8 @@ class YoungGenerationEvacuator : public Evacuator {
void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
intptr_t* live_bytes) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "YoungGenerationEvacuator::RawEvacuatePage");
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(page);
@@ -3241,7 +3278,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
evacuators[i]->AddObserver(migration_observer);
job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
}
- job->Run();
+ job->Run(isolate()->async_counters());
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
delete evacuators[i];
@@ -3249,15 +3286,16 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
delete[] evacuators;
if (FLAG_trace_evacuation) {
- PrintIsolate(isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
- "wanted_tasks=%d tasks=%d cores=%" PRIuS
- " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
- isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
- wanted_num_tasks, job->NumberOfTasks(),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
- live_bytes, compaction_speed);
+ PrintIsolate(
+ isolate(),
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+ "wanted_tasks=%d tasks=%d cores=%" PRIuS " live_bytes=%" V8PRIdPTR
+ " compaction_speed=%.f\n",
+ isolate()->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
+ wanted_num_tasks, job->NumberOfTasks(),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() + 1,
+ live_bytes, compaction_speed);
}
}
@@ -3365,6 +3403,8 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
Visitor* visitor,
IterationMode iteration_mode,
HeapObject** failed_object) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitBlackObjects");
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3389,6 +3429,8 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
MarkingState* marking_state,
Visitor* visitor,
IterationMode iteration_mode) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitBlackObjectsNoFail");
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3407,6 +3449,8 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
MarkingState* marking_state,
Visitor* visitor,
IterationMode iteration_mode) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "LiveObjectVisitor::VisitGreyObjectsNoFail");
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
HeapObject* const object = object_and_size.first;
@@ -3553,6 +3597,8 @@ class ToSpaceUpdatingItem : public UpdatingItem {
private:
void ProcessVisitAll() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ToSpaceUpdatingItem::ProcessVisitAll");
PointersUpdatingVisitor visitor;
for (Address cur = start_; cur < end_;) {
HeapObject* object = HeapObject::FromAddress(cur);
@@ -3564,6 +3610,8 @@ class ToSpaceUpdatingItem : public UpdatingItem {
}
void ProcessVisitLive() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
PointersUpdatingVisitor visitor;
@@ -3592,13 +3640,14 @@ class RememberedSetUpdatingItem : public UpdatingItem {
virtual ~RememberedSetUpdatingItem() {}
void Process() override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "RememberedSetUpdatingItem::Process");
base::LockGuard<base::Mutex> guard(chunk_->mutex());
UpdateUntypedPointers();
UpdateTypedPointers();
}
private:
- template <AccessMode access_mode>
inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (heap_->InFromSpace(*slot)) {
@@ -3606,13 +3655,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
DCHECK(heap_object->IsHeapObject());
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
- if (access_mode == AccessMode::ATOMIC) {
- HeapObject** heap_obj_slot = reinterpret_cast<HeapObject**>(slot);
- base::AsAtomicPointer::Relaxed_Store(heap_obj_slot,
- map_word.ToForwardingAddress());
- } else {
- *slot = map_word.ToForwardingAddress();
- }
+ *slot = map_word.ToForwardingAddress();
}
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
@@ -3648,12 +3691,10 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this](Address slot) {
- return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(slot);
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this](Address slot) { return CheckAndUpdateOldToNewSlot(slot); },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
@@ -3692,7 +3733,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
[isolate, this](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate, slot_type, slot, [this](Object** slot) {
- return CheckAndUpdateOldToNewSlot<AccessMode::NON_ATOMIC>(
+ return CheckAndUpdateOldToNewSlot(
reinterpret_cast<Address>(slot));
});
});
@@ -3748,6 +3789,8 @@ class GlobalHandlesUpdatingItem : public UpdatingItem {
virtual ~GlobalHandlesUpdatingItem() {}
void Process() override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "GlobalHandlesUpdatingItem::Process");
PointersUpdatingVisitor updating_visitor;
global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
}
@@ -3772,6 +3815,9 @@ class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
virtual ~ArrayBufferTrackerUpdatingItem() {}
void Process() override {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
+ state_);
switch (state_) {
case EvacuationState::kRegular:
ArrayBufferTracker::ProcessBuffers(
@@ -3922,7 +3968,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
}
{
@@ -3954,7 +4000,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
}
@@ -4016,7 +4062,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- updating_job.Run();
+ updating_job.Run(isolate()->async_counters());
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 6fda00633c..755f0eb4eb 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -8,6 +8,7 @@
#include <deque>
#include <vector>
+#include "src/heap/concurrent-marking.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
@@ -649,7 +650,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// choosing spaces to compact.
void Prepare();
- void FinishConcurrentMarking();
+ // Stop concurrent marking (either by preempting it right away or waiting for
+ // it to complete as requested by |stop_request|).
+ void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
bool StartCompaction();
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index 9b1fe61236..58630c52f0 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MARKING_H
-#define V8_MARKING_H
+#ifndef V8_HEAP_MARKING_H_
+#define V8_HEAP_MARKING_H_
#include "src/base/atomic-utils.h"
#include "src/utils.h"
@@ -316,4 +316,4 @@ class Marking : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_MARKING_H_
+#endif // V8_HEAP_MARKING_H_
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index cc1030846a..77317a7b8a 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -201,6 +201,7 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
+ if (!heap()->use_tasks()) return;
DCHECK_LT(0, delay_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 0f0ad6eaa0..ce6564596e 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_memory_reducer_H
-#define V8_HEAP_memory_reducer_H
+#ifndef V8_HEAP_MEMORY_REDUCER_H_
+#define V8_HEAP_MEMORY_REDUCER_H_
#include "include/v8-platform.h"
#include "src/base/macros.h"
@@ -171,4 +171,4 @@ class V8_EXPORT_PRIVATE MemoryReducer {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_memory_reducer_H
+#endif // V8_HEAP_MEMORY_REDUCER_H_
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index f58a472671..b854dabb2c 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -1,4 +1,5 @@
// Copyright 2015 the V8 project authors. All rights reserved.
+//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -12,6 +13,7 @@
#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/utils.h"
@@ -31,7 +33,6 @@ void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
}
- visited_fixed_array_sub_types_.clear();
}
// Tell the compiler to never inline this: occasionally, the optimizer will
@@ -99,23 +100,14 @@ void ObjectStats::PrintJSON(const char* key) {
#define INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, #name, name);
-#define CODE_KIND_WRAPPER(name) \
- PrintInstanceTypeJSON(key, gc_count, "*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name);
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name);
+
#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
PrintInstanceTypeJSON(key, gc_count, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
- CODE_KIND_LIST(CODE_KIND_WRAPPER)
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER)
VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
#undef INSTANCE_TYPE_WRAPPER
-#undef CODE_KIND_WRAPPER
-#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
@@ -150,25 +142,15 @@ void ObjectStats::Dump(std::stringstream& stream) {
stream << "\"type_data\":{";
#define INSTANCE_TYPE_WRAPPER(name) DumpInstanceTypeData(stream, #name, name);
-#define CODE_KIND_WRAPPER(name) \
- DumpInstanceTypeData(stream, "*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name);
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name);
#define VIRTUAL_INSTANCE_TYPE_WRAPPER(name) \
DumpInstanceTypeData(stream, #name, FIRST_VIRTUAL_TYPE + name);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
- CODE_KIND_LIST(CODE_KIND_WRAPPER);
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER);
VIRTUAL_INSTANCE_TYPE_LIST(VIRTUAL_INSTANCE_TYPE_WRAPPER)
stream << "\"END\":{}}}";
#undef INSTANCE_TYPE_WRAPPER
-#undef CODE_KIND_WRAPPER
-#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
#undef VIRTUAL_INSTANCE_TYPE_WRAPPER
}
@@ -202,93 +184,88 @@ void ObjectStats::RecordObjectStats(InstanceType type, size_t size) {
}
void ObjectStats::RecordVirtualObjectStats(VirtualInstanceType type,
- size_t size) {
+ size_t size, size_t over_allocated) {
DCHECK_LE(type, LAST_VIRTUAL_TYPE);
object_counts_[FIRST_VIRTUAL_TYPE + type]++;
object_sizes_[FIRST_VIRTUAL_TYPE + type] += size;
size_histogram_[FIRST_VIRTUAL_TYPE + type][HistogramIndexFromSize(size)]++;
-}
-
-void ObjectStats::RecordCodeSubTypeStats(int code_sub_type, size_t size) {
- int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
- DCHECK_GE(code_sub_type_index, FIRST_CODE_KIND_SUB_TYPE);
- DCHECK_LT(code_sub_type_index, FIRST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[code_sub_type_index]++;
- object_sizes_[code_sub_type_index] += size;
- size_histogram_[code_sub_type_index][HistogramIndexFromSize(size)]++;
-}
-
-bool ObjectStats::RecordFixedArraySubTypeStats(FixedArrayBase* array,
- int array_sub_type, size_t size,
- size_t over_allocated) {
- auto it = visited_fixed_array_sub_types_.insert(array);
- if (!it.second) return false;
- DCHECK_LE(array_sub_type, LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
- size_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
- [HistogramIndexFromSize(size)]++;
- if (over_allocated > 0) {
- InstanceType type =
- array->IsHashTable() ? HASH_TABLE_TYPE : FIXED_ARRAY_TYPE;
- over_allocated_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] +=
- over_allocated;
- over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
- [HistogramIndexFromSize(over_allocated)]++;
- over_allocated_[type] += over_allocated;
- over_allocated_histogram_[type][HistogramIndexFromSize(over_allocated)]++;
- }
- return true;
+ over_allocated_[FIRST_VIRTUAL_TYPE + type] += over_allocated;
+ over_allocated_histogram_[FIRST_VIRTUAL_TYPE + type]
+ [HistogramIndexFromSize(size)]++;
}
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
class ObjectStatsCollectorImpl {
public:
+ enum Phase {
+ kPhase1,
+ kPhase2,
+ };
+ static const int kNumberOfPhases = kPhase2 + 1;
+
ObjectStatsCollectorImpl(Heap* heap, ObjectStats* stats);
void CollectGlobalStatistics();
+ void CollectStatistics(HeapObject* obj, Phase phase);
- // Collects statistics of objects for virtual instance types.
- void CollectVirtualStatistics(HeapObject* obj);
+ private:
+ enum CowMode {
+ kCheckCow,
+ kIgnoreCow,
+ };
- // Collects statistics of objects for regular instance types.
- void CollectStatistics(HeapObject* obj);
+ Isolate* isolate() { return heap_->isolate(); }
- private:
- class CompilationCacheTableVisitor;
+ bool RecordVirtualObjectStats(HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type,
+ size_t size, size_t over_allocated,
+ CowMode check_cow_array = kCheckCow);
+  // Gets the size from |obj| and assumes no over-allocation.
+ bool RecordSimpleVirtualObjectStats(HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type);
+  // For HashTable it is possible to compute the over-allocated memory.
+ void RecordHashTableVirtualObjectStats(HeapObject* parent,
+ FixedArray* hash_table,
+ ObjectStats::VirtualInstanceType type);
- void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
- void RecordBytecodeArrayDetails(BytecodeArray* obj);
- void RecordCodeDetails(Code* code);
- void RecordFixedArrayDetails(FixedArray* array);
- void RecordJSCollectionDetails(JSObject* obj);
- void RecordJSObjectDetails(JSObject* object);
- void RecordJSWeakCollectionDetails(JSWeakCollection* obj);
- void RecordMapDetails(Map* map);
- void RecordScriptDetails(Script* obj);
- void RecordTemplateInfoDetails(TemplateInfo* obj);
- void RecordSharedFunctionInfoDetails(SharedFunctionInfo* sfi);
-
- bool RecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype, size_t overhead);
- void RecursivelyRecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
- int subtype);
- template <class HashTable>
- void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
+ bool CanRecordFixedArray(FixedArrayBase* array);
+ bool IsCowArray(FixedArrayBase* array);
- void RecordVirtualObjectStats(HeapObject* obj,
- ObjectStats::VirtualInstanceType type,
- size_t size);
+  // Blacklist for objects that should not be recorded using
+  // RecordVirtualObjectStats and RecordSimpleVirtualObjectStats. To record
+  // those objects, dispatch to the low-level ObjectStats::RecordObjectStats
+  // manually.
+ bool ShouldRecordObject(HeapObject* object, CowMode check_cow_array);
+
+ void RecordObjectStats(HeapObject* obj, InstanceType type, size_t size);
+
+  // Specific recursion into the constant pool or embedded code objects.
+  // Records FixedArrays and Tuple2 objects that look like
+  // ConstantElementsPairs.
+ void RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ HeapObject* parent, HeapObject* object,
+ ObjectStats::VirtualInstanceType type);
+
+ // Details.
void RecordVirtualAllocationSiteDetails(AllocationSite* site);
+ void RecordVirtualBytecodeArrayDetails(BytecodeArray* bytecode);
+ void RecordVirtualCodeDetails(Code* code);
+ void RecordVirtualContext(Context* context);
+ void RecordVirtualFeedbackVectorDetails(FeedbackVector* vector);
+ void RecordVirtualFixedArrayDetails(FixedArray* array);
+ void RecordVirtualFunctionTemplateInfoDetails(FunctionTemplateInfo* fti);
+ void RecordVirtualJSGlobalObjectDetails(JSGlobalObject* object);
+ void RecordVirtualJSCollectionDetails(JSObject* object);
+ void RecordVirtualJSObjectDetails(JSObject* object);
+ void RecordVirtualMapDetails(Map* map);
+ void RecordVirtualScriptDetails(Script* script);
+ void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo* info);
+ void RecordVirtualJSFunctionDetails(JSFunction* function);
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
std::unordered_set<HeapObject*> virtual_objects_;
-
- friend class ObjectStatsCollectorImpl::CompilationCacheTableVisitor;
};
ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
@@ -298,18 +275,45 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
-// For entries which shared the same instance type (historically FixedArrays)
-// we do a pre-pass and create virtual instance types.
-void ObjectStatsCollectorImpl::CollectVirtualStatistics(HeapObject* obj) {
- if (obj->IsAllocationSite()) {
- RecordVirtualAllocationSiteDetails(AllocationSite::cast(obj));
+bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
+ CowMode check_cow_array) {
+ if (obj->IsFixedArray()) {
+ FixedArray* fixed_array = FixedArray::cast(obj);
+ bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
+ return CanRecordFixedArray(fixed_array) && cow_check;
}
+ if (obj == heap_->empty_property_array()) return false;
+ return true;
+}
+
+void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
+ HeapObject* parent, FixedArray* hash_table,
+ ObjectStats::VirtualInstanceType type) {
+ CHECK(hash_table->IsHashTable());
+ // TODO(mlippautz): Implement over allocation for hash tables.
+ RecordVirtualObjectStats(parent, hash_table, type, hash_table->Size(),
+ ObjectStats::kNoOverAllocation);
}
-void ObjectStatsCollectorImpl::RecordVirtualObjectStats(
- HeapObject* obj, ObjectStats::VirtualInstanceType type, size_t size) {
- virtual_objects_.insert(obj);
- stats_->RecordVirtualObjectStats(type, size);
+bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
+ HeapObject* parent, HeapObject* obj,
+ ObjectStats::VirtualInstanceType type) {
+ return RecordVirtualObjectStats(parent, obj, type, obj->Size(),
+ ObjectStats::kNoOverAllocation, kCheckCow);
+}
+
+bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
+ HeapObject* parent, HeapObject* obj, ObjectStats::VirtualInstanceType type,
+ size_t size, size_t over_allocated, CowMode check_cow_array) {
+ if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array))
+ return false;
+
+ if (virtual_objects_.find(obj) == virtual_objects_.end()) {
+ virtual_objects_.insert(obj);
+ stats_->RecordVirtualObjectStats(type, size, over_allocated);
+ return true;
+ }
+ return false;
}
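The find()-then-insert() pair above could equally use the return value of insert(), which already reports first-time insertion. A self-contained sketch (HeapObject is a stand-in type):

    #include <unordered_set>

    struct HeapObject;

    // Returns true only the first time |obj| is seen.
    bool RecordOnce(std::unordered_set<HeapObject*>& seen, HeapObject* obj) {
      return seen.insert(obj).second;
    }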
void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
@@ -317,141 +321,290 @@ void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
if (!site->PointsToLiteral()) return;
JSObject* boilerplate = site->boilerplate();
if (boilerplate->IsJSArray()) {
- RecordVirtualObjectStats(boilerplate,
- ObjectStats::JS_ARRAY_BOILERPLATE_TYPE,
- boilerplate->Size());
+ RecordSimpleVirtualObjectStats(site, boilerplate,
+ ObjectStats::JS_ARRAY_BOILERPLATE_TYPE);
// Array boilerplates cannot have properties.
} else {
- RecordVirtualObjectStats(boilerplate,
- ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
- boilerplate->Size());
+ RecordVirtualObjectStats(
+ site, boilerplate, ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
+ boilerplate->Size(), ObjectStats::kNoOverAllocation);
if (boilerplate->HasFastProperties()) {
- // We'll misclassify the empty_proeprty_array here. Given that there is a
- // single instance, this is neglible.
+      // We'll misclassify the empty_property_array here. Given that there is a
+ // single instance, this is negligible.
PropertyArray* properties = boilerplate->property_array();
- RecordVirtualObjectStats(properties,
- ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE,
- properties->Size());
+ RecordSimpleVirtualObjectStats(
+ site, properties, ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE);
} else {
NameDictionary* properties = boilerplate->property_dictionary();
- RecordVirtualObjectStats(properties,
- ObjectStats::BOILERPLATE_NAME_DICTIONARY_TYPE,
- properties->Size());
+ RecordSimpleVirtualObjectStats(
+ site, properties, ObjectStats::BOILERPLATE_PROPERTY_DICTIONARY_TYPE);
}
}
FixedArrayBase* elements = boilerplate->elements();
- // We skip COW elements since they are shared, and we are sure that if the
- // boilerplate exists there must have been at least one instantiation.
- if (!elements->IsCowArray()) {
- RecordVirtualObjectStats(elements, ObjectStats::BOILERPLATE_ELEMENTS_TYPE,
- elements->Size());
- }
+ RecordSimpleVirtualObjectStats(site, elements,
+ ObjectStats::BOILERPLATE_ELEMENTS_TYPE);
}
-void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj) {
- Map* map = obj->map();
-
- // Record for the InstanceType.
- int object_size = obj->Size();
- RecordObjectStats(obj, map->instance_type(), object_size);
-
- // Record specific sub types where possible.
- if (obj->IsMap()) RecordMapDetails(Map::cast(obj));
- if (obj->IsObjectTemplateInfo() || obj->IsFunctionTemplateInfo()) {
- RecordTemplateInfoDetails(TemplateInfo::cast(obj));
+void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
+ FunctionTemplateInfo* fti) {
+ // named_property_handler and indexed_property_handler are recorded as
+ // INTERCEPTOR_INFO_TYPE.
+ if (!fti->call_code()->IsUndefined(isolate())) {
+ RecordSimpleVirtualObjectStats(
+ fti, CallHandlerInfo::cast(fti->call_code()),
+ ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (obj->IsBytecodeArray()) {
- RecordBytecodeArrayDetails(BytecodeArray::cast(obj));
+ if (!fti->instance_call_handler()->IsUndefined(isolate())) {
+ RecordSimpleVirtualObjectStats(
+ fti, CallHandlerInfo::cast(fti->instance_call_handler()),
+ ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (obj->IsCode()) RecordCodeDetails(Code::cast(obj));
- if (obj->IsSharedFunctionInfo()) {
- RecordSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSGlobalObjectDetails(
+ JSGlobalObject* object) {
+ // Properties.
+ GlobalDictionary* properties = object->global_dictionary();
+ RecordHashTableVirtualObjectStats(object, properties,
+ ObjectStats::GLOBAL_PROPERTIES_TYPE);
+ // Elements.
+ FixedArrayBase* elements = object->elements();
+ RecordSimpleVirtualObjectStats(object, elements,
+ ObjectStats::GLOBAL_ELEMENTS_TYPE);
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSCollectionDetails(
+ JSObject* object) {
+ if (object->IsJSMap()) {
+ RecordSimpleVirtualObjectStats(
+ object, FixedArray::cast(JSMap::cast(object)->table()),
+ ObjectStats::JS_COLLETION_TABLE_TYPE);
}
- if (obj->IsFixedArray()) RecordFixedArrayDetails(FixedArray::cast(obj));
- if (obj->IsJSObject()) RecordJSObjectDetails(JSObject::cast(obj));
- if (obj->IsJSWeakCollection()) {
- RecordJSWeakCollectionDetails(JSWeakCollection::cast(obj));
+ if (object->IsJSSet()) {
+ RecordSimpleVirtualObjectStats(
+ object, FixedArray::cast(JSSet::cast(object)->table()),
+ ObjectStats::JS_COLLETION_TABLE_TYPE);
}
- if (obj->IsJSCollection()) {
- RecordJSCollectionDetails(JSObject::cast(obj));
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject* object) {
+ // JSGlobalObject is recorded separately.
+ if (object->IsJSGlobalObject()) return;
+
+ // Properties.
+ if (object->HasFastProperties()) {
+ PropertyArray* properties = object->property_array();
+ CHECK_EQ(PROPERTY_ARRAY_TYPE, properties->map()->instance_type());
+ } else {
+ NameDictionary* properties = object->property_dictionary();
+ RecordHashTableVirtualObjectStats(
+ object, properties, ObjectStats::OBJECT_PROPERTY_DICTIONARY_TYPE);
}
- if (obj->IsScript()) RecordScriptDetails(Script::cast(obj));
+ // Elements.
+ FixedArrayBase* elements = object->elements();
+ RecordSimpleVirtualObjectStats(object, elements, ObjectStats::ELEMENTS_TYPE);
}
-class ObjectStatsCollectorImpl::CompilationCacheTableVisitor
- : public RootVisitor {
- public:
- explicit CompilationCacheTableVisitor(ObjectStatsCollectorImpl* parent)
- : parent_(parent) {}
-
- void VisitRootPointers(Root root, Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- HeapObject* obj = HeapObject::cast(*current);
- if (obj->IsUndefined(parent_->heap_->isolate())) continue;
- CHECK(obj->IsCompilationCacheTable());
- parent_->RecordHashTableHelper(nullptr, CompilationCacheTable::cast(obj),
- COMPILATION_CACHE_TABLE_SUB_TYPE);
+static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
+ Object* obj, FeedbackSlotKind kind, Isolate* isolate) {
+ switch (kind) {
+ case FeedbackSlotKind::kCall:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_CALL_TYPE;
+
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+ case FeedbackSlotKind::kLoadKeyed:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_TYPE;
+
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kStoreGlobalSloppy:
+ case FeedbackSlotKind::kStoreGlobalStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ if (obj == *isolate->factory()->uninitialized_symbol() ||
+ obj == *isolate->factory()->premonomorphic_symbol()) {
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE;
+ }
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_TYPE;
+
+ case FeedbackSlotKind::kBinaryOp:
+ case FeedbackSlotKind::kCompareOp:
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_ENUM_TYPE;
+
+ default:
+ return ObjectStats::FEEDBACK_VECTOR_SLOT_OTHER_TYPE;
+ }
+}
+
+void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
+ FeedbackVector* vector) {
+ if (virtual_objects_.find(vector) == virtual_objects_.end()) {
+ // Manually insert the feedback vector into the virtual object list, since
+ // we're logging its component parts separately.
+ virtual_objects_.insert(vector);
+
+ size_t calculated_size = 0;
+
+ // Log the feedback vector's header (fixed fields).
+ size_t header_size =
+ reinterpret_cast<Address>(vector->slots_start()) - vector->address();
+ stats_->RecordVirtualObjectStats(ObjectStats::FEEDBACK_VECTOR_HEADER_TYPE,
+ header_size,
+ ObjectStats::kNoOverAllocation);
+ calculated_size += header_size;
+
+ // Iterate over the feedback slots and log each one.
+ FeedbackMetadataIterator it(vector->metadata());
+ while (it.HasNext()) {
+ FeedbackSlot slot = it.Next();
+ // Log the entry (or entries) taken up by this slot.
+ size_t slot_size = it.entry_size() * kPointerSize;
+ stats_->RecordVirtualObjectStats(
+ GetFeedbackSlotType(vector->Get(slot), it.kind(), heap_->isolate()),
+ slot_size, ObjectStats::kNoOverAllocation);
+ calculated_size += slot_size;
+
+ // Log the monomorphic/polymorphic helper objects that this slot owns.
+ for (int i = 0; i < it.entry_size(); i++) {
+ Object* raw_object = vector->get(slot.ToInt() + i);
+ if (!raw_object->IsHeapObject()) continue;
+ HeapObject* object = HeapObject::cast(raw_object);
+ if (object->IsCell() || object->IsFixedArray()) {
+ RecordSimpleVirtualObjectStats(
+ vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
+ }
+ }
}
+
+ CHECK_EQ(calculated_size, vector->Size());
}
+}
- private:
- ObjectStatsCollectorImpl* parent_;
-};
+void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
+ FixedArray* array) {
+ if (IsCowArray(array)) {
+ RecordVirtualObjectStats(nullptr, array, ObjectStats::COW_ARRAY_TYPE,
+ array->Size(), ObjectStats::kNoOverAllocation,
+ kIgnoreCow);
+ }
+}
+
+void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj, Phase phase) {
+ Map* map = obj->map();
+ switch (phase) {
+ case kPhase1:
+ if (obj->IsFeedbackVector()) {
+ RecordVirtualFeedbackVectorDetails(FeedbackVector::cast(obj));
+ } else if (obj->IsMap()) {
+ RecordVirtualMapDetails(Map::cast(obj));
+ } else if (obj->IsBytecodeArray()) {
+ RecordVirtualBytecodeArrayDetails(BytecodeArray::cast(obj));
+ } else if (obj->IsCode()) {
+ RecordVirtualCodeDetails(Code::cast(obj));
+ } else if (obj->IsFunctionTemplateInfo()) {
+ RecordVirtualFunctionTemplateInfoDetails(
+ FunctionTemplateInfo::cast(obj));
+ } else if (obj->IsJSFunction()) {
+ RecordVirtualJSFunctionDetails(JSFunction::cast(obj));
+ } else if (obj->IsJSGlobalObject()) {
+ RecordVirtualJSGlobalObjectDetails(JSGlobalObject::cast(obj));
+ } else if (obj->IsJSObject()) {
+        // This phase needs to come after RecordVirtualAllocationSiteDetails
+        // so that boilerplate objects are split off properly.
+ RecordVirtualJSObjectDetails(JSObject::cast(obj));
+ } else if (obj->IsJSCollection()) {
+ RecordVirtualJSCollectionDetails(JSObject::cast(obj));
+ } else if (obj->IsSharedFunctionInfo()) {
+ RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
+ } else if (obj->IsContext()) {
+ RecordVirtualContext(Context::cast(obj));
+ } else if (obj->IsScript()) {
+ RecordVirtualScriptDetails(Script::cast(obj));
+ } else if (obj->IsFixedArray()) {
+        // Has to go last as the FixedArray check otherwise triggers too
+        // eagerly.
+ RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
+ }
+ break;
+ case kPhase2:
+ RecordObjectStats(obj, map->instance_type(), obj->Size());
+ break;
+ }
+}
void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
- // Global FixedArrays.
- RecordFixedArrayHelper(nullptr, heap_->weak_new_space_object_to_code_list(),
- WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->serialized_objects(),
- SERIALIZED_OBJECTS_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->number_string_cache(),
- NUMBER_STRING_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->single_character_string_cache(),
- SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->string_split_cache(),
- STRING_SPLIT_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->regexp_multiple_cache(),
- REGEXP_MULTIPLE_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, heap_->retained_maps(),
- RETAINED_MAPS_SUB_TYPE, 0);
-
- // Global weak FixedArrays.
- RecordFixedArrayHelper(
+ // Iterate boilerplates first to disambiguate them from regular JS objects.
+ Object* list = heap_->allocation_sites_list();
+ while (list->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(list);
+ RecordVirtualAllocationSiteDetails(site);
+ list = site->weak_next();
+ }
+
+ // FixedArray.
+ RecordSimpleVirtualObjectStats(
+ nullptr, heap_->weak_new_space_object_to_code_list(),
+ ObjectStats::WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->serialized_objects(),
+ ObjectStats::SERIALIZED_OBJECTS_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->number_string_cache(),
+ ObjectStats::NUMBER_STRING_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(
+ nullptr, heap_->single_character_string_cache(),
+ ObjectStats::SINGLE_CHARACTER_STRING_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->string_split_cache(),
+ ObjectStats::STRING_SPLIT_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->regexp_multiple_cache(),
+ ObjectStats::REGEXP_MULTIPLE_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr, heap_->retained_maps(),
+ ObjectStats::RETAINED_MAPS_TYPE);
+
+ // WeakFixedArray.
+ RecordSimpleVirtualObjectStats(
nullptr, WeakFixedArray::cast(heap_->noscript_shared_function_infos()),
- NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
- RecordFixedArrayHelper(nullptr, WeakFixedArray::cast(heap_->script_list()),
- SCRIPT_LIST_SUB_TYPE, 0);
-
- // Global hash tables.
- RecordHashTableHelper(nullptr, heap_->string_table(), STRING_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->weak_object_to_code_table(),
- OBJECT_TO_CODE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->code_stubs(),
- CODE_STUBS_TABLE_SUB_TYPE);
- RecordHashTableHelper(nullptr, heap_->empty_property_dictionary(),
- EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE);
- CompilationCache* compilation_cache = heap_->isolate()->compilation_cache();
- CompilationCacheTableVisitor v(this);
- compilation_cache->Iterate(&v);
+ ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr,
+ WeakFixedArray::cast(heap_->script_list()),
+ ObjectStats::SCRIPT_LIST_TYPE);
+
+ // HashTable.
+ RecordHashTableVirtualObjectStats(nullptr, heap_->string_table(),
+ ObjectStats::STRING_TABLE_TYPE);
+ RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
+ ObjectStats::CODE_STUBS_TABLE_TYPE);
+
+ // WeakHashTable.
+ RecordHashTableVirtualObjectStats(nullptr, heap_->weak_object_to_code_table(),
+ ObjectStats::OBJECT_TO_CODE_TYPE);
}
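The boilerplate pre-pass at the top of CollectGlobalStatistics is a plain walk of the allocation-site weak list. A stripped-down sketch of that traversal shape (the types and nullptr terminator are stand-ins for V8's):

    struct AllocationSite {
      AllocationSite* weak_next = nullptr;
    };

    template <typename Visitor>
    void ForEachAllocationSite(AllocationSite* list, Visitor visit) {
      // Follow weak_next links until the list terminator is reached.
      for (AllocationSite* site = list; site != nullptr;
           site = site->weak_next) {
        visit(site);
      }
    }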
void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
InstanceType type,
size_t size) {
- if (virtual_objects_.find(obj) == virtual_objects_.end())
+ if (virtual_objects_.find(obj) == virtual_objects_.end()) {
stats_->RecordObjectStats(type, size);
+ }
}
-static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
- return array->map()->instance_type() == FIXED_ARRAY_TYPE &&
- array != heap->empty_fixed_array() &&
- array != heap->empty_sloppy_arguments_elements() &&
- array != heap->empty_slow_element_dictionary() &&
- array != heap->empty_property_dictionary();
+bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase* array) {
+ return array != heap_->empty_fixed_array() &&
+ array != heap_->empty_sloppy_arguments_elements() &&
+ array != heap_->empty_slow_element_dictionary() &&
+ array != heap_->empty_property_dictionary();
}
-static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
- return array->map() == heap->fixed_cow_array_map();
+bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase* array) {
+ return array->map() == heap_->fixed_cow_array_map();
}
bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
@@ -460,256 +613,226 @@ bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
-bool ObjectStatsCollectorImpl::RecordFixedArrayHelper(HeapObject* parent,
- FixedArray* array,
- int subtype,
- size_t overhead) {
- if (SameLiveness(parent, array) && CanRecordFixedArray(heap_, array) &&
- !IsCowArray(heap_, array)) {
- return stats_->RecordFixedArraySubTypeStats(array, subtype, array->Size(),
- overhead);
+void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
+ // TODO(mlippautz): map->dependent_code(): DEPENDENT_CODE_TYPE.
+
+ DescriptorArray* array = map->instance_descriptors();
+ if (map->owns_descriptors() && array != heap_->empty_descriptor_array()) {
+ // DescriptorArray has its own instance type.
+ EnumCache* enum_cache = array->GetEnumCache();
+ RecordSimpleVirtualObjectStats(array, enum_cache->keys(),
+ ObjectStats::ENUM_CACHE_TYPE);
+ RecordSimpleVirtualObjectStats(array, enum_cache->indices(),
+ ObjectStats::ENUM_INDICES_CACHE_TYPE);
}
- return false;
-}
-void ObjectStatsCollectorImpl::RecursivelyRecordFixedArrayHelper(
- HeapObject* parent, FixedArray* array, int subtype) {
- if (RecordFixedArrayHelper(parent, array, subtype, 0)) {
- for (int i = 0; i < array->length(); i++) {
- if (array->get(i)->IsFixedArray()) {
- RecursivelyRecordFixedArrayHelper(
- parent, FixedArray::cast(array->get(i)), subtype);
+ if (map->is_prototype_map()) {
+ if (map->prototype_info()->IsPrototypeInfo()) {
+ PrototypeInfo* info = PrototypeInfo::cast(map->prototype_info());
+ Object* users = info->prototype_users();
+ if (users->IsWeakFixedArray()) {
+ RecordSimpleVirtualObjectStats(map, WeakFixedArray::cast(users),
+ ObjectStats::PROTOTYPE_USERS_TYPE);
}
}
}
}
-template <class HashTable>
-void ObjectStatsCollectorImpl::RecordHashTableHelper(HeapObject* parent,
- HashTable* array,
- int subtype) {
- int used = array->NumberOfElements() * HashTable::kEntrySize * kPointerSize;
- CHECK_GE(array->Size(), used);
- size_t overhead = array->Size() - used -
- HashTable::kElementsStartIndex * kPointerSize -
- FixedArray::kHeaderSize;
- RecordFixedArrayHelper(parent, array, subtype, overhead);
-}
-
-void ObjectStatsCollectorImpl::RecordJSObjectDetails(JSObject* object) {
- size_t overhead = 0;
- FixedArrayBase* elements = object->elements();
- if (CanRecordFixedArray(heap_, elements) && !IsCowArray(heap_, elements)) {
- if (elements->IsDictionary() && SameLiveness(object, elements)) {
- NumberDictionary* dict = NumberDictionary::cast(elements);
- RecordHashTableHelper(object, dict, DICTIONARY_ELEMENTS_SUB_TYPE);
- } else {
- if (IsHoleyElementsKind(object->GetElementsKind())) {
- int used = object->GetFastElementsUsage() * kPointerSize;
- if (object->GetElementsKind() == HOLEY_DOUBLE_ELEMENTS) used *= 2;
- CHECK_GE(elements->Size(), used);
- overhead = elements->Size() - used - FixedArray::kHeaderSize;
- }
- stats_->RecordFixedArraySubTypeStats(elements, PACKED_ELEMENTS_SUB_TYPE,
- elements->Size(), overhead);
+void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
+ FixedArray* infos = script->shared_function_infos();
+ RecordSimpleVirtualObjectStats(
+ script, script->shared_function_infos(),
+ ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
+ // Split off weak cells from the regular weak cell type.
+ for (int i = 0; i < infos->length(); i++) {
+ if (infos->get(i)->IsWeakCell()) {
+ RecordSimpleVirtualObjectStats(
+ infos, WeakCell::cast(infos->get(i)),
+ ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
}
}
- if (object->IsJSGlobalObject()) {
- GlobalDictionary* properties =
- JSGlobalObject::cast(object)->global_dictionary();
- if (CanRecordFixedArray(heap_, properties) &&
- SameLiveness(object, properties)) {
- RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
- }
- } else if (!object->HasFastProperties()) {
- NameDictionary* properties = object->property_dictionary();
- if (CanRecordFixedArray(heap_, properties) &&
- SameLiveness(object, properties)) {
- RecordHashTableHelper(object, properties, DICTIONARY_PROPERTIES_SUB_TYPE);
- }
+ // Log the size of external source code.
+ Object* source = script->source();
+ if (source->IsExternalString()) {
+ // The contents of external strings aren't on the heap, so we have to record
+ // them manually.
+ ExternalString* external_source_string = ExternalString::cast(source);
+ size_t length_multiplier = external_source_string->IsTwoByteRepresentation()
+ ? kShortSize
+ : kCharSize;
+ size_t off_heap_size = external_source_string->length() * length_multiplier;
+ size_t on_heap_size = external_source_string->Size();
+ RecordVirtualObjectStats(script, external_source_string,
+ ObjectStats::SCRIPT_SOURCE_EXTERNAL_TYPE,
+ on_heap_size + off_heap_size,
+ ObjectStats::kNoOverAllocation);
+ } else if (source->IsHeapObject()) {
+ RecordSimpleVirtualObjectStats(
+ script, HeapObject::cast(source),
+ ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TYPE);
}
}
-void ObjectStatsCollectorImpl::RecordJSWeakCollectionDetails(
- JSWeakCollection* obj) {
- if (obj->table()->IsHashTable()) {
- ObjectHashTable* table = ObjectHashTable::cast(obj->table());
- int used = table->NumberOfElements() * ObjectHashTable::kEntrySize;
- size_t overhead = table->Size() - used;
- RecordFixedArrayHelper(obj, table, JS_WEAK_COLLECTION_SUB_TYPE, overhead);
+void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
+ SharedFunctionInfo* info) {
+ // Uncompiled SharedFunctionInfo gets its own category.
+ if (!info->is_compiled()) {
+ RecordSimpleVirtualObjectStats(
+ nullptr, info, ObjectStats::UNCOMPILED_SHARED_FUNCTION_INFO_TYPE);
}
+  // SharedFunctionInfo::feedback_metadata() is a COW array.
+ FeedbackMetadata* fm = FeedbackMetadata::cast(info->feedback_metadata());
+ RecordVirtualObjectStats(info, fm, ObjectStats::FEEDBACK_METADATA_TYPE,
+ fm->Size(), ObjectStats::kNoOverAllocation,
+ kIgnoreCow);
}
-void ObjectStatsCollectorImpl::RecordJSCollectionDetails(JSObject* obj) {
- // The JS versions use a different HashTable implementation that cannot use
- // the regular helper. Since overall impact is usually small just record
- // without overhead.
- if (obj->IsJSMap()) {
- RecordFixedArrayHelper(nullptr, FixedArray::cast(JSMap::cast(obj)->table()),
- JS_COLLECTION_SUB_TYPE, 0);
- }
- if (obj->IsJSSet()) {
- RecordFixedArrayHelper(nullptr, FixedArray::cast(JSSet::cast(obj)->table()),
- JS_COLLECTION_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::RecordVirtualJSFunctionDetails(
+ JSFunction* function) {
+ // Uncompiled JSFunctions get their own category.
+ if (!function->is_compiled()) {
+ RecordSimpleVirtualObjectStats(nullptr, function,
+ ObjectStats::UNCOMPILED_JS_FUNCTION_TYPE);
}
}
-void ObjectStatsCollectorImpl::RecordScriptDetails(Script* obj) {
- FixedArray* infos = FixedArray::cast(obj->shared_function_infos());
- RecordFixedArrayHelper(obj, infos, SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
-}
+namespace {
-void ObjectStatsCollectorImpl::RecordMapDetails(Map* map_obj) {
- DescriptorArray* array = map_obj->instance_descriptors();
- if (map_obj->owns_descriptors() && array != heap_->empty_descriptor_array() &&
- SameLiveness(map_obj, array)) {
- RecordFixedArrayHelper(map_obj, array, DESCRIPTOR_ARRAY_SUB_TYPE, 0);
- EnumCache* enum_cache = array->GetEnumCache();
- RecordFixedArrayHelper(array, enum_cache->keys(), ENUM_CACHE_SUB_TYPE, 0);
- RecordFixedArrayHelper(array, enum_cache->indices(),
- ENUM_INDICES_CACHE_SUB_TYPE, 0);
- }
+bool MatchesConstantElementsPair(Object* object) {
+ if (!object->IsTuple2()) return false;
+ Tuple2* tuple = Tuple2::cast(object);
+ return tuple->value1()->IsSmi() && tuple->value2()->IsFixedArray();
+}
- for (DependentCode* cur_dependent_code = map_obj->dependent_code();
- cur_dependent_code != heap_->empty_fixed_array();
- cur_dependent_code = DependentCode::cast(
- cur_dependent_code->get(DependentCode::kNextLinkIndex))) {
- RecordFixedArrayHelper(map_obj, cur_dependent_code, DEPENDENT_CODE_SUB_TYPE,
- 0);
- }
+} // namespace
- if (map_obj->is_prototype_map()) {
- if (map_obj->prototype_info()->IsPrototypeInfo()) {
- PrototypeInfo* info = PrototypeInfo::cast(map_obj->prototype_info());
- Object* users = info->prototype_users();
- if (users->IsWeakFixedArray()) {
- RecordFixedArrayHelper(map_obj, WeakFixedArray::cast(users),
- PROTOTYPE_USERS_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ HeapObject* parent, HeapObject* object,
+ ObjectStats::VirtualInstanceType type) {
+ if (RecordSimpleVirtualObjectStats(parent, object, type)) {
+ if (object->IsFixedArray()) {
+ FixedArray* array = FixedArray::cast(object);
+ for (int i = 0; i < array->length(); i++) {
+ Object* entry = array->get(i);
+ if (!entry->IsHeapObject()) continue;
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ array, HeapObject::cast(entry), type);
}
+ } else if (MatchesConstantElementsPair(object)) {
+ Tuple2* tuple = Tuple2::cast(object);
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ tuple, HeapObject::cast(tuple->value2()), type);
}
}
}
-void ObjectStatsCollectorImpl::RecordTemplateInfoDetails(TemplateInfo* obj) {
- if (obj->property_accessors()->IsFixedArray()) {
- RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_accessors()),
- TEMPLATE_INFO_SUB_TYPE, 0);
- }
- if (obj->property_list()->IsFixedArray()) {
- RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_list()),
- TEMPLATE_INFO_SUB_TYPE, 0);
+void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
+ BytecodeArray* bytecode) {
+ RecordSimpleVirtualObjectStats(
+ bytecode, bytecode->constant_pool(),
+ ObjectStats::BYTECODE_ARRAY_CONSTANT_POOL_TYPE);
+  // FixedArrays in the constant pool are used for holding descriptor
+  // information. They are shared with optimized code.
+ FixedArray* constant_pool = FixedArray::cast(bytecode->constant_pool());
+ for (int i = 0; i < constant_pool->length(); i++) {
+ Object* entry = constant_pool->get(i);
+ if (entry->IsFixedArray() || MatchesConstantElementsPair(entry)) {
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ constant_pool, HeapObject::cast(entry),
+ ObjectStats::EMBEDDED_OBJECT_TYPE);
+ }
}
+ RecordSimpleVirtualObjectStats(
+ bytecode, bytecode->handler_table(),
+ ObjectStats::BYTECODE_ARRAY_HANDLER_TABLE_TYPE);
}
-void ObjectStatsCollectorImpl::RecordBytecodeArrayDetails(BytecodeArray* obj) {
- RecordFixedArrayHelper(obj, obj->constant_pool(),
- BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE, 0);
- RecordFixedArrayHelper(obj, obj->handler_table(),
- BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE, 0);
+namespace {
+
+ObjectStats::VirtualInstanceType CodeKindToVirtualInstanceType(
+ Code::Kind kind) {
+ switch (kind) {
+#define CODE_KIND_CASE(type) \
+ case Code::type: \
+ return ObjectStats::type;
+ CODE_KIND_LIST(CODE_KIND_CASE)
+#undef CODE_KIND_CASE
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
}
-void ObjectStatsCollectorImpl::RecordCodeDetails(Code* code) {
- stats_->RecordCodeSubTypeStats(code->kind(), code->Size());
- RecordFixedArrayHelper(code, code->deoptimization_data(),
- DEOPTIMIZATION_DATA_SUB_TYPE, 0);
+} // namespace
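CodeKindToVirtualInstanceType leans on the X-macro idiom: CODE_KIND_LIST(V) expands V once per code kind, so the same list that defines Code::Kind can also generate the switch cases here. A self-contained sketch of the idiom with a hypothetical three-entry list:

    #define MY_KIND_LIST(V) \
      V(STUB)               \
      V(BUILTIN)            \
      V(OPTIMIZED_FUNCTION)

    enum Kind {
    #define DEFINE_ENUM(name) name,
      MY_KIND_LIST(DEFINE_ENUM)
    #undef DEFINE_ENUM
    };

    const char* KindToString(Kind kind) {
      switch (kind) {
    #define KIND_CASE(name) \
      case name:            \
        return #name;
        MY_KIND_LIST(KIND_CASE)
    #undef KIND_CASE
      }
      return "unknown";
    }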
+
+void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
+ RecordSimpleVirtualObjectStats(nullptr, code,
+ CodeKindToVirtualInstanceType(code->kind()));
+ RecordSimpleVirtualObjectStats(code, code->deoptimization_data(),
+ ObjectStats::DEOPTIMIZATION_DATA_TYPE);
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
DeoptimizationData* input_data =
DeoptimizationData::cast(code->deoptimization_data());
if (input_data->length() > 0) {
- RecordFixedArrayHelper(code->deoptimization_data(),
- input_data->LiteralArray(),
- OPTIMIZED_CODE_LITERALS_SUB_TYPE, 0);
+ RecordSimpleVirtualObjectStats(code->deoptimization_data(),
+ input_data->LiteralArray(),
+ ObjectStats::OPTIMIZED_CODE_LITERALS_TYPE);
}
}
- RecordFixedArrayHelper(code, code->handler_table(), HANDLER_TABLE_SUB_TYPE,
- 0);
int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Object* target = it.rinfo()->target_object();
- if (target->IsFixedArray()) {
- RecursivelyRecordFixedArrayHelper(code, FixedArray::cast(target),
- EMBEDDED_OBJECT_SUB_TYPE);
+ if (target->IsFixedArray() || MatchesConstantElementsPair(target)) {
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
}
}
}
-void ObjectStatsCollectorImpl::RecordSharedFunctionInfoDetails(
- SharedFunctionInfo* sfi) {
- FixedArray* scope_info = sfi->scope_info();
- RecordFixedArrayHelper(sfi, scope_info, SCOPE_INFO_SUB_TYPE, 0);
- FeedbackMetadata* feedback_metadata = sfi->feedback_metadata();
- if (!feedback_metadata->is_empty()) {
- RecordFixedArrayHelper(sfi, feedback_metadata, FEEDBACK_METADATA_SUB_TYPE,
- 0);
- }
-}
-
-void ObjectStatsCollectorImpl::RecordFixedArrayDetails(FixedArray* array) {
- if (array->IsContext()) {
- RecordFixedArrayHelper(nullptr, array, CONTEXT_SUB_TYPE, 0);
- }
- if (IsCowArray(heap_, array) && CanRecordFixedArray(heap_, array)) {
- stats_->RecordFixedArraySubTypeStats(array, COPY_ON_WRITE_SUB_TYPE,
- array->Size(), 0);
- }
- if (array->IsNativeContext()) {
- Context* native_ctx = Context::cast(array);
- RecordHashTableHelper(array,
- native_ctx->slow_template_instantiations_cache(),
- SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE);
- FixedArray* fast_cache = native_ctx->fast_template_instantiations_cache();
- stats_->RecordFixedArraySubTypeStats(
- fast_cache, FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE,
- fast_cache->Size(), 0);
+void ObjectStatsCollectorImpl::RecordVirtualContext(Context* context) {
+ if (context->IsNativeContext()) {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::NATIVE_CONTEXT_TYPE);
+ } else if (context->IsFunctionContext()) {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::FUNCTION_CONTEXT_TYPE);
+ } else {
+ RecordSimpleVirtualObjectStats(nullptr, context,
+ ObjectStats::OTHER_CONTEXT_TYPE);
}
}
class ObjectStatsVisitor {
public:
- enum CollectionMode {
- kRegular,
- kVirtual,
- };
-
ObjectStatsVisitor(Heap* heap, ObjectStatsCollectorImpl* live_collector,
ObjectStatsCollectorImpl* dead_collector,
- CollectionMode mode)
+ ObjectStatsCollectorImpl::Phase phase)
: live_collector_(live_collector),
dead_collector_(dead_collector),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()),
- mode_(mode) {}
+ phase_(phase) {}
bool Visit(HeapObject* obj, int size) {
if (marking_state_->IsBlack(obj)) {
- Collect(live_collector_, obj);
+ live_collector_->CollectStatistics(obj, phase_);
} else {
DCHECK(!marking_state_->IsGrey(obj));
- Collect(dead_collector_, obj);
+ dead_collector_->CollectStatistics(obj, phase_);
}
return true;
}
private:
- void Collect(ObjectStatsCollectorImpl* collector, HeapObject* obj) {
- switch (mode_) {
- case kRegular:
- collector->CollectStatistics(obj);
- break;
- case kVirtual:
- collector->CollectVirtualStatistics(obj);
- break;
- }
- }
-
ObjectStatsCollectorImpl* live_collector_;
ObjectStatsCollectorImpl* dead_collector_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
- CollectionMode mode_;
+ ObjectStatsCollectorImpl::Phase phase_;
};
namespace {
@@ -731,19 +854,10 @@ void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
void ObjectStatsCollector::Collect() {
ObjectStatsCollectorImpl live_collector(heap_, live_);
ObjectStatsCollectorImpl dead_collector(heap_, dead_);
- // 1. Collect system type otherwise indistinguishable from other types.
- {
- ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
- ObjectStatsVisitor::kVirtual);
- IterateHeap(heap_, &visitor);
- }
-
- // 2. Collect globals; only applies to live objects.
live_collector.CollectGlobalStatistics();
- // 3. Collect rest.
- {
+ for (int i = 0; i < ObjectStatsCollectorImpl::kNumberOfPhases; i++) {
ObjectStatsVisitor visitor(heap_, &live_collector, &dead_collector,
- ObjectStatsVisitor::kRegular);
+ static_cast<ObjectStatsCollectorImpl::Phase>(i));
IterateHeap(heap_, &visitor);
}
}
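Condensed, the rewritten Collect() runs one full heap walk per phase: phase 1 classifies objects into virtual instance types and remembers them, phase 2 records plain InstanceType stats only for objects phase 1 did not claim. A sketch of that control flow under stand-in types:

    #include <unordered_set>
    #include <vector>

    struct HeapObject;

    struct Collector {
      std::unordered_set<HeapObject*> virtual_objects;

      // Phase 1: objects matching a virtual type are recorded and
      // remembered; the boolean stands in for the big type dispatch above.
      void Phase1(HeapObject* obj, bool classifies_as_virtual) {
        if (classifies_as_virtual) virtual_objects.insert(obj);
      }

      // Phase 2: plain InstanceType stats, skipping phase-1 claims.
      void Phase2(HeapObject* obj) {
        if (virtual_objects.count(obj) == 0) {
          // RecordObjectStats(type, size) would go here.
        }
      }
    };

    void CollectAll(Collector& c,
                    const std::vector<HeapObject*>& heap_objects,
                    bool (*is_virtual)(HeapObject*)) {
      for (HeapObject* obj : heap_objects) c.Phase1(obj, is_virtual(obj));
      for (HeapObject* obj : heap_objects) c.Phase2(obj);
    }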
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 500ce36bd9..723ae53fd5 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -5,13 +5,8 @@
#ifndef V8_HEAP_OBJECT_STATS_H_
#define V8_HEAP_OBJECT_STATS_H_
-#include <set>
-
-#include "src/base/ieee754.h"
-#include "src/heap/heap.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/objects-visiting.h"
#include "src/objects.h"
+#include "src/objects/code.h"
// These instance types do not exist for actual use but are merely introduced
// for object stats tracing. In contrast to Code and FixedArray sub types
@@ -19,18 +14,71 @@
// tracing.
//
// Update LAST_VIRTUAL_TYPE below when changing this macro.
-#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
- V(BOILERPLATE_ELEMENTS_TYPE) \
- V(BOILERPLATE_NAME_DICTIONARY_TYPE) \
- V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
- V(JS_ARRAY_BOILERPLATE_TYPE) \
- V(JS_OBJECT_BOILERPLATE_TYPE)
+#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
+ CODE_KIND_LIST(V) \
+ V(BOILERPLATE_ELEMENTS_TYPE) \
+ V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
+ V(BOILERPLATE_PROPERTY_DICTIONARY_TYPE) \
+ V(BYTECODE_ARRAY_CONSTANT_POOL_TYPE) \
+ V(BYTECODE_ARRAY_HANDLER_TABLE_TYPE) \
+ V(CODE_STUBS_TABLE_TYPE) \
+ V(COW_ARRAY_TYPE) \
+ V(DEOPTIMIZATION_DATA_TYPE) \
+ V(DEPENDENT_CODE_TYPE) \
+ V(ELEMENTS_TYPE) \
+ V(EMBEDDED_OBJECT_TYPE) \
+ V(ENUM_CACHE_TYPE) \
+ V(ENUM_INDICES_CACHE_TYPE) \
+ V(FEEDBACK_METADATA_TYPE) \
+ V(FEEDBACK_VECTOR_ENTRY_TYPE) \
+ V(FEEDBACK_VECTOR_HEADER_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_CALL_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_ENUM_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_LOAD_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_OTHER_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_STORE_TYPE) \
+ V(FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE) \
+ V(FUNCTION_CONTEXT_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE) \
+ V(GLOBAL_ELEMENTS_TYPE) \
+ V(GLOBAL_PROPERTIES_TYPE) \
+ V(JS_ARRAY_BOILERPLATE_TYPE) \
+ V(JS_COLLETION_TABLE_TYPE) \
+ V(JS_OBJECT_BOILERPLATE_TYPE) \
+ V(NATIVE_CONTEXT_TYPE) \
+ V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
+ V(NUMBER_STRING_CACHE_TYPE) \
+ V(OBJECT_PROPERTY_DICTIONARY_TYPE) \
+ V(OBJECT_TO_CODE_TYPE) \
+ V(OPTIMIZED_CODE_LITERALS_TYPE) \
+ V(OTHER_CONTEXT_TYPE) \
+ V(PROTOTYPE_USERS_TYPE) \
+ V(REGEXP_MULTIPLE_CACHE_TYPE) \
+ V(RETAINED_MAPS_TYPE) \
+ V(SCRIPT_LIST_TYPE) \
+ V(SCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
+ V(SCRIPT_SOURCE_EXTERNAL_TYPE) \
+ V(SCRIPT_SOURCE_NON_EXTERNAL_TYPE) \
+ V(SERIALIZED_OBJECTS_TYPE) \
+ V(SINGLE_CHARACTER_STRING_CACHE_TYPE) \
+ V(STRING_SPLIT_CACHE_TYPE) \
+ V(STRING_TABLE_TYPE) \
+ V(UNCOMPILED_JS_FUNCTION_TYPE) \
+ V(UNCOMPILED_SHARED_FUNCTION_INFO_TYPE) \
+ V(WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE)
namespace v8 {
namespace internal {
+class Heap;
+class Isolate;
+
class ObjectStats {
public:
+ static const size_t kNoOverAllocation = 0;
+
explicit ObjectStats(Heap* heap) : heap_(heap) { ClearObjectStats(); }
// See description on VIRTUAL_INSTANCE_TYPE_LIST.
@@ -38,18 +86,14 @@ class ObjectStats {
#define DEFINE_VIRTUAL_INSTANCE_TYPE(type) type,
VIRTUAL_INSTANCE_TYPE_LIST(DEFINE_VIRTUAL_INSTANCE_TYPE)
#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_VIRTUAL_TYPE = JS_OBJECT_BOILERPLATE_TYPE,
+ LAST_VIRTUAL_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE,
};
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
enum {
- FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
- FIRST_FIXED_ARRAY_SUB_TYPE =
- FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- FIRST_VIRTUAL_TYPE =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ FIRST_VIRTUAL_TYPE = LAST_TYPE + 1,
OBJECT_STATS_COUNT = FIRST_VIRTUAL_TYPE + LAST_VIRTUAL_TYPE + 1,
};
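After this change the stats buffer is simply all regular instance types followed by all virtual types. A worked sketch of the index arithmetic (the concrete constants are illustrative, not V8's real values):

    // Regular InstanceTypes occupy slots [0, LAST_TYPE]; virtual types are
    // appended directly after them.
    constexpr int kLastType = 340;        // example value only
    constexpr int kLastVirtualType = 52;  // example value only
    constexpr int kFirstVirtualType = kLastType + 1;
    constexpr int kObjectStatsCount = kFirstVirtualType + kLastVirtualType + 1;

    constexpr int SlotForVirtualType(int v) { return kFirstVirtualType + v; }

    static_assert(SlotForVirtualType(0) == 341,
                  "virtual types follow LAST_TYPE");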
@@ -60,10 +104,8 @@ class ObjectStats {
void CheckpointObjectStats();
void RecordObjectStats(InstanceType type, size_t size);
- void RecordVirtualObjectStats(VirtualInstanceType type, size_t size);
- void RecordCodeSubTypeStats(int code_sub_type, size_t size);
- bool RecordFixedArraySubTypeStats(FixedArrayBase* array, int array_sub_type,
- size_t size, size_t over_allocated);
+ void RecordVirtualObjectStats(VirtualInstanceType type, size_t size,
+ size_t over_allocated);
size_t object_count_last_gc(size_t index) {
return object_counts_last_time_[index];
@@ -105,8 +147,6 @@ class ObjectStats {
// Detailed histograms by InstanceType.
size_t size_histogram_[OBJECT_STATS_COUNT][kNumberOfBuckets];
size_t over_allocated_histogram_[OBJECT_STATS_COUNT][kNumberOfBuckets];
-
- std::set<FixedArrayBase*> visited_fixed_array_sub_types_;
};
class ObjectStatsCollector {
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 0a8c866979..8384cead02 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_VISITING_INL_H_
-#define V8_OBJECTS_VISITING_INL_H_
+#ifndef V8_HEAP_OBJECTS_VISITING_INL_H_
+#define V8_HEAP_OBJECTS_VISITING_INL_H_
#include "src/heap/objects-visiting.h"
@@ -189,4 +189,4 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_VISITING_INL_H_
+#endif // V8_HEAP_OBJECTS_VISITING_INL_H_
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index c20434a283..7746c91c71 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_VISITING_H_
-#define V8_OBJECTS_VISITING_H_
+#ifndef V8_HEAP_OBJECTS_VISITING_H_
+#define V8_HEAP_OBJECTS_VISITING_H_
#include "src/allocation.h"
#include "src/layout-descriptor.h"
@@ -31,6 +31,7 @@ class JSWeakCollection;
V(Code) \
V(CodeDataContainer) \
V(ConsString) \
+ V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -132,4 +133,4 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_VISITING_H_
+#endif // V8_HEAP_OBJECTS_VISITING_H_
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index cd9c45141d..4e0f259c00 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REMEMBERED_SET_H
-#define V8_REMEMBERED_SET_H
+#ifndef V8_HEAP_REMEMBERED_SET_H_
+#define V8_HEAP_REMEMBERED_SET_H_
#include "src/assembler.h"
#include "src/heap/heap.h"
@@ -298,8 +298,7 @@ class UpdateTypedSlotHelper {
Object* new_target = old_target;
SlotCallbackResult result = callback(&new_target);
if (new_target != old_target) {
- rinfo->set_target_address(old_target->GetIsolate(),
- Code::cast(new_target)->instruction_start());
+ rinfo->set_target_address(Code::cast(new_target)->instruction_start());
}
return result;
}
@@ -359,4 +358,4 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
} // namespace internal
} // namespace v8
-#endif // V8_REMEMBERED_SET_H
+#endif // V8_HEAP_REMEMBERED_SET_H_
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index e84659c6d4..34f7bfafc3 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -64,7 +64,7 @@ class V8_EXPORT_PRIVATE ScavengeJob {
static const int kAverageIdleTimeMs = 5;
// The number of bytes to be allocated in new space before the next idle
// task is posted.
- static const size_t kBytesAllocatedBeforeNextIdleTask = 512 * KB;
+ static const size_t kBytesAllocatedBeforeNextIdleTask = 1024 * KB;
// The minimum size of allocated new space objects to trigger a scavenge.
static const size_t kMinAllocationLimit = 512 * KB;
// The allocation limit cannot exceed this fraction of the new space capacity.
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index b61872074e..2971db98cc 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -71,7 +71,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(NEW_SPACE, object_size, alignment);
@@ -97,7 +97,7 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
int object_size) {
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(OLD_SPACE, object_size, alignment);
@@ -228,9 +228,8 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
- HeapObject* dest = first_word.ToForwardingAddress();
- DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
- base::AsAtomicPointer::Relaxed_Store(p, dest);
+ DCHECK(heap()->InFromSpace(*p));
+ *p = first_word.ToForwardingAddress();
return;
}
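The store above becomes a plain (non-atomic) write and no longer needs GetIsolate(); the forwarding-word protocol itself is unchanged. A stripped-down sketch of that protocol with stand-in types:

    struct HeapObject;

    // When an object is evacuated, its first (map) word is overwritten with
    // a tagged pointer to the new copy; later visitors detect this and just
    // update their slot.
    struct MapWord {
      HeapObject* forwarded = nullptr;
      bool IsForwardingAddress() const { return forwarded != nullptr; }
      HeapObject* ToForwardingAddress() const { return forwarded; }
    };

    void UpdateSlot(HeapObject** slot, const MapWord& first_word) {
      if (first_word.IsForwardingAddress()) {
        *slot = first_word.ToForwardingAddress();  // already copied
        return;
      }
      // ...otherwise copy the object and install a forwarding address...
    }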
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index be5fb87a90..3baba9521b 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -97,6 +97,7 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
}
void Scavenger::ScavengePage(MemoryChunk* page) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::ScavengePage");
CodePageMemoryModificationScope memory_modification_scope(page);
RememberedSet<OLD_TO_NEW>::Iterate(
page,
@@ -115,6 +116,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
}
void Scavenger::Process(OneshotBarrier* barrier) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::Process");
// Threshold when to switch processing the promotion list to avoid
// allocating too much backing store in the worklist.
const int kProcessPromotionListThreshold = kPromotionListSegmentSize / 2;
@@ -160,12 +162,13 @@ void Scavenger::Finalize() {
allocator_.Finalize();
}
-void RootScavengeVisitor::VisitRootPointer(Root root, Object** p) {
+void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
+ Object** p) {
ScavengePointer(p);
}
-void RootScavengeVisitor::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
// Copy all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) ScavengePointer(p);
}
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 27ae2e8ab7..e0008ae694 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -117,8 +117,9 @@ class RootScavengeVisitor final : public RootVisitor {
RootScavengeVisitor(Heap* heap, Scavenger* scavenger)
: heap_(heap), scavenger_(scavenger) {}
- void VisitRootPointer(Root root, Object** p) final;
- void VisitRootPointers(Root root, Object** start, Object** end) final;
+ void VisitRootPointer(Root root, const char* description, Object** p) final;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) final;
private:
void ScavengePointer(Object** p);
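Adapting an existing visitor to the widened interface is mechanical: accept the description argument (which callers now supply for diagnostics) and ignore it if unused. A reduced sketch, with Root, Object, and the base class as stand-ins:

    struct Object;
    enum class Root { kStrongRoots };

    struct RootVisitor {
      virtual ~RootVisitor() = default;
      virtual void VisitRootPointer(Root root, const char* description,
                                    Object** p) = 0;
      virtual void VisitRootPointers(Root root, const char* description,
                                     Object** start, Object** end) = 0;
    };

    struct CountingVisitor final : public RootVisitor {
      int count = 0;
      void VisitRootPointer(Root, const char*, Object**) final { count++; }
      void VisitRootPointers(Root, const char*, Object** start,
                             Object** end) final {
        count += static_cast<int>(end - start);
      }
    };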
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 9e2d7e6354..8a7aca1694 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -222,7 +222,7 @@ bool Heap::CreateInitialMaps() {
(constructor_function_index)); \
}
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+ ALLOCATE_VARSIZE_MAP(SCOPE_INFO_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
@@ -289,12 +289,17 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+ // The "no closures" and "one closure" FeedbackCell maps need
+ // to be marked unstable because their objects can change maps.
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_closures_cell)
+ no_closures_cell_map()->mark_unstable();
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
+ one_closure_cell_map()->mark_unstable();
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
+
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
@@ -303,6 +308,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, name_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, global_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, number_dictionary)
+ ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, simple_number_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, weak_hash_table)
@@ -475,7 +481,7 @@ void Heap::CreateInitialObjects() {
// Create the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
- set_code_stubs(*NumberDictionary::New(isolate(), 128));
+ set_code_stubs(*SimpleNumberDictionary::New(isolate(), 128));
{
HandleScope scope(isolate());
@@ -533,7 +539,10 @@ void Heap::CreateInitialObjects() {
set_regexp_multiple_cache(*factory->NewFixedArray(
RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
- set_undefined_cell(*factory->NewCell(factory->undefined_value()));
+ // Allocate FeedbackCell for builtins.
+ Handle<FeedbackCell> many_closures_cell =
+ factory->NewManyClosuresCell(factory->undefined_value());
+ set_many_closures_cell(*many_closures_cell);
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
@@ -638,6 +647,14 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_promise_hook_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_promise_then_protector(*cell);
+
set_serialized_objects(empty_fixed_array());
set_serialized_global_proxy_sizes(empty_fixed_array());
@@ -650,6 +667,9 @@ void Heap::CreateInitialObjects() {
set_deserialize_lazy_handler_wide(Smi::kZero);
set_deserialize_lazy_handler_extra_wide(Smi::kZero);
+ // Initialize builtins constants table.
+ set_builtins_constants_table(empty_fixed_array());
+
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index f1edb6f2fb..7423665bcb 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SLOT_SET_H
-#define V8_SLOT_SET_H
+#ifndef V8_HEAP_SLOT_SET_H_
+#define V8_HEAP_SLOT_SET_H_
#include <map>
#include <stack>
@@ -641,4 +641,4 @@ class TypedSlotSet {
} // namespace internal
} // namespace v8
-#endif // V8_SLOT_SET_H
+#endif // V8_HEAP_SLOT_SET_H_
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 39a62327df..498c34bd54 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
+#include "src/base/v8-fallthrough.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/msan.h"
@@ -137,12 +138,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-void MemoryChunk::InitializeFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
- }
-}
-
bool PagedSpace::Contains(Address addr) {
if (heap_->lo_space()->FindPage(addr)) return false;
return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
@@ -157,6 +152,7 @@ void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
DCHECK_EQ(free_list(), category->owner());
+ category->set_free_list(nullptr);
free_list()->RemoveCategory(category);
});
}
@@ -164,7 +160,8 @@ void PagedSpace::UnlinkFreeListCategories(Page* page) {
size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
- page->ForAllFreeListCategories([&added](FreeListCategory* category) {
+ page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
+ category->set_free_list(&free_list_);
added += category->available();
category->Relink();
});
@@ -230,23 +227,23 @@ MemoryChunk* MemoryChunkIterator::next() {
case kOldSpaceState: {
if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
state_ = kMapState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kMapState: {
if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
state_ = kCodeState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kCodeState: {
if (code_iterator_ != heap_->code_space()->end())
return *(code_iterator_++);
state_ = kLargeObjectState;
- // Fall through.
+ V8_FALLTHROUGH;
}
case kLargeObjectState: {
if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
state_ = kFinishedState;
- // Fall through;
+ V8_FALLTHROUGH;
}
case kFinishedState:
return nullptr;
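The comment-style fall-through markers replaced here become the V8_FALLTHROUGH macro from src/base/v8-fallthrough.h (included into spaces-inl.h above), so compilers that support -Wimplicit-fallthrough can verify every deliberate fall-through. A sketch of what such a macro typically expands to; the authoritative definition lives in that header:

#if defined(__clang__)
#define V8_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define V8_FALLTHROUGH __attribute__((fallthrough))
#else
#define V8_FALLTHROUGH
#endif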
@@ -256,23 +253,14 @@ MemoryChunk* MemoryChunkIterator::next() {
UNREACHABLE();
}
-Page* FreeListCategory::page() const {
- return Page::FromAddress(
- reinterpret_cast<Address>(const_cast<FreeListCategory*>(this)));
-}
-
Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
return top(type) ? top(type)->page() : nullptr;
}
-FreeList* FreeListCategory::owner() {
- return reinterpret_cast<PagedSpace*>(
- Page::FromAddress(reinterpret_cast<Address>(this))->owner())
- ->free_list();
-}
+FreeList* FreeListCategory::owner() { return free_list_; }
bool FreeListCategory::is_linked() {
- return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
+ return prev_ != nullptr || next_ != nullptr;
}
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 2dd5e9b24d..d90cac90f2 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -71,6 +71,8 @@ bool HeapObjectIterator::AdvanceToNextPage() {
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
+ DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
+
for (SpaceIterator it(heap_); it.has_next();) {
it.next()->PauseAllocationObservers();
}
@@ -322,7 +324,12 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ unmapper_->active_unmapping_tasks_.Decrement(1);
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(unmapper_->heap_->isolate(),
+ "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+ }
}
Unmapper* const unmapper_;
@@ -332,13 +339,26 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
- if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
+ if (!MakeRoomForNewTasks()) {
// kMaxUnmapperTasks tasks are already running. Avoid creating any more.
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
+ kMaxUnmapperTasks);
+ }
return;
}
UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
- DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
- task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
+ task->id());
+ }
+ DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
+ DCHECK_LE(active_unmapping_tasks_.Value(), pending_unmapping_tasks_);
+ DCHECK_GE(active_unmapping_tasks_.Value(), 0);
+ active_unmapping_tasks_.Increment(1);
+ task_ids_[pending_unmapping_tasks_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
} else {
@@ -347,18 +367,41 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
void MemoryAllocator::Unmapper::WaitUntilCompleted() {
- for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
+ for (int i = 0; i < pending_unmapping_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
CancelableTaskManager::kTaskAborted) {
pending_unmapping_tasks_semaphore_.Wait();
}
}
- concurrent_unmapping_tasks_active_ = 0;
+ pending_unmapping_tasks_ = 0;
+ active_unmapping_tasks_.SetValue(0);
+
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::WaitUntilCompleted: no tasks remaining\n");
+ }
+}
+
+bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
+ DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
+
+ if (active_unmapping_tasks_.Value() == 0 && pending_unmapping_tasks_ > 0) {
+ // All previous unmapping tasks have been run to completion.
+ // Finalize those tasks to make room for new ones.
+ WaitUntilCompleted();
+ }
+ return pending_unmapping_tasks_ != kMaxUnmapperTasks;
}
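// Reviewer sketch (commentary, not part of the diff): the single counter is
// split so the two questions it answered are tracked separately.
//   pending_unmapping_tasks_   tasks created since the last
//                              WaitUntilCompleted(); indexes task_ids_.
//   active_unmapping_tasks_    atomic count of tasks still running.
// MakeRoomForNewTasks() can then detect "everything finished but never
// finalized" (active == 0 && pending > 0), finalize via WaitUntilCompleted(),
// and only report no room when kMaxUnmapperTasks are genuinely outstanding.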
template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
MemoryChunk* chunk = nullptr;
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(
+ heap_->isolate(),
+ "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
+ NumberOfChunks());
+ }
// Regular chunks.
while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
@@ -380,7 +423,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, concurrent_unmapping_tasks_active_);
+ CHECK_EQ(0, pending_unmapping_tasks_);
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
@@ -583,7 +626,10 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
- chunk->InitializeFreeListCategories();
+
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ chunk->categories_[i] = nullptr;
+ }
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
@@ -606,6 +652,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
if (reservation != nullptr) {
chunk->reservation_.TakeControl(reservation);
}
+
return chunk;
}
@@ -615,6 +662,8 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
// Make sure that categories are initialized before freeing the area.
page->ResetAllocatedBytes();
heap()->incremental_marking()->SetOldSpacePageFlags(page);
+ page->AllocateFreeListCategories();
+ page->InitializeFreeListCategories();
page->InitializationMemoryFence();
return page;
}
@@ -662,6 +711,28 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
return page;
}
+void Page::AllocateFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ categories_[i] = new FreeListCategory(
+ reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
+ }
+}
+
+void Page::InitializeFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
+ }
+}
+
+void Page::ReleaseFreeListCategories() {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ if (categories_[i] != nullptr) {
+ delete categories_[i];
+ categories_[i] = nullptr;
+ }
+ }
+}
+
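// Lifecycle sketch (commentary): FreeListCategory objects are now owned by
// the Page rather than embedded in the MemoryChunk header, so they are
// created and destroyed with the page:
//   PagedSpace::InitializePage          -> AllocateFreeListCategories()
//                                       -> InitializeFreeListCategories()
//   MemoryChunk::ReleaseAllocatedMemory -> ReleaseFreeListCategories()
// The FreeList* passed to the constructor is the back-pointer that
// Unlink/RelinkFreeListCategories rewrite when a page changes owner.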
Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(!old_page->is_anchor());
DCHECK(old_page->InNewSpace());
@@ -679,6 +750,10 @@ size_t MemoryChunk::CommittedPhysicalMemory() {
return high_water_mark_.Value();
}
+bool MemoryChunk::IsPagedSpace() const {
+ return owner()->identity() != LO_SPACE;
+}
+
void MemoryChunk::InsertAfter(MemoryChunk* other) {
MemoryChunk* other_next = other->next_chunk();
@@ -710,7 +785,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
VirtualMemory reservation;
Address area_start = nullptr;
Address area_end = nullptr;
- void* address_hint = heap->GetRandomMmapAddr();
+ void* address_hint =
+ AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
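// Assumption: AlignedAddress presumably rounds the random hint down to a
// MemoryChunk::kAlignment boundary, roughly:
//   reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(hint) &
//                           ~(MemoryChunk::kAlignment - 1));
// An aligned hint makes it likelier the OS can satisfy it unchanged.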
//
// MemoryChunk layout:
@@ -826,8 +902,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
owner);
}
- return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
- executable, owner, &reservation);
+ MemoryChunk* chunk =
+ MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+ executable, owner, &reservation);
+
+ if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
+ return chunk;
}
void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
@@ -970,6 +1050,8 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
}
chunk->SetFlag(MemoryChunk::PRE_FREED);
+
+ if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
}
@@ -1005,7 +1087,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
- // Fall through to kPreFreeAndQueue.
+ V8_FALLTHROUGH;
case kPreFreeAndQueue:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
@@ -1198,6 +1280,11 @@ void MemoryChunk::ReleaseAllocatedMemory() {
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
+
+ if (IsPagedSpace()) {
+ Page* page = static_cast<Page*>(this);
+ page->ReleaseFreeListCategories();
+ }
}
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
@@ -1345,12 +1432,17 @@ void Space::ResumeAllocationObservers() {
void Space::AllocationStep(int bytes_since_last, Address soon_object,
int size) {
- if (AllocationObserversActive()) {
- heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
- for (AllocationObserver* observer : allocation_observers_) {
- observer->AllocationStep(bytes_since_last, soon_object, size);
- }
+ if (!AllocationObserversActive()) {
+ return;
+ }
+
+ DCHECK(!heap()->allocation_step_in_progress());
+ heap()->set_allocation_step_in_progress(true);
+ heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
+ for (AllocationObserver* observer : allocation_observers_) {
+ observer->AllocationStep(bytes_since_last, soon_object, size);
}
+ heap()->set_allocation_step_in_progress(false);
}
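// Commentary: both CreateFillerObjectAt and the observer callbacks may
// themselves allocate, which previously could recurse into AllocationStep.
// The new allocation_step_in_progress flag turns the nested call into a
// no-op:
//   AllocationStep
//     set_allocation_step_in_progress(true)
//     filler creation / observer->AllocationStep    // may allocate
//       -> nested InlineAllocationStep sees the flag and returns early
//     set_allocation_step_in_progress(false)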
intptr_t Space::GetNextInlineAllocationStepSize() {
@@ -1359,15 +1451,13 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
: observer->bytes_to_next_step();
}
- DCHECK(allocation_observers_.size() == 0 || next_step != 0);
+ DCHECK(allocation_observers_.size() == 0 || next_step > 0);
return next_step;
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : SpaceWithLinearArea(heap, space, executable),
- anchor_(this),
- free_list_(this) {
+ : SpaceWithLinearArea(heap, space, executable), anchor_(this) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
}
@@ -1570,7 +1660,8 @@ bool PagedSpace::Expand() {
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
AddPage(page);
- Free(page->area_start(), page->area_size());
+ Free(page->area_start(), page->area_size(),
+ SpaceAccountingMode::kSpaceAccounted);
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
return true;
}
@@ -1606,7 +1697,8 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
DCHECK_GE(old_limit, new_limit);
if (new_limit != old_limit) {
SetTopAndLimit(top(), new_limit);
- Free(new_limit, old_limit - new_limit);
+ Free(new_limit, old_limit - new_limit,
+ SpaceAccountingMode::kSpaceAccounted);
if (heap()->incremental_marking()->black_allocation()) {
Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
old_limit);
@@ -1692,7 +1784,8 @@ void PagedSpace::FreeLinearAllocationArea() {
InlineAllocationStep(current_top, nullptr, nullptr, 0);
SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
- Free(current_top, current_limit - current_top);
+ Free(current_top, current_limit - current_top,
+ SpaceAccountingMode::kSpaceAccounted);
}
void PagedSpace::ReleasePage(Page* page) {
@@ -1722,6 +1815,7 @@ void PagedSpace::ReleasePage(Page* page) {
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
}
@@ -1729,6 +1823,7 @@ void PagedSpace::SetReadAndExecutable() {
void PagedSpace::SetReadAndWritable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndWritable();
}
}
@@ -1786,7 +1881,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
if (limit != end) {
- Free(limit, end - limit);
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
@@ -2078,22 +2173,21 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
}
void NewSpace::UpdateLinearAllocationArea() {
- Address old_top = top();
- Address new_top = to_space_.page_low();
+ // Make sure there are no unaccounted allocations.
+ DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
+ Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
original_top_.SetValue(top());
original_limit_.SetValue(limit());
- UpdateInlineAllocationLimit(0);
- // TODO(ofrobots): It would be more correct to do a step before setting the
- // limit on the new allocation area. However, fixing this causes a regression
- // due to the idle scavenger getting pinged too frequently. crbug.com/795323.
- InlineAllocationStep(old_top, new_top, nullptr, 0);
+ StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::ResetLinearAllocationArea() {
+ // Do a step to account for memory allocated so far before resetting.
+ InlineAllocationStep(top(), top(), nullptr, 0);
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
@@ -2121,6 +2215,10 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!Page::IsAtObjectStart(top));
+
+ // Do a step to account for memory allocated on the previous page.
+ InlineAllocationStep(top, top, nullptr, 0);
+
if (!to_space_.AdvancePage()) {
// No more pages left to advance.
return false;
@@ -2176,6 +2274,11 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
+ if (heap()->allocation_step_in_progress()) {
+ // If we are mid-way through an existing step, don't start a new one.
+ return;
+ }
+
if (AllocationObserversActive()) {
top_on_previous_step_ = top();
UpdateInlineAllocationLimit(0);
@@ -2217,6 +2320,11 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
Address top_for_next_step,
Address soon_object,
size_t size) {
+ if (heap()->allocation_step_in_progress()) {
+ // Avoid starting a new step if we are mid-way through an existing one.
+ return;
+ }
+
if (top_on_previous_step_) {
if (top < top_on_previous_step_) {
// Generated code decreased the top pointer to do folded allocations.
@@ -2608,7 +2716,6 @@ void FreeListCategory::Reset() {
FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* node = top();
if (node == nullptr) return nullptr;
set_top(node->next());
@@ -2620,10 +2727,9 @@ FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* node = PickNodeFromList(node_size);
if ((node != nullptr) && (*node_size < minimum_size)) {
- Free(node, *node_size, kLinkCategory);
+ Free(node->address(), *node_size, kLinkCategory);
*node_size = 0;
return nullptr;
}
@@ -2633,7 +2739,6 @@ FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
-
FreeSpace* prev_non_evac_node = nullptr;
for (FreeSpace* cur_node = top(); cur_node != nullptr;
cur_node = cur_node->next()) {
@@ -2656,9 +2761,10 @@ FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
return nullptr;
}
-void FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
+void FreeListCategory::Free(Address start, size_t size_in_bytes,
FreeMode mode) {
- CHECK(page()->CanAllocate());
+ DCHECK(page()->CanAllocate());
+ FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
free_space->set_next(top());
set_top(free_space);
available_ += size_in_bytes;
@@ -2686,7 +2792,7 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
-FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
+FreeList::FreeList() : wasted_bytes_(0) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i] = nullptr;
}
@@ -2704,11 +2810,6 @@ void FreeList::Reset() {
}
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
- if (size_in_bytes == 0) return 0;
-
- owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
- ClearRecordedSlots::kNo);
-
Page* page = Page::FromAddress(start);
page->DecreaseAllocatedBytes(size_in_bytes);
@@ -2719,11 +2820,10 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return size_in_bytes;
}
- FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(free_space, size_in_bytes, mode);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode);
DCHECK_EQ(page->AvailableInFreeList(),
page->AvailableInFreeListFromAllocatedBytes());
return 0;
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 08fef7d6e3..1c8bad8dc5 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -139,6 +139,8 @@ enum FreeListCategoryType {
enum FreeMode { kLinkCategory, kDoNotLinkCategory };
+enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
+
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
@@ -148,15 +150,10 @@ enum RememberedSetType {
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- static const int kSize = kIntSize + // FreeListCategoryType type_
- kIntSize + // padding for type_
- kSizetSize + // size_t available_
- kPointerSize + // FreeSpace* top_
- kPointerSize + // FreeListCategory* prev_
- kPointerSize; // FreeListCategory* next_
-
- FreeListCategory()
- : type_(kInvalidCategory),
+ FreeListCategory(FreeList* free_list, Page* page)
+ : free_list_(free_list),
+ page_(page),
+ type_(kInvalidCategory),
available_(0),
top_(nullptr),
prev_(nullptr),
@@ -180,7 +177,7 @@ class FreeListCategory {
// category is currently unlinked.
void Relink();
- void Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
+ void Free(Address address, size_t size_in_bytes, FreeMode mode);
// Picks a node from the list and stores its size in |node_size|. Returns
// nullptr if the category is empty.
@@ -196,11 +193,13 @@ class FreeListCategory {
FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
- inline Page* page() const;
+ inline Page* page() const { return page_; }
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
size_t available() const { return available_; }
+ void set_free_list(FreeList* free_list) { free_list_ = free_list; }
+
#ifdef DEBUG
size_t SumFreeList();
int FreeListLength();
@@ -218,6 +217,12 @@ class FreeListCategory {
FreeListCategory* next() { return next_; }
void set_next(FreeListCategory* next) { next_ = next; }
+ // This FreeListCategory is owned by the given free_list_.
+ FreeList* free_list_;
+
+ // This FreeListCategory holds free list entries of the given page_.
+ Page* const page_;
+
// |type_|: The type of this free list category.
FreeListCategoryType type_;
@@ -233,6 +238,8 @@ class FreeListCategory {
friend class FreeList;
friend class PagedSpace;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
// MemoryChunk represents a memory region owned by a specific space.
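Storing free_list_ and page_ as real members retires the old trick of recovering both from the category's own address, which only worked while the categories were embedded in the chunk header. A before/after sketch of the accessors:

// Before: derived from the embedded object's address.
//   Page::FromAddress(reinterpret_cast<Address>(this))
// After: plain back-pointers, valid wherever the category lives.
Page* p = category->page();            // returns page_
FreeList* owner = category->owner();   // returns free_list_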
@@ -370,7 +377,7 @@ class MemoryChunk {
+ kSizetSize // size_t wasted_memory_
+ kPointerSize // AtomicValue next_chunk_
+ kPointerSize // AtomicValue prev_chunk_
- + FreeListCategory::kSize * kNumberOfCategories
+ + kPointerSize * kNumberOfCategories
// FreeListCategory categories_[kNumberOfCategories]
+ kPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // intptr_t young_generation_live_byte_count_
@@ -610,6 +617,8 @@ class MemoryChunk {
void set_owner(Space* space) { owner_.SetValue(space); }
+ bool IsPagedSpace() const;
+
void InsertAfter(MemoryChunk* other);
void Unlink();
@@ -620,8 +629,6 @@ class MemoryChunk {
void SetReadAndExecutable();
void SetReadAndWritable();
- inline void InitializeFreeListCategories();
-
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -699,7 +706,7 @@ class MemoryChunk {
// prev_chunk_ holds a pointer of type MemoryChunk
base::AtomicValue<MemoryChunk*> prev_chunk_;
- FreeListCategory categories_[kNumberOfCategories];
+ FreeListCategory* categories_[kNumberOfCategories];
LocalArrayBufferTracker* local_tracker_;
@@ -788,7 +795,7 @@ class Page : public MemoryChunk {
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- callback(&categories_[i]);
+ callback(categories_[i]);
}
}
@@ -820,7 +827,7 @@ class Page : public MemoryChunk {
}
FreeListCategory* free_list_category(FreeListCategoryType type) {
- return &categories_[type];
+ return categories_[type];
}
bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
@@ -845,6 +852,10 @@ class Page : public MemoryChunk {
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
void DestroyBlackArea(Address start, Address end);
+ void InitializeFreeListCategories();
+ void AllocateFreeListCategories();
+ void ReleaseFreeListCategories();
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -1170,14 +1181,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
: heap_(heap),
allocator_(allocator),
pending_unmapping_tasks_semaphore_(0),
- concurrent_unmapping_tasks_active_(0) {
+ pending_unmapping_tasks_(0),
+ active_unmapping_tasks_(0) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
chunks_[kPooled].reserve(kReservedQueueingSlots);
}
void AddMemoryChunkSafe(MemoryChunk* chunk) {
- if ((chunk->size() == Page::kPageSize) &&
- (chunk->executable() != EXECUTABLE)) {
+ if (chunk->IsPagedSpace() && chunk->executable() != EXECUTABLE) {
AddMemoryChunkSafe<kRegular>(chunk);
} else {
AddMemoryChunkSafe<kNonRegular>(chunk);
@@ -1238,6 +1249,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return chunk;
}
+ bool MakeRoomForNewTasks();
+
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
@@ -1247,7 +1260,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_;
- intptr_t concurrent_unmapping_tasks_active_;
+ intptr_t pending_unmapping_tasks_;
+ base::AtomicNumber<intptr_t> active_unmapping_tasks_;
friend class MemoryAllocator;
};
@@ -1359,6 +1373,12 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// and false otherwise.
bool CommitBlock(Address start, size_t size, Executability executable);
+ // Checks if an allocated MemoryChunk was intended to be used for executable
+ // memory.
+ bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
+ return executable_memory_.find(chunk) != executable_memory_.end();
+ }
+
// Uncommit a contiguous block of memory [start..(start+size)[.
// start is not nullptr, the size is greater than zero, and the
// block is contained in the initial chunk. Returns true if it succeeded
@@ -1409,6 +1429,17 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
}
+ void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.insert(chunk);
+ }
+
+ void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.erase(chunk);
+ }
+
Isolate* isolate_;
CodeRange* code_range_;
@@ -1431,6 +1462,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
VirtualMemory last_chunk_;
Unmapper unmapper_;
+ // Data structure to remember allocated executable memory chunks.
+ std::unordered_set<MemoryChunk*> executable_memory_;
+
friend class heap::TestCodeRangeScope;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
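The executable_memory_ set gives the allocator a constant-time membership test; it backs the new CHECKs in PagedSpace::SetReadAndExecutable/SetReadAndWritable above. A usage sketch, with a hypothetical call shape for AllocateChunk:

// Sketch: the invariant the set maintains.
MemoryChunk* chunk =
    allocator->AllocateChunk(reserve_size, commit_size, EXECUTABLE, owner);
CHECK(allocator->IsMemoryChunkExecutable(chunk));  // registered on allocation
allocator->PreFreeMemory(chunk);                   // unregistered here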
@@ -1731,7 +1765,7 @@ class V8_EXPORT_PRIVATE FreeList {
return kHuge;
}
- explicit FreeList(PagedSpace* owner);
+ FreeList();
// Adds a node to the free list. The block of size {size_in_bytes} starting
// at {start} is placed on the free list. The return value is the number of
@@ -1779,7 +1813,6 @@ class V8_EXPORT_PRIVATE FreeList {
size_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
- PagedSpace* owner() { return owner_; }
size_t wasted_bytes() { return wasted_bytes_.Value(); }
template <typename Callback>
@@ -1874,13 +1907,10 @@ class V8_EXPORT_PRIVATE FreeList {
return categories_[type];
}
- PagedSpace* owner_;
base::AtomicNumber<size_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
// LocalAllocationBuffer represents a linear allocation area that is created
@@ -2086,11 +2116,22 @@ class V8_EXPORT_PRIVATE PagedSpace
MUST_USE_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment);
+ size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
+ if (size_in_bytes == 0) return 0;
+ heap_->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
+ ClearRecordedSlots::kNo);
+ if (mode == SpaceAccountingMode::kSpaceAccounted) {
+ return AccountedFree(start, size_in_bytes);
+ } else {
+ return UnaccountedFree(start, size_in_bytes);
+ }
+ }
+
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false, only the accounting stats are updated and
// no attempt is made to add the area to the free list.
- size_t Free(Address start, size_t size_in_bytes) {
+ size_t AccountedFree(Address start, size_t size_in_bytes) {
size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
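With the zero-size early-out and filler creation folded into PagedSpace::Free, call sites now state the accounting mode explicitly instead of choosing between two differently named methods. The two call shapes, mirroring the call sites rewritten above:

// Space-accounted: memory handed back by a live space (expansion, limits).
space->Free(start, size_in_bytes, SpaceAccountingMode::kSpaceAccounted);
// Unaccounted: the sweeper rebuilds free lists and fixes counters itself.
space->Free(start, size_in_bytes, SpaceAccountingMode::kSpaceUnaccounted);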
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index a69abcc886..58f47f4834 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STORE_BUFFER_H_
-#define V8_STORE_BUFFER_H_
+#ifndef V8_HEAP_STORE_BUFFER_H_
+#define V8_HEAP_STORE_BUFFER_H_
#include "src/allocation.h"
#include "src/base/logging.h"
@@ -225,4 +225,4 @@ class StoreBuffer {
} // namespace internal
} // namespace v8
-#endif // V8_STORE_BUFFER_H_
+#endif // V8_HEAP_STORE_BUFFER_H_
diff --git a/deps/v8/src/heap/stress-marking-observer.h b/deps/v8/src/heap/stress-marking-observer.h
index b97c2b179c..37ebb82197 100644
--- a/deps/v8/src/heap/stress-marking-observer.h
+++ b/deps/v8/src/heap/stress-marking-observer.h
@@ -23,4 +23,4 @@ class StressMarkingObserver : public AllocationObserver {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_HEAP_STRESS_MARKING_OBSERVER_H_
diff --git a/deps/v8/src/heap/stress-scavenge-observer.h b/deps/v8/src/heap/stress-scavenge-observer.h
index 6f69afe4c5..b39b2eac59 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.h
+++ b/deps/v8/src/heap/stress-scavenge-observer.h
@@ -36,4 +36,4 @@ class StressScavengeObserver : public AllocationObserver {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_HEAP_STRESS_SCAVENGE_OBSERVER_H_
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 25ba0df8fd..2072e407e9 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -279,8 +279,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
- free_start, size);
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
+ free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
@@ -318,8 +318,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
memset(free_start, 0xCC, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
- free_start, size);
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
+ free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
index 3421e16611..bb3eae2228 100644
--- a/deps/v8/src/heap/worklist.h
+++ b/deps/v8/src/heap/worklist.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_WORKLIST_
-#define V8_HEAP_WORKLIST_
+#ifndef V8_HEAP_WORKLIST_H_
+#define V8_HEAP_WORKLIST_H_
#include <cstddef>
#include <utility>
@@ -388,4 +388,4 @@ class Worklist {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_WORKLIST_
+#endif // V8_HEAP_WORKLIST_H_
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 368addd718..b89dceb786 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -46,13 +46,10 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
-static const byte kCallOpcode = 0xE8;
-static const int kNoCodeAgeSequenceLength = 5;
-
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
@@ -105,7 +102,7 @@ void RelocInfo::set_target_object(HeapObject* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(target->GetIsolate(), pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@@ -138,22 +135,22 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
@@ -161,11 +158,11 @@ void RelocInfo::WipeOut(Isolate* isolate) {
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
@@ -249,15 +246,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, p, sizeof(int32_t));
+ Assembler::FlushICache(p, sizeof(int32_t));
}
}
@@ -266,8 +261,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload,
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload,
code ? code->constant_pool() : nullptr, target);
}
@@ -299,9 +294,8 @@ void Assembler::emit_near_disp(Label* L) {
*pc_++ = disp;
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -321,12 +315,6 @@ void Operand::set_disp8(int8_t disp) {
*reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
}
-
-Operand::Operand(Immediate imm) {
- // [disp/r]
- set_modrm(0, ebp);
- set_dispr(imm.immediate(), imm.rmode_);
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 38508c7632..a1b8dada6e 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -105,7 +105,7 @@ bool OSHasAVXSupport() {
size_t buffer_size = arraysize(buffer);
int ctl_name[] = {CTL_KERN, KERN_OSRELEASE};
if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ FATAL("V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
// XX is the major kernel version component.
@@ -207,26 +207,26 @@ Address RelocInfo::embedded_address() const { return Memory::Address_at(pc_); }
uint32_t RelocInfo::embedded_size() const { return Memory::uint32_at(pc_); }
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
}
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
+void RelocInfo::set_embedded_size(uint32_t size,
ICacheFlushMode icache_flush_mode) {
Memory::uint32_at(pc_) = size;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(uint32_t));
+ Assembler::FlushICache(pc_, sizeof(uint32_t));
}
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
@@ -411,6 +411,7 @@ void Assembler::Nop(int bytes) {
switch (bytes) {
case 2:
EMIT(0x66);
+ V8_FALLTHROUGH;
case 1:
EMIT(0x90);
return;
@@ -427,6 +428,7 @@ void Assembler::Nop(int bytes) {
return;
case 6:
EMIT(0x66);
+ V8_FALLTHROUGH;
case 5:
EMIT(0xF);
EMIT(0x1F);
@@ -447,12 +449,15 @@ void Assembler::Nop(int bytes) {
case 11:
EMIT(0x66);
bytes--;
+ V8_FALLTHROUGH;
case 10:
EMIT(0x66);
bytes--;
+ V8_FALLTHROUGH;
case 9:
EMIT(0x66);
bytes--;
+ V8_FALLTHROUGH;
case 8:
EMIT(0xF);
EMIT(0x1F);
@@ -528,8 +533,7 @@ void Assembler::push(Register src) {
EMIT(0x50 | src.code());
}
-
-void Assembler::push(const Operand& src) {
+void Assembler::push(Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(esi, src);
@@ -542,8 +546,7 @@ void Assembler::pop(Register dst) {
EMIT(0x58 | dst.code());
}
-
-void Assembler::pop(const Operand& dst) {
+void Assembler::pop(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0x8F);
emit_operand(eax, dst);
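The mechanical const Operand& to Operand rewrite that follows passes operands by value, which is idiomatic for a small, trivially copyable value object (a short ModR/M byte buffer plus a length, per the buf_/len_ uses elsewhere in this file). A hedged sketch of the effect at a call site:

// Temporaries now bind directly to the by-value parameter:
//   void Assembler::mov(Register dst, Operand src);   // new signature
//   assembler.mov(eax, Operand(ebx, 4));              // copy, no indirection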
@@ -563,48 +566,42 @@ void Assembler::leave() {
EMIT(0xC9);
}
-
-void Assembler::mov_b(Register dst, const Operand& src) {
+void Assembler::mov_b(Register dst, Operand src) {
CHECK(dst.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x8A);
emit_operand(dst, src);
}
-
-void Assembler::mov_b(const Operand& dst, const Immediate& src) {
+void Assembler::mov_b(Operand dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0xC6);
emit_operand(eax, dst);
EMIT(static_cast<int8_t>(src.immediate()));
}
-
-void Assembler::mov_b(const Operand& dst, Register src) {
+void Assembler::mov_b(Operand dst, Register src) {
CHECK(src.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x88);
emit_operand(src, dst);
}
-
-void Assembler::mov_w(Register dst, const Operand& src) {
+void Assembler::mov_w(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x8B);
emit_operand(dst, src);
}
-
-void Assembler::mov_w(const Operand& dst, Register src) {
+void Assembler::mov_w(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x89);
emit_operand(src, dst);
}
-
-void Assembler::mov_w(const Operand& dst, const Immediate& src) {
+void Assembler::mov_w(Operand dst, const Immediate& src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0xC7);
@@ -633,8 +630,7 @@ void Assembler::mov(Register dst, Handle<HeapObject> handle) {
emit(handle);
}
-
-void Assembler::mov(Register dst, const Operand& src) {
+void Assembler::mov(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x8B);
emit_operand(dst, src);
@@ -647,62 +643,62 @@ void Assembler::mov(Register dst, Register src) {
EMIT(0xC0 | src.code() << 3 | dst.code());
}
-
-void Assembler::mov(const Operand& dst, const Immediate& x) {
+void Assembler::mov(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
EMIT(0xC7);
emit_operand(eax, dst);
emit(x);
}
-void Assembler::mov(const Operand& dst, Handle<HeapObject> handle) {
+void Assembler::mov(Operand dst, Address src, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
EMIT(0xC7);
emit_operand(eax, dst);
- emit(handle);
+ emit(reinterpret_cast<uint32_t>(src), rmode);
}
+void Assembler::mov(Operand dst, Handle<HeapObject> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ emit(handle);
+}
-void Assembler::mov(const Operand& dst, Register src) {
+void Assembler::mov(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x89);
emit_operand(src, dst);
}
-
-void Assembler::movsx_b(Register dst, const Operand& src) {
+void Assembler::movsx_b(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBE);
emit_operand(dst, src);
}
-
-void Assembler::movsx_w(Register dst, const Operand& src) {
+void Assembler::movsx_w(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBF);
emit_operand(dst, src);
}
-
-void Assembler::movzx_b(Register dst, const Operand& src) {
+void Assembler::movzx_b(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xB6);
emit_operand(dst, src);
}
-
-void Assembler::movzx_w(Register dst, const Operand& src) {
+void Assembler::movzx_w(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xB7);
emit_operand(dst, src);
}
-
-void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
+void Assembler::cmov(Condition cc, Register dst, Operand src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
EMIT(0x0F);
@@ -747,21 +743,20 @@ void Assembler::xchg(Register dst, Register src) {
}
}
-
-void Assembler::xchg(Register dst, const Operand& src) {
+void Assembler::xchg(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x87);
emit_operand(dst, src);
}
-void Assembler::xchg_b(Register reg, const Operand& op) {
+void Assembler::xchg_b(Register reg, Operand op) {
DCHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x86);
emit_operand(reg, op);
}
-void Assembler::xchg_w(Register reg, const Operand& op) {
+void Assembler::xchg_w(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x87);
@@ -773,14 +768,14 @@ void Assembler::lock() {
EMIT(0xF0);
}
-void Assembler::cmpxchg(const Operand& dst, Register src) {
+void Assembler::cmpxchg(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xB1);
emit_operand(src, dst);
}
-void Assembler::cmpxchg_b(const Operand& dst, Register src) {
+void Assembler::cmpxchg_b(Operand dst, Register src) {
DCHECK(src.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -788,7 +783,7 @@ void Assembler::cmpxchg_b(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-void Assembler::cmpxchg_w(const Operand& dst, Register src) {
+void Assembler::cmpxchg_w(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -803,34 +798,36 @@ void Assembler::lfence() {
EMIT(0xE8);
}
+void Assembler::pause() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0x90);
+}
+
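// Commentary: F3 90 encodes the x86 PAUSE instruction, a spin-wait hint
// (it degrades to a NOP on CPUs that predate it). Typical spin-loop shape,
// with flag_operand as a hypothetical memory operand:
//   Label spin;
//   bind(&spin);
//   pause();                        // hint: we are busy-waiting
//   cmp(flag_operand, Immediate(0));
//   j(equal, &spin);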
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
emit_arith(2, Operand(dst), Immediate(imm32));
}
-
-void Assembler::adc(Register dst, const Operand& src) {
+void Assembler::adc(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x13);
emit_operand(dst, src);
}
-
-void Assembler::add(Register dst, const Operand& src) {
+void Assembler::add(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x03);
emit_operand(dst, src);
}
-
-void Assembler::add(const Operand& dst, Register src) {
+void Assembler::add(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x01);
emit_operand(src, dst);
}
-
-void Assembler::add(const Operand& dst, const Immediate& x) {
+void Assembler::add(Operand dst, const Immediate& x) {
DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
emit_arith(0, dst, x);
@@ -847,27 +844,24 @@ void Assembler::and_(Register dst, const Immediate& x) {
emit_arith(4, Operand(dst), x);
}
-
-void Assembler::and_(Register dst, const Operand& src) {
+void Assembler::and_(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x23);
emit_operand(dst, src);
}
-
-void Assembler::and_(const Operand& dst, const Immediate& x) {
+void Assembler::and_(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(4, dst, x);
}
-
-void Assembler::and_(const Operand& dst, Register src) {
+void Assembler::and_(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x21);
emit_operand(src, dst);
}
-void Assembler::cmpb(const Operand& op, Immediate imm8) {
+void Assembler::cmpb(Operand op, Immediate imm8) {
DCHECK(imm8.is_int8() || imm8.is_uint8());
EnsureSpace ensure_space(this);
if (op.is_reg(eax)) {
@@ -879,24 +873,21 @@ void Assembler::cmpb(const Operand& op, Immediate imm8) {
emit_b(imm8);
}
-
-void Assembler::cmpb(const Operand& op, Register reg) {
+void Assembler::cmpb(Operand op, Register reg) {
CHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x38);
emit_operand(reg, op);
}
-
-void Assembler::cmpb(Register reg, const Operand& op) {
+void Assembler::cmpb(Register reg, Operand op) {
CHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x3A);
emit_operand(reg, op);
}
-
-void Assembler::cmpw(const Operand& op, Immediate imm16) {
+void Assembler::cmpw(Operand op, Immediate imm16) {
DCHECK(imm16.is_int16() || imm16.is_uint16());
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -905,14 +896,14 @@ void Assembler::cmpw(const Operand& op, Immediate imm16) {
emit_w(imm16);
}
-void Assembler::cmpw(Register reg, const Operand& op) {
+void Assembler::cmpw(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x3B);
emit_operand(reg, op);
}
-void Assembler::cmpw(const Operand& op, Register reg) {
+void Assembler::cmpw(Operand op, Register reg) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x39);
@@ -929,38 +920,35 @@ void Assembler::cmp(Register reg, Handle<HeapObject> handle) {
emit_arith(7, Operand(reg), Immediate(handle));
}
-
-void Assembler::cmp(Register reg, const Operand& op) {
+void Assembler::cmp(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x3B);
emit_operand(reg, op);
}
-void Assembler::cmp(const Operand& op, Register reg) {
+void Assembler::cmp(Operand op, Register reg) {
EnsureSpace ensure_space(this);
EMIT(0x39);
emit_operand(reg, op);
}
-void Assembler::cmp(const Operand& op, const Immediate& imm) {
+void Assembler::cmp(Operand op, const Immediate& imm) {
EnsureSpace ensure_space(this);
emit_arith(7, op, imm);
}
-void Assembler::cmp(const Operand& op, Handle<HeapObject> handle) {
+void Assembler::cmp(Operand op, Handle<HeapObject> handle) {
EnsureSpace ensure_space(this);
emit_arith(7, op, Immediate(handle));
}
-
-void Assembler::cmpb_al(const Operand& op) {
+void Assembler::cmpb_al(Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x38); // CMP r/m8, r8
emit_operand(eax, op); // eax has same code as register al.
}
-
-void Assembler::cmpw_ax(const Operand& op) {
+void Assembler::cmpw_ax(Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x39); // CMP r/m16, r16
@@ -975,8 +963,7 @@ void Assembler::dec_b(Register dst) {
EMIT(0xC8 | dst.code());
}
-
-void Assembler::dec_b(const Operand& dst) {
+void Assembler::dec_b(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xFE);
emit_operand(ecx, dst);
@@ -988,8 +975,7 @@ void Assembler::dec(Register dst) {
EMIT(0x48 | dst.code());
}
-
-void Assembler::dec(const Operand& dst) {
+void Assembler::dec(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(ecx, dst);
@@ -1001,15 +987,13 @@ void Assembler::cdq() {
EMIT(0x99);
}
-
-void Assembler::idiv(const Operand& src) {
+void Assembler::idiv(Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(edi, src);
}
-
-void Assembler::div(const Operand& src) {
+void Assembler::div(Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(esi, src);
@@ -1022,8 +1006,7 @@ void Assembler::imul(Register reg) {
EMIT(0xE8 | reg.code());
}
-
-void Assembler::imul(Register dst, const Operand& src) {
+void Assembler::imul(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xAF);
@@ -1035,8 +1018,7 @@ void Assembler::imul(Register dst, Register src, int32_t imm32) {
imul(dst, Operand(src), imm32);
}
-
-void Assembler::imul(Register dst, const Operand& src, int32_t imm32) {
+void Assembler::imul(Register dst, Operand src, int32_t imm32) {
EnsureSpace ensure_space(this);
if (is_int8(imm32)) {
EMIT(0x6B);
@@ -1055,15 +1037,13 @@ void Assembler::inc(Register dst) {
EMIT(0x40 | dst.code());
}
-
-void Assembler::inc(const Operand& dst) {
+void Assembler::inc(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(eax, dst);
}
-
-void Assembler::lea(Register dst, const Operand& src) {
+void Assembler::lea(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x8D);
emit_operand(dst, src);
@@ -1083,8 +1063,7 @@ void Assembler::neg(Register dst) {
EMIT(0xD8 | dst.code());
}
-
-void Assembler::neg(const Operand& dst) {
+void Assembler::neg(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(ebx, dst);
@@ -1097,8 +1076,7 @@ void Assembler::not_(Register dst) {
EMIT(0xD0 | dst.code());
}
-
-void Assembler::not_(const Operand& dst) {
+void Assembler::not_(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(edx, dst);
@@ -1110,21 +1088,18 @@ void Assembler::or_(Register dst, int32_t imm32) {
emit_arith(1, Operand(dst), Immediate(imm32));
}
-
-void Assembler::or_(Register dst, const Operand& src) {
+void Assembler::or_(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0B);
emit_operand(dst, src);
}
-
-void Assembler::or_(const Operand& dst, const Immediate& x) {
+void Assembler::or_(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(1, dst, x);
}
-
-void Assembler::or_(const Operand& dst, Register src) {
+void Assembler::or_(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x09);
emit_operand(src, dst);
@@ -1158,8 +1133,7 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
}
-
-void Assembler::ror(const Operand& dst, uint8_t imm8) {
+void Assembler::ror(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1172,15 +1146,13 @@ void Assembler::ror(const Operand& dst, uint8_t imm8) {
}
}
-
-void Assembler::ror_cl(const Operand& dst) {
+void Assembler::ror_cl(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
emit_operand(ecx, dst);
}
-
-void Assembler::sar(const Operand& dst, uint8_t imm8) {
+void Assembler::sar(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1193,14 +1165,13 @@ void Assembler::sar(const Operand& dst, uint8_t imm8) {
}
}
-
-void Assembler::sar_cl(const Operand& dst) {
+void Assembler::sar_cl(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
emit_operand(edi, dst);
}
-void Assembler::sbb(Register dst, const Operand& src) {
+void Assembler::sbb(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x1B);
emit_operand(dst, src);
@@ -1222,8 +1193,7 @@ void Assembler::shld_cl(Register dst, Register src) {
emit_operand(src, Operand(dst));
}
-
-void Assembler::shl(const Operand& dst, uint8_t imm8) {
+void Assembler::shl(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1236,14 +1206,13 @@ void Assembler::shl(const Operand& dst, uint8_t imm8) {
}
}
-
-void Assembler::shl_cl(const Operand& dst) {
+void Assembler::shl_cl(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
emit_operand(esp, dst);
}
-void Assembler::shr(const Operand& dst, uint8_t imm8) {
+void Assembler::shr(Operand dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
@@ -1256,8 +1225,7 @@ void Assembler::shr(const Operand& dst, uint8_t imm8) {
}
}
-
-void Assembler::shr_cl(const Operand& dst) {
+void Assembler::shr_cl(Operand dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
emit_operand(ebp, dst);
@@ -1272,32 +1240,37 @@ void Assembler::shrd(Register dst, Register src, uint8_t shift) {
EMIT(shift);
}
-void Assembler::shrd_cl(const Operand& dst, Register src) {
+void Assembler::shrd_cl(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xAD);
emit_operand(src, dst);
}
-void Assembler::sub(const Operand& dst, const Immediate& x) {
+void Assembler::sub(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(5, dst, x);
}
-
-void Assembler::sub(Register dst, const Operand& src) {
+void Assembler::sub(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x2B);
emit_operand(dst, src);
}
-
-void Assembler::sub(const Operand& dst, Register src) {
+void Assembler::sub(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x29);
emit_operand(src, dst);
}
+void Assembler::sub_sp_32(uint32_t imm) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x81); // Using a literal 32-bit immediate.
+ static constexpr Register ireg = Register::from_code<5>();
+ emit_operand(ireg, Operand(esp));
+ emit(imm);
+}
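// Encoding note: 0x81 /5 id is SUB r/m32, imm32. Register::from_code<5>()
// supplies the /5 opcode extension via the ModR/M reg field, so the bytes
// emitted are 81 EC imm32 (sub esp, imm32) with a fixed 4-byte immediate,
// presumably so the instruction length is independent of the value.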
void Assembler::test(Register reg, const Immediate& imm) {
if (imm.is_uint8()) {
@@ -1317,23 +1290,20 @@ void Assembler::test(Register reg, const Immediate& imm) {
emit(imm);
}
-
-void Assembler::test(Register reg, const Operand& op) {
+void Assembler::test(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x85);
emit_operand(reg, op);
}
-
-void Assembler::test_b(Register reg, const Operand& op) {
+void Assembler::test_b(Register reg, Operand op) {
CHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x84);
emit_operand(reg, op);
}
-
-void Assembler::test(const Operand& op, const Immediate& imm) {
+void Assembler::test(Operand op, const Immediate& imm) {
if (op.is_reg_only()) {
test(op.reg(), imm);
return;
@@ -1365,7 +1335,7 @@ void Assembler::test_b(Register reg, Immediate imm8) {
}
}
-void Assembler::test_b(const Operand& op, Immediate imm8) {
+void Assembler::test_b(Operand op, Immediate imm8) {
if (op.is_reg_only()) {
test_b(op.reg(), imm8);
return;
@@ -1390,14 +1360,14 @@ void Assembler::test_w(Register reg, Immediate imm16) {
}
}
-void Assembler::test_w(Register reg, const Operand& op) {
+void Assembler::test_w(Register reg, Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x85);
emit_operand(reg, op);
}
-void Assembler::test_w(const Operand& op, Immediate imm16) {
+void Assembler::test_w(Operand op, Immediate imm16) {
DCHECK(imm16.is_int16() || imm16.is_uint16());
if (op.is_reg_only()) {
test_w(op.reg(), imm16);
@@ -1415,52 +1385,45 @@ void Assembler::xor_(Register dst, int32_t imm32) {
emit_arith(6, Operand(dst), Immediate(imm32));
}
-
-void Assembler::xor_(Register dst, const Operand& src) {
+void Assembler::xor_(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x33);
emit_operand(dst, src);
}
-
-void Assembler::xor_(const Operand& dst, Register src) {
+void Assembler::xor_(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x31);
emit_operand(src, dst);
}
-
-void Assembler::xor_(const Operand& dst, const Immediate& x) {
+void Assembler::xor_(Operand dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(6, dst, x);
}
-
-void Assembler::bt(const Operand& dst, Register src) {
+void Assembler::bt(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA3);
emit_operand(src, dst);
}
-
-void Assembler::bts(const Operand& dst, Register src) {
+void Assembler::bts(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xAB);
emit_operand(src, dst);
}
-
-void Assembler::bsr(Register dst, const Operand& src) {
+void Assembler::bsr(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBD);
emit_operand(dst, src);
}
-
-void Assembler::bsf(Register dst, const Operand& src) {
+void Assembler::bsf(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xBC);
@@ -1650,13 +1613,12 @@ void Assembler::wasm_call(Address entry, RelocInfo::Mode rmode) {
emit(reinterpret_cast<intptr_t>(entry), rmode);
}
-int Assembler::CallSize(const Operand& adr) {
+int Assembler::CallSize(Operand adr) {
// Call size is 1 (opcode) + adr.len_ (operand).
return 1 + adr.len_;
}
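
CallSize mirrors the encoding that call(Operand) below produces: one opcode byte (0xFF) plus the operand's ModRM/SIB/displacement bytes, whose count Operand tracks in len_. A worked example with assumed operands, illustrative only:

    // call(Operand(eax))        -> FF D0       (len_ == 1, CallSize == 2)
    // call(Operand(ebx, 0x10))  -> FF 53 10    (len_ == 2, CallSize == 3)
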
-
-void Assembler::call(const Operand& adr) {
+void Assembler::call(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(edx, adr);
@@ -1729,8 +1691,7 @@ void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
}
}
-
-void Assembler::jmp(const Operand& adr) {
+void Assembler::jmp(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(esp, adr);
@@ -1854,94 +1815,81 @@ void Assembler::fldln2() {
EMIT(0xED);
}
-
-void Assembler::fld_s(const Operand& adr) {
+void Assembler::fld_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(eax, adr);
}
-
-void Assembler::fld_d(const Operand& adr) {
+void Assembler::fld_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(eax, adr);
}
-
-void Assembler::fstp_s(const Operand& adr) {
+void Assembler::fstp_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(ebx, adr);
}
-
-void Assembler::fst_s(const Operand& adr) {
+void Assembler::fst_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(edx, adr);
}
-
-void Assembler::fstp_d(const Operand& adr) {
+void Assembler::fstp_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(ebx, adr);
}
-
-void Assembler::fst_d(const Operand& adr) {
+void Assembler::fst_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(edx, adr);
}
-
-void Assembler::fild_s(const Operand& adr) {
+void Assembler::fild_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(eax, adr);
}
-
-void Assembler::fild_d(const Operand& adr) {
+void Assembler::fild_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDF);
emit_operand(ebp, adr);
}
-
-void Assembler::fistp_s(const Operand& adr) {
+void Assembler::fistp_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(ebx, adr);
}
-
-void Assembler::fisttp_s(const Operand& adr) {
+void Assembler::fisttp_s(Operand adr) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(ecx, adr);
}
-
-void Assembler::fisttp_d(const Operand& adr) {
+void Assembler::fisttp_d(Operand adr) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(ecx, adr);
}
-
-void Assembler::fist_s(const Operand& adr) {
+void Assembler::fist_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(edx, adr);
}
-
-void Assembler::fistp_d(const Operand& adr) {
+void Assembler::fistp_d(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDF);
emit_operand(edi, adr);
@@ -2034,8 +1982,7 @@ void Assembler::fsub_i(int i) {
emit_farith(0xD8, 0xE0, i);
}
-
-void Assembler::fisub_s(const Operand& adr) {
+void Assembler::fisub_s(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xDA);
emit_operand(esp, adr);
@@ -2211,8 +2158,7 @@ void Assembler::setcc(Condition cc, Register reg) {
EMIT(0xC0 | reg.code());
}
-
-void Assembler::cvttss2si(Register dst, const Operand& src) {
+void Assembler::cvttss2si(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2220,8 +2166,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
+void Assembler::cvttsd2si(Register dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2238,8 +2183,7 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtsi2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtsi2ss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2247,8 +2191,7 @@ void Assembler::cvtsi2ss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtsi2sd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2256,8 +2199,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2265,8 +2207,7 @@ void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtsd2ss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2274,14 +2215,14 @@ void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+void Assembler::cvtdq2ps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5B);
emit_sse_operand(dst, src);
}
-void Assembler::cvttps2dq(XMMRegister dst, const Operand& src) {
+void Assembler::cvttps2dq(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2289,7 +2230,7 @@ void Assembler::cvttps2dq(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
+void Assembler::addsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2297,8 +2238,7 @@ void Assembler::addsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+void Assembler::mulsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2306,8 +2246,7 @@ void Assembler::mulsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subsd(XMMRegister dst, const Operand& src) {
+void Assembler::subsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2315,8 +2254,7 @@ void Assembler::subsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divsd(XMMRegister dst, const Operand& src) {
+void Assembler::divsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2324,8 +2262,7 @@ void Assembler::divsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+void Assembler::xorpd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2333,91 +2270,84 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::andps(XMMRegister dst, const Operand& src) {
+void Assembler::andps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x54);
emit_sse_operand(dst, src);
}
-
-void Assembler::orps(XMMRegister dst, const Operand& src) {
+void Assembler::orps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x56);
emit_sse_operand(dst, src);
}
-
-void Assembler::xorps(XMMRegister dst, const Operand& src) {
+void Assembler::xorps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x57);
emit_sse_operand(dst, src);
}
-
-void Assembler::addps(XMMRegister dst, const Operand& src) {
+void Assembler::addps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x58);
emit_sse_operand(dst, src);
}
-
-void Assembler::subps(XMMRegister dst, const Operand& src) {
+void Assembler::subps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5C);
emit_sse_operand(dst, src);
}
-
-void Assembler::mulps(XMMRegister dst, const Operand& src) {
+void Assembler::mulps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x59);
emit_sse_operand(dst, src);
}
-
-void Assembler::divps(XMMRegister dst, const Operand& src) {
+void Assembler::divps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5E);
emit_sse_operand(dst, src);
}
-void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+void Assembler::rcpps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x53);
emit_sse_operand(dst, src);
}
-void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+void Assembler::rsqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x52);
emit_sse_operand(dst, src);
}
-void Assembler::minps(XMMRegister dst, const Operand& src) {
+void Assembler::minps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5D);
emit_sse_operand(dst, src);
}
-void Assembler::maxps(XMMRegister dst, const Operand& src) {
+void Assembler::maxps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5F);
emit_sse_operand(dst, src);
}
-void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) {
+void Assembler::cmpps(XMMRegister dst, Operand src, int8_t cmp) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xC2);
@@ -2425,7 +2355,7 @@ void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) {
EMIT(cmp);
}
-void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2433,7 +2363,7 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::haddps(XMMRegister dst, const Operand& src) {
+void Assembler::haddps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2458,8 +2388,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+void Assembler::ucomisd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2510,8 +2439,7 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+void Assembler::maxsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2519,8 +2447,7 @@ void Assembler::maxsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::minsd(XMMRegister dst, const Operand& src) {
+void Assembler::minsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2553,14 +2480,14 @@ void Assembler::movups(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::movups(XMMRegister dst, const Operand& src) {
+void Assembler::movups(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x10);
emit_sse_operand(dst, src);
}
-void Assembler::movups(const Operand& dst, XMMRegister src) {
+void Assembler::movups(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x11);
@@ -2576,8 +2503,7 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
EMIT(imm8);
}
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+void Assembler::movdqa(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2585,8 +2511,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+void Assembler::movdqa(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2594,8 +2519,7 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
+void Assembler::movdqu(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2603,8 +2527,7 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movdqu(XMMRegister dst, const Operand& src) {
+void Assembler::movdqu(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2612,8 +2535,7 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::prefetch(const Operand& src, int level) {
+void Assembler::prefetch(Operand src, int level) {
DCHECK(is_uint2(level));
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2623,8 +2545,7 @@ void Assembler::prefetch(const Operand& src, int level) {
emit_sse_operand(code, src);
}
-
-void Assembler::movsd(const Operand& dst, XMMRegister src ) {
+void Assembler::movsd(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2); // double
EMIT(0x0F);
@@ -2632,8 +2553,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
+void Assembler::movsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2); // double
EMIT(0x0F);
@@ -2641,8 +2561,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movss(const Operand& dst, XMMRegister src ) {
+void Assembler::movss(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF3); // float
EMIT(0x0F);
@@ -2650,8 +2569,7 @@ void Assembler::movss(const Operand& dst, XMMRegister src ) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
+void Assembler::movss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3); // float
EMIT(0x0F);
@@ -2659,8 +2577,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movd(XMMRegister dst, const Operand& src) {
+void Assembler::movd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2668,8 +2585,7 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movd(const Operand& dst, XMMRegister src) {
+void Assembler::movd(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2791,7 +2707,7 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2800,7 +2716,7 @@ void Assembler::pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
EMIT(shuffle);
}
-void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2809,7 +2725,7 @@ void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
EMIT(shuffle);
}
-void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrb(Operand dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2820,7 +2736,7 @@ void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrw(Operand dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2831,7 +2747,7 @@ void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::pextrd(Operand dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2842,7 +2758,7 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
EMIT(offset);
}
-void Assembler::insertps(XMMRegister dst, const Operand& src, int8_t offset) {
+void Assembler::insertps(XMMRegister dst, Operand src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2853,7 +2769,7 @@ void Assembler::insertps(XMMRegister dst, const Operand& src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) {
+void Assembler::pinsrb(XMMRegister dst, Operand src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2864,7 +2780,7 @@ void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t offset) {
+void Assembler::pinsrw(XMMRegister dst, Operand src, int8_t offset) {
DCHECK(is_uint8(offset));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2874,7 +2790,7 @@ void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t offset) {
EMIT(offset);
}
-void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
+void Assembler::pinsrd(XMMRegister dst, Operand src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2885,8 +2801,7 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
EMIT(offset);
}
-
-void Assembler::addss(XMMRegister dst, const Operand& src) {
+void Assembler::addss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2894,8 +2809,7 @@ void Assembler::addss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subss(XMMRegister dst, const Operand& src) {
+void Assembler::subss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2903,8 +2817,7 @@ void Assembler::subss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulss(XMMRegister dst, const Operand& src) {
+void Assembler::mulss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2912,8 +2825,7 @@ void Assembler::mulss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divss(XMMRegister dst, const Operand& src) {
+void Assembler::divss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2921,8 +2833,7 @@ void Assembler::divss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2930,16 +2841,14 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
+void Assembler::ucomiss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x2E);
emit_sse_operand(dst, src);
}
-
-void Assembler::maxss(XMMRegister dst, const Operand& src) {
+void Assembler::maxss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2947,8 +2856,7 @@ void Assembler::maxss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::minss(XMMRegister dst, const Operand& src) {
+void Assembler::minss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -2959,7 +2867,7 @@ void Assembler::minss(XMMRegister dst, const Operand& src) {
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ Operand src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kLIG, k66, k0F38, kW1);
@@ -2967,9 +2875,8 @@ void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ Operand src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kLIG, k66, k0F38, kW0);
@@ -2977,37 +2884,29 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
}
-
-void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF3, k0F, kWIG);
}
-
-void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kNone, k0F, kWIG);
}
-
-void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, k66, k0F, kWIG);
}
-void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t cmp) {
vps(0xC2, dst, src1, src2);
EMIT(cmp);
}
-void Assembler::vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vshufps(XMMRegister dst, XMMRegister src1, Operand src2,
byte imm8) {
DCHECK(is_uint8(imm8));
vps(0xC6, dst, src1, src2);
@@ -3050,56 +2949,56 @@ void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
EMIT(imm8);
}
-void Assembler::vpshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::vpshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
EMIT(shuffle);
}
-void Assembler::vpshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::vpshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
EMIT(shuffle);
}
-void Assembler::vpextrb(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrb(Operand dst, XMMRegister src, int8_t offset) {
vinstr(0x14, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpextrw(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrw(Operand dst, XMMRegister src, int8_t offset) {
vinstr(0x15, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpextrd(const Operand& dst, XMMRegister src, int8_t offset) {
+void Assembler::vpextrd(Operand dst, XMMRegister src, int8_t offset) {
vinstr(0x16, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vinsertps(XMMRegister dst, XMMRegister src1,
- const Operand& src2, int8_t offset) {
+void Assembler::vinsertps(XMMRegister dst, XMMRegister src1, Operand src2,
+ int8_t offset) {
vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t offset) {
vinstr(0x20, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t offset) {
vinstr(0xC4, dst, src1, src2, k66, k0F, kWIG);
EMIT(offset);
}
-void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
+void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t offset) {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kWIG);
EMIT(offset);
}
-void Assembler::bmi1(byte op, Register reg, Register vreg, const Operand& rm) {
+void Assembler::bmi1(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit_vex_prefix(vreg, kLZ, kNone, k0F38, kW0);
@@ -3107,8 +3006,7 @@ void Assembler::bmi1(byte op, Register reg, Register vreg, const Operand& rm) {
emit_operand(reg, rm);
}
-
-void Assembler::tzcnt(Register dst, const Operand& src) {
+void Assembler::tzcnt(Register dst, Operand src) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -3117,8 +3015,7 @@ void Assembler::tzcnt(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::lzcnt(Register dst, const Operand& src) {
+void Assembler::lzcnt(Register dst, Operand src) {
DCHECK(IsEnabled(LZCNT));
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -3127,8 +3024,7 @@ void Assembler::lzcnt(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::popcnt(Register dst, const Operand& src) {
+void Assembler::popcnt(Register dst, Operand src) {
DCHECK(IsEnabled(POPCNT));
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -3137,9 +3033,8 @@ void Assembler::popcnt(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
void Assembler::bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm) {
+ Operand rm) {
DCHECK(IsEnabled(BMI2));
EnsureSpace ensure_space(this);
emit_vex_prefix(vreg, kLZ, pp, k0F38, kW0);
@@ -3147,8 +3042,7 @@ void Assembler::bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
emit_operand(reg, rm);
}
-
-void Assembler::rorx(Register dst, const Operand& src, byte imm8) {
+void Assembler::rorx(Register dst, Operand src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
Register vreg = Register::from_code<0>(); // VEX.vvvv unused
@@ -3159,7 +3053,7 @@ void Assembler::rorx(Register dst, const Operand& src, byte imm8) {
EMIT(imm8);
}
-void Assembler::sse2_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::sse2_instr(XMMRegister dst, Operand src, byte prefix,
byte escape, byte opcode) {
EnsureSpace ensure_space(this);
EMIT(prefix);
@@ -3168,7 +3062,7 @@ void Assembler::sse2_instr(XMMRegister dst, const Operand& src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::ssse3_instr(XMMRegister dst, Operand src, byte prefix,
byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSSE3));
EnsureSpace ensure_space(this);
@@ -3179,7 +3073,7 @@ void Assembler::ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -3190,9 +3084,8 @@ void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
- VexW w) {
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kL128, pp, m, w);
@@ -3200,7 +3093,7 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
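
vinstr is the shared AVX emission path: a VEX prefix carrying the SIMD prefix, leading-opcode escape, and width bits (pp, m, w) plus the second source register, then the one-byte opcode and the usual ModRM/SIB encoding of src2 against dst. The vsd/vss/vps/vpd helpers above simply pin (pp, m) to the classic F2/F3/none/66 + 0F combinations. An illustrative call chain, assuming the vaddsd wrapper declared in the header (not shown in this hunk):

    // vaddsd(dst, src1, src2)
    //   -> vsd(0x58, dst, src1, src2)                  // kF2, k0F, kWIG
    //     -> vinstr(0x58, dst, src1, src2, kF2, k0F, kWIG)
    //        // emit_vex_prefix(src1, kL128, kF2, k0F, kWIG);
    //        // EMIT(0x58); emit_sse_operand(dst, src2);
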
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
@@ -3320,8 +3213,7 @@ void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
}
}
-
-void Assembler::emit_operand(Register reg, const Operand& adr) {
+void Assembler::emit_operand(Register reg, Operand adr) {
const unsigned length = adr.len_;
DCHECK_GT(length, 0);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index d57e3bee71..f4e495c36b 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -233,30 +233,19 @@ enum RoundingMode {
class Immediate BASE_EMBEDDED {
public:
- inline explicit Immediate(int x) {
+ inline explicit Immediate(int x, RelocInfo::Mode rmode = RelocInfo::NONE) {
value_.immediate = x;
- rmode_ = RelocInfo::NONE32;
- }
- inline explicit Immediate(const ExternalReference& ext) {
- value_.immediate = reinterpret_cast<int32_t>(ext.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
- }
- inline explicit Immediate(Handle<HeapObject> handle) {
- value_.immediate = reinterpret_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- }
- inline explicit Immediate(Smi* value) {
- value_.immediate = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
- }
- inline explicit Immediate(Address addr) {
- value_.immediate = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE32;
- }
- inline explicit Immediate(Address x, RelocInfo::Mode rmode) {
- value_.immediate = reinterpret_cast<int32_t>(x);
rmode_ = rmode;
}
+ inline explicit Immediate(const ExternalReference& ext)
+ : Immediate(ext.address(), RelocInfo::EXTERNAL_REFERENCE) {}
+ inline explicit Immediate(Handle<HeapObject> handle)
+ : Immediate(handle.address(), RelocInfo::EMBEDDED_OBJECT) {}
+ inline explicit Immediate(Smi* value)
+ : Immediate(reinterpret_cast<intptr_t>(value)) {}
+ inline explicit Immediate(Address addr,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : Immediate(reinterpret_cast<int32_t>(addr), rmode) {}
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
static Immediate EmbeddedCode(CodeStub* code);
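
The rewritten constructors collapse six hand-written bodies into one primary constructor (an int value plus an rmode that now defaults to RelocInfo::NONE, replacing the removed NONE32) and delegate everything else to it. A self-contained sketch of the same delegating-constructor pattern, with purely illustrative names:

    #include <cstdint>

    enum class Mode : uint8_t { kNone, kExternal };

    class Imm {
     public:
      explicit Imm(int32_t x, Mode m = Mode::kNone) : value_(x), mode_(m) {}
      explicit Imm(const void* p)  // delegates to the primary constructor
          : Imm(static_cast<int32_t>(reinterpret_cast<intptr_t>(p)),
                Mode::kExternal) {}

     private:
      int32_t value_;
      Mode mode_;
    };
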
@@ -333,8 +322,7 @@ enum ScaleFactor {
times_twice_pointer_size = times_8
};
-
-class Operand BASE_EMBEDDED {
+class Operand {
public:
// reg
INLINE(explicit Operand(Register reg)) { set_modrm(3, reg); }
@@ -352,24 +340,22 @@ class Operand BASE_EMBEDDED {
}
// [disp/r]
- INLINE(explicit Operand(Immediate imm));
+ INLINE(explicit Operand(Immediate imm)) {
+ set_modrm(0, ebp);
+ set_dispr(imm.immediate(), imm.rmode_);
+ }
// [base + disp/r]
explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
+ RelocInfo::Mode rmode = RelocInfo::NONE);
// [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
+ explicit Operand(Register base, Register index, ScaleFactor scale,
+ int32_t disp, RelocInfo::Mode rmode = RelocInfo::NONE);
// [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
+ explicit Operand(Register index, ScaleFactor scale, int32_t disp,
+ RelocInfo::Mode rmode = RelocInfo::NONE);
static Operand JumpTable(Register index, ScaleFactor scale, Label* table) {
return Operand(index, scale, reinterpret_cast<int32_t>(table),
@@ -429,13 +415,17 @@ class Operand BASE_EMBEDDED {
byte buf_[6];
// The number of bytes in buf_.
- unsigned int len_;
+ uint8_t len_;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_;
+ // TODO(clemensh): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
};
-
+static_assert(sizeof(Operand) <= 2 * kPointerSize,
+ "Operand must be small enough to pass it by value");
+static_assert(IS_TRIVIALLY_COPYABLE(Operand),
+ "Operand must be trivially copyable to pass it by value");
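
These asserts capture the rationale for the whole const Operand& -> Operand change: with len_ shrunk to uint8_t, Operand is small and trivially copyable, so passing it by value costs no more than passing a reference and removes an indirection in every emitter. A standalone sketch of the same guards, with illustrative names:

    #include <cstdint>
    #include <type_traits>

    struct SmallOp {
      uint8_t buf[6];  // encoded ModRM/SIB/displacement bytes
      uint8_t len;     // number of valid bytes in buf
    };

    static_assert(sizeof(SmallOp) <= 2 * sizeof(void*),
                  "small enough to pass by value");
    static_assert(std::is_trivially_copyable<SmallOp>::value,
                  "trivially copyable, so by-value copies are cheap");
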
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
@@ -529,7 +519,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
@@ -539,12 +529,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static constexpr int kSpecialTargetSize = kPointerSize;
@@ -610,53 +599,54 @@ class Assembler : public AssemblerBase {
void push(const Immediate& x);
void push_imm32(int32_t imm32);
void push(Register src);
- void push(const Operand& src);
+ void push(Operand src);
void pop(Register dst);
- void pop(const Operand& dst);
+ void pop(Operand dst);
void enter(const Immediate& size);
void leave();
// Moves
void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
- void mov_b(Register dst, const Operand& src);
+ void mov_b(Register dst, Operand src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
- void mov_b(const Operand& dst, int8_t src) { mov_b(dst, Immediate(src)); }
- void mov_b(const Operand& dst, const Immediate& src);
- void mov_b(const Operand& dst, Register src);
+ void mov_b(Operand dst, int8_t src) { mov_b(dst, Immediate(src)); }
+ void mov_b(Operand dst, const Immediate& src);
+ void mov_b(Operand dst, Register src);
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, int16_t src) { mov_w(dst, Immediate(src)); }
- void mov_w(const Operand& dst, const Immediate& src);
- void mov_w(const Operand& dst, Register src);
+ void mov_w(Register dst, Operand src);
+ void mov_w(Operand dst, int16_t src) { mov_w(dst, Immediate(src)); }
+ void mov_w(Operand dst, const Immediate& src);
+ void mov_w(Operand dst, Register src);
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
void mov(Register dst, Handle<HeapObject> handle);
- void mov(Register dst, const Operand& src);
+ void mov(Register dst, Operand src);
void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<HeapObject> handle);
- void mov(const Operand& dst, Register src);
+ void mov(Operand dst, const Immediate& x);
+ void mov(Operand dst, Handle<HeapObject> handle);
+ void mov(Operand dst, Register src);
+ void mov(Operand dst, Address src, RelocInfo::Mode);
void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
- void movsx_b(Register dst, const Operand& src);
+ void movsx_b(Register dst, Operand src);
void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
- void movsx_w(Register dst, const Operand& src);
+ void movsx_w(Register dst, Operand src);
void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
- void movzx_b(Register dst, const Operand& src);
+ void movzx_b(Register dst, Operand src);
void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
- void movzx_w(Register dst, const Operand& src);
+ void movzx_w(Register dst, Operand src);
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
}
- void cmov(Condition cc, Register dst, const Operand& src);
+ void cmov(Condition cc, Register dst, Operand src);
// Flag management.
void cld();
@@ -668,167 +658,170 @@ class Assembler : public AssemblerBase {
// Exchange
void xchg(Register dst, Register src);
- void xchg(Register dst, const Operand& src);
- void xchg_b(Register reg, const Operand& op);
- void xchg_w(Register reg, const Operand& op);
+ void xchg(Register dst, Operand src);
+ void xchg_b(Register reg, Operand op);
+ void xchg_w(Register reg, Operand op);
// Lock prefix
void lock();
// CompareExchange
- void cmpxchg(const Operand& dst, Register src);
- void cmpxchg_b(const Operand& dst, Register src);
- void cmpxchg_w(const Operand& dst, Register src);
+ void cmpxchg(Operand dst, Register src);
+ void cmpxchg_b(Operand dst, Register src);
+ void cmpxchg_w(Operand dst, Register src);
// Memory Fence
void lfence();
+ void pause();
+
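
The newly declared pause(), grouped with lfence() under the memory-fence helpers, is presumably the standard spin-wait hint; its .cc body falls outside this hunk, so the encoding below is an assumption:

    // Assumed encoding: F3 90 (REP-prefixed NOP), the conventional x86
    // PAUSE hint; pre-SSE2 CPUs execute it as a plain NOP.
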
// Arithmetics
void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
+ void adc(Register dst, Operand src);
void add(Register dst, Register src) { add(dst, Operand(src)); }
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, Register src);
+ void add(Register dst, Operand src);
+ void add(Operand dst, Register src);
void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
- void add(const Operand& dst, const Immediate& x);
+ void add(Operand dst, const Immediate& x);
void and_(Register dst, int32_t imm32);
void and_(Register dst, const Immediate& x);
void and_(Register dst, Register src) { and_(dst, Operand(src)); }
- void and_(Register dst, const Operand& src);
- void and_(const Operand& dst, Register src);
- void and_(const Operand& dst, const Immediate& x);
+ void and_(Register dst, Operand src);
+ void and_(Operand dst, Register src);
+ void and_(Operand dst, const Immediate& x);
void cmpb(Register reg, Immediate imm8) { cmpb(Operand(reg), imm8); }
- void cmpb(const Operand& op, Immediate imm8);
- void cmpb(Register reg, const Operand& op);
- void cmpb(const Operand& op, Register reg);
+ void cmpb(Operand op, Immediate imm8);
+ void cmpb(Register reg, Operand op);
+ void cmpb(Operand op, Register reg);
void cmpb(Register dst, Register src) { cmpb(Operand(dst), src); }
- void cmpb_al(const Operand& op);
- void cmpw_ax(const Operand& op);
- void cmpw(const Operand& dst, Immediate src);
+ void cmpb_al(Operand op);
+ void cmpw_ax(Operand op);
+ void cmpw(Operand dst, Immediate src);
void cmpw(Register dst, Immediate src) { cmpw(Operand(dst), src); }
- void cmpw(Register dst, const Operand& src);
+ void cmpw(Register dst, Operand src);
void cmpw(Register dst, Register src) { cmpw(Operand(dst), src); }
- void cmpw(const Operand& dst, Register src);
+ void cmpw(Operand dst, Register src);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<HeapObject> handle);
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
- void cmp(Register reg, const Operand& op);
+ void cmp(Register reg, Operand op);
void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
- void cmp(const Operand& op, Register reg);
- void cmp(const Operand& op, const Immediate& imm);
- void cmp(const Operand& op, Handle<HeapObject> handle);
+ void cmp(Operand op, Register reg);
+ void cmp(Operand op, const Immediate& imm);
+ void cmp(Operand op, Handle<HeapObject> handle);
void dec_b(Register dst);
- void dec_b(const Operand& dst);
+ void dec_b(Operand dst);
void dec(Register dst);
- void dec(const Operand& dst);
+ void dec(Operand dst);
void cdq();
void idiv(Register src) { idiv(Operand(src)); }
- void idiv(const Operand& src);
+ void idiv(Operand src);
void div(Register src) { div(Operand(src)); }
- void div(const Operand& src);
+ void div(Operand src);
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
void imul(Register dst, Register src) { imul(dst, Operand(src)); }
- void imul(Register dst, const Operand& src); // dst = dst * src.
+ void imul(Register dst, Operand src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
- void imul(Register dst, const Operand& src, int32_t imm32);
+ void imul(Register dst, Operand src, int32_t imm32);
void inc(Register dst);
- void inc(const Operand& dst);
+ void inc(Operand dst);
- void lea(Register dst, const Operand& src);
+ void lea(Register dst, Operand src);
// Unsigned multiply instruction.
void mul(Register src); // edx:eax = eax * reg.
void neg(Register dst);
- void neg(const Operand& dst);
+ void neg(Operand dst);
void not_(Register dst);
- void not_(const Operand& dst);
+ void not_(Operand dst);
void or_(Register dst, int32_t imm32);
void or_(Register dst, Register src) { or_(dst, Operand(src)); }
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
+ void or_(Register dst, Operand src);
+ void or_(Operand dst, Register src);
void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
- void or_(const Operand& dst, const Immediate& x);
+ void or_(Operand dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
void rcr(Register dst, uint8_t imm8);
void ror(Register dst, uint8_t imm8) { ror(Operand(dst), imm8); }
- void ror(const Operand& dst, uint8_t imm8);
+ void ror(Operand dst, uint8_t imm8);
void ror_cl(Register dst) { ror_cl(Operand(dst)); }
- void ror_cl(const Operand& dst);
+ void ror_cl(Operand dst);
void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
- void sar(const Operand& dst, uint8_t imm8);
+ void sar(Operand dst, uint8_t imm8);
void sar_cl(Register dst) { sar_cl(Operand(dst)); }
- void sar_cl(const Operand& dst);
+ void sar_cl(Operand dst);
- void sbb(Register dst, const Operand& src);
+ void sbb(Register dst, Operand src);
void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
- void shl(const Operand& dst, uint8_t imm8);
+ void shl(Operand dst, uint8_t imm8);
void shl_cl(Register dst) { shl_cl(Operand(dst)); }
- void shl_cl(const Operand& dst);
+ void shl_cl(Operand dst);
void shld(Register dst, Register src, uint8_t shift);
void shld_cl(Register dst, Register src);
void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
- void shr(const Operand& dst, uint8_t imm8);
+ void shr(Operand dst, uint8_t imm8);
void shr_cl(Register dst) { shr_cl(Operand(dst)); }
- void shr_cl(const Operand& dst);
+ void shr_cl(Operand dst);
void shrd(Register dst, Register src, uint8_t shift);
void shrd_cl(Register dst, Register src) { shrd_cl(Operand(dst), src); }
- void shrd_cl(const Operand& dst, Register src);
+ void shrd_cl(Operand dst, Register src);
void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
- void sub(const Operand& dst, const Immediate& x);
+ void sub(Operand dst, const Immediate& x);
void sub(Register dst, Register src) { sub(dst, Operand(src)); }
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
+ void sub(Register dst, Operand src);
+ void sub(Operand dst, Register src);
+ void sub_sp_32(uint32_t imm);
void test(Register reg, const Immediate& imm);
void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
- void test(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
- void test(const Operand& op, Register reg) { test(reg, op); }
- void test_b(Register reg, const Operand& op);
+ void test(Register reg, Operand op);
+ void test(Operand op, const Immediate& imm);
+ void test(Operand op, Register reg) { test(reg, op); }
+ void test_b(Register reg, Operand op);
void test_b(Register reg, Immediate imm8);
- void test_b(const Operand& op, Immediate imm8);
- void test_b(const Operand& op, Register reg) { test_b(reg, op); }
+ void test_b(Operand op, Immediate imm8);
+ void test_b(Operand op, Register reg) { test_b(reg, op); }
void test_b(Register dst, Register src) { test_b(dst, Operand(src)); }
- void test_w(Register reg, const Operand& op);
+ void test_w(Register reg, Operand op);
void test_w(Register reg, Immediate imm16);
- void test_w(const Operand& op, Immediate imm16);
- void test_w(const Operand& op, Register reg) { test_w(reg, op); }
+ void test_w(Operand op, Immediate imm16);
+ void test_w(Operand op, Register reg) { test_w(reg, op); }
void test_w(Register dst, Register src) { test_w(dst, Operand(src)); }
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& dst, Register src);
+ void xor_(Register dst, Operand src);
+ void xor_(Operand dst, Register src);
void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
- void xor_(const Operand& dst, const Immediate& x);
+ void xor_(Operand dst, const Immediate& x);
// Bit operations.
- void bt(const Operand& dst, Register src);
+ void bt(Operand dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
- void bts(const Operand& dst, Register src);
+ void bts(Operand dst, Register src);
void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
- void bsr(Register dst, const Operand& src);
+ void bsr(Register dst, Operand src);
void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
- void bsf(Register dst, const Operand& src);
+ void bsf(Register dst, Operand src);
// Miscellaneous
void hlt();
@@ -857,9 +850,9 @@ class Assembler : public AssemblerBase {
// Calls
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
- int CallSize(const Operand& adr);
+ int CallSize(Operand adr);
void call(Register reg) { call(Operand(reg)); }
- void call(const Operand& adr);
+ void call(Operand adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code, RelocInfo::Mode rmode);
void call(CodeStub* stub);
@@ -870,7 +863,7 @@ class Assembler : public AssemblerBase {
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode);
void jmp(Register reg) { jmp(Operand(reg)); }
- void jmp(const Operand& adr);
+ void jmp(Operand adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
// Conditional jumps
@@ -890,25 +883,25 @@ class Assembler : public AssemblerBase {
void fldpi();
void fldln2();
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
+ void fld_s(Operand adr);
+ void fld_d(Operand adr);
- void fstp_s(const Operand& adr);
- void fst_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fst_d(const Operand& adr);
+ void fstp_s(Operand adr);
+ void fst_s(Operand adr);
+ void fstp_d(Operand adr);
+ void fst_d(Operand adr);
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
+ void fild_s(Operand adr);
+ void fild_d(Operand adr);
- void fist_s(const Operand& adr);
+ void fist_s(Operand adr);
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
+ void fistp_s(Operand adr);
+ void fistp_d(Operand adr);
// The fisttp instructions require SSE3.
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
+ void fisttp_s(Operand adr);
+ void fisttp_d(Operand adr);
void fabs();
void fchs();
@@ -929,7 +922,7 @@ class Assembler : public AssemblerBase {
void fdiv(int i);
void fdiv_i(int i);
- void fisub_s(const Operand& adr);
+ void fisub_s(Operand adr);
void faddp(int i = 1);
void fsubp(int i = 1);
@@ -962,62 +955,62 @@ class Assembler : public AssemblerBase {
// SSE instructions
void addss(XMMRegister dst, XMMRegister src) { addss(dst, Operand(src)); }
- void addss(XMMRegister dst, const Operand& src);
+ void addss(XMMRegister dst, Operand src);
void subss(XMMRegister dst, XMMRegister src) { subss(dst, Operand(src)); }
- void subss(XMMRegister dst, const Operand& src);
+ void subss(XMMRegister dst, Operand src);
void mulss(XMMRegister dst, XMMRegister src) { mulss(dst, Operand(src)); }
- void mulss(XMMRegister dst, const Operand& src);
+ void mulss(XMMRegister dst, Operand src);
void divss(XMMRegister dst, XMMRegister src) { divss(dst, Operand(src)); }
- void divss(XMMRegister dst, const Operand& src);
+ void divss(XMMRegister dst, Operand src);
void sqrtss(XMMRegister dst, XMMRegister src) { sqrtss(dst, Operand(src)); }
- void sqrtss(XMMRegister dst, const Operand& src);
+ void sqrtss(XMMRegister dst, Operand src);
void ucomiss(XMMRegister dst, XMMRegister src) { ucomiss(dst, Operand(src)); }
- void ucomiss(XMMRegister dst, const Operand& src);
+ void ucomiss(XMMRegister dst, Operand src);
void movaps(XMMRegister dst, XMMRegister src);
void movups(XMMRegister dst, XMMRegister src);
- void movups(XMMRegister dst, const Operand& src);
- void movups(const Operand& dst, XMMRegister src);
+ void movups(XMMRegister dst, Operand src);
+ void movups(Operand dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); }
- void maxss(XMMRegister dst, const Operand& src);
+ void maxss(XMMRegister dst, Operand src);
void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); }
- void minss(XMMRegister dst, const Operand& src);
+ void minss(XMMRegister dst, Operand src);
- void andps(XMMRegister dst, const Operand& src);
+ void andps(XMMRegister dst, Operand src);
void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
- void xorps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, Operand src);
void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
- void orps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, Operand src);
void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
- void addps(XMMRegister dst, const Operand& src);
+ void addps(XMMRegister dst, Operand src);
void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
- void subps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, Operand src);
void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
- void mulps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, Operand src);
void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
- void divps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, Operand src);
void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
- void rcpps(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, Operand src);
void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
- void rsqrtps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, Operand src);
void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
- void haddps(XMMRegister dst, const Operand& src);
+ void haddps(XMMRegister dst, Operand src);
void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
- void minps(XMMRegister dst, const Operand& src);
+ void minps(XMMRegister dst, Operand src);
void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
- void maxps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, Operand src);
void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); }
- void cmpps(XMMRegister dst, const Operand& src, int8_t cmp);
+ void cmpps(XMMRegister dst, Operand src, int8_t cmp);
#define SSE_CMP_P(instr, imm8) \
void instr##ps(XMMRegister dst, XMMRegister src) { \
cmpps(dst, Operand(src), imm8); \
} \
- void instr##ps(XMMRegister dst, const Operand& src) { cmpps(dst, src, imm8); }
+ void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); }
SSE_CMP_P(cmpeq, 0x0);
SSE_CMP_P(cmplt, 0x1);
@@ -1027,54 +1020,55 @@ class Assembler : public AssemblerBase {
#undef SSE_CMP_P
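
SSE_CMP_P stamps out one overload pair per comparison predicate, forwarding to cmpps with a fixed immediate. Reconstructed from the macro above, SSE_CMP_P(cmplt, 0x1) expands (modulo whitespace) to:

    void cmpltps(XMMRegister dst, XMMRegister src) {
      cmpps(dst, Operand(src), 0x1);
    }
    void cmpltps(XMMRegister dst, Operand src) { cmpps(dst, src, 0x1); }
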
// SSE2 instructions
- void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, Operand src);
void cvttss2si(Register dst, XMMRegister src) {
cvttss2si(dst, Operand(src));
}
- void cvttsd2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, Operand src);
void cvttsd2si(Register dst, XMMRegister src) {
cvttsd2si(dst, Operand(src));
}
void cvtsd2si(Register dst, XMMRegister src);
void cvtsi2ss(XMMRegister dst, Register src) { cvtsi2ss(dst, Operand(src)); }
- void cvtsi2ss(XMMRegister dst, const Operand& src);
+ void cvtsi2ss(XMMRegister dst, Operand src);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
- void cvtsi2sd(XMMRegister dst, const Operand& src);
- void cvtss2sd(XMMRegister dst, const Operand& src);
+ void cvtsi2sd(XMMRegister dst, Operand src);
+ void cvtss2sd(XMMRegister dst, Operand src);
void cvtss2sd(XMMRegister dst, XMMRegister src) {
cvtss2sd(dst, Operand(src));
}
- void cvtsd2ss(XMMRegister dst, const Operand& src);
+ void cvtsd2ss(XMMRegister dst, Operand src);
void cvtsd2ss(XMMRegister dst, XMMRegister src) {
cvtsd2ss(dst, Operand(src));
}
void cvtdq2ps(XMMRegister dst, XMMRegister src) {
cvtdq2ps(dst, Operand(src));
}
- void cvtdq2ps(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, Operand src);
void cvttps2dq(XMMRegister dst, XMMRegister src) {
cvttps2dq(dst, Operand(src));
}
- void cvttps2dq(XMMRegister dst, const Operand& src);
+ void cvttps2dq(XMMRegister dst, Operand src);
void addsd(XMMRegister dst, XMMRegister src) { addsd(dst, Operand(src)); }
- void addsd(XMMRegister dst, const Operand& src);
+ void addsd(XMMRegister dst, Operand src);
void subsd(XMMRegister dst, XMMRegister src) { subsd(dst, Operand(src)); }
- void subsd(XMMRegister dst, const Operand& src);
+ void subsd(XMMRegister dst, Operand src);
void mulsd(XMMRegister dst, XMMRegister src) { mulsd(dst, Operand(src)); }
- void mulsd(XMMRegister dst, const Operand& src);
+ void mulsd(XMMRegister dst, Operand src);
void divsd(XMMRegister dst, XMMRegister src) { divsd(dst, Operand(src)); }
- void divsd(XMMRegister dst, const Operand& src);
- void xorpd(XMMRegister dst, XMMRegister src);
+ void divsd(XMMRegister dst, Operand src);
+ void xorpd(XMMRegister dst, XMMRegister src) { xorpd(dst, Operand(src)); }
+ void xorpd(XMMRegister dst, Operand src);
void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); }
- void sqrtsd(XMMRegister dst, const Operand& src);
+ void sqrtsd(XMMRegister dst, Operand src);
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
- void ucomisd(XMMRegister dst, const Operand& src);
+ void ucomisd(XMMRegister dst, Operand src);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1085,15 +1079,15 @@ class Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
void maxsd(XMMRegister dst, XMMRegister src) { maxsd(dst, Operand(src)); }
- void maxsd(XMMRegister dst, const Operand& src);
+ void maxsd(XMMRegister dst, Operand src);
void minsd(XMMRegister dst, XMMRegister src) { minsd(dst, Operand(src)); }
- void minsd(XMMRegister dst, const Operand& src);
+ void minsd(XMMRegister dst, Operand src);
- void movdqa(XMMRegister dst, const Operand& src);
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqu(XMMRegister dst, const Operand& src);
- void movdqu(const Operand& dst, XMMRegister src);
- void movdq(bool aligned, XMMRegister dst, const Operand& src) {
+ void movdqa(XMMRegister dst, Operand src);
+ void movdqa(Operand dst, XMMRegister src);
+ void movdqu(XMMRegister dst, Operand src);
+ void movdqu(Operand dst, XMMRegister src);
+ void movdq(bool aligned, XMMRegister dst, Operand src) {
if (aligned) {
movdqa(dst, src);
} else {
@@ -1102,16 +1096,15 @@ class Assembler : public AssemblerBase {
}
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
- void movd(XMMRegister dst, const Operand& src);
+ void movd(XMMRegister dst, Operand src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
- void movd(const Operand& dst, XMMRegister src);
+ void movd(Operand dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); }
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
+ void movsd(XMMRegister dst, Operand src);
+ void movsd(Operand dst, XMMRegister src);
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
+ void movss(XMMRegister dst, Operand src);
+ void movss(Operand dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
void extractps(Register dst, XMMRegister src, byte imm8);
@@ -1131,42 +1124,42 @@ class Assembler : public AssemblerBase {
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
pshuflw(dst, Operand(src), shuffle);
}
- void pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
pshufd(dst, Operand(src), shuffle);
}
- void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void pextrb(Register dst, XMMRegister src, int8_t offset) {
pextrb(Operand(dst), src, offset);
}
- void pextrb(const Operand& dst, XMMRegister src, int8_t offset);
+ void pextrb(Operand dst, XMMRegister src, int8_t offset);
// Use SSE4_1 encoding for pextrw reg, xmm, imm8 for consistency
void pextrw(Register dst, XMMRegister src, int8_t offset) {
pextrw(Operand(dst), src, offset);
}
- void pextrw(const Operand& dst, XMMRegister src, int8_t offset);
+ void pextrw(Operand dst, XMMRegister src, int8_t offset);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
- void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void pextrd(Operand dst, XMMRegister src, int8_t offset);
void insertps(XMMRegister dst, XMMRegister src, int8_t offset) {
insertps(dst, Operand(src), offset);
}
- void insertps(XMMRegister dst, const Operand& src, int8_t offset);
+ void insertps(XMMRegister dst, Operand src, int8_t offset);
void pinsrb(XMMRegister dst, Register src, int8_t offset) {
pinsrb(dst, Operand(src), offset);
}
- void pinsrb(XMMRegister dst, const Operand& src, int8_t offset);
+ void pinsrb(XMMRegister dst, Operand src, int8_t offset);
void pinsrw(XMMRegister dst, Register src, int8_t offset) {
pinsrw(dst, Operand(src), offset);
}
- void pinsrw(XMMRegister dst, const Operand& src, int8_t offset);
+ void pinsrw(XMMRegister dst, Operand src, int8_t offset);
void pinsrd(XMMRegister dst, Register src, int8_t offset) {
pinsrd(dst, Operand(src), offset);
}
- void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
+ void pinsrd(XMMRegister dst, Operand src, int8_t offset);
// AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1178,13 +1171,13 @@ class Assembler : public AssemblerBase {
void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd231sd(dst, src1, Operand(src2));
}
- void vfmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x99, dst, src1, src2);
}
- void vfmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xa9, dst, src1, src2);
}
- void vfmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xb9, dst, src1, src2);
}
void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1196,13 +1189,13 @@ class Assembler : public AssemblerBase {
void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmsub231sd(dst, src1, Operand(src2));
}
- void vfmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9b, dst, src1, src2);
}
- void vfmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xab, dst, src1, src2);
}
- void vfmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbb, dst, src1, src2);
}
void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1214,13 +1207,13 @@ class Assembler : public AssemblerBase {
void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfnmadd231sd(dst, src1, Operand(src2));
}
- void vfnmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9d, dst, src1, src2);
}
- void vfnmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xad, dst, src1, src2);
}
- void vfnmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbd, dst, src1, src2);
}
void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1232,16 +1225,16 @@ class Assembler : public AssemblerBase {
void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfnmsub231sd(dst, src1, Operand(src2));
}
- void vfnmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9f, dst, src1, src2);
}
- void vfnmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xaf, dst, src1, src2);
}
- void vfnmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbf, dst, src1, src2);
}
- void vfmasd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd132ss(dst, src1, Operand(src2));
@@ -1252,13 +1245,13 @@ class Assembler : public AssemblerBase {
void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd231ss(dst, src1, Operand(src2));
}
- void vfmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x99, dst, src1, src2);
}
- void vfmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xa9, dst, src1, src2);
}
- void vfmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xb9, dst, src1, src2);
}
void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1270,13 +1263,13 @@ class Assembler : public AssemblerBase {
void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmsub231ss(dst, src1, Operand(src2));
}
- void vfmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9b, dst, src1, src2);
}
- void vfmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xab, dst, src1, src2);
}
- void vfmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbb, dst, src1, src2);
}
void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1288,13 +1281,13 @@ class Assembler : public AssemblerBase {
void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfnmadd231ss(dst, src1, Operand(src2));
}
- void vfnmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9d, dst, src1, src2);
}
- void vfnmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xad, dst, src1, src2);
}
- void vfnmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbd, dst, src1, src2);
}
void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1306,101 +1299,101 @@ class Assembler : public AssemblerBase {
void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfnmsub231ss(dst, src1, Operand(src2));
}
- void vfnmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9f, dst, src1, src2);
}
- void vfnmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xaf, dst, src1, src2);
}
- void vfnmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbf, dst, src1, src2);
}
- void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vaddsd(dst, src1, Operand(src2));
}
- void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vaddsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x58, dst, src1, src2);
}
void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsubsd(dst, src1, Operand(src2));
}
- void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vsubsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5c, dst, src1, src2);
}
void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vmulsd(dst, src1, Operand(src2));
}
- void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vmulsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x59, dst, src1, src2);
}
void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vdivsd(dst, src1, Operand(src2));
}
- void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vdivsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5e, dst, src1, src2);
}
void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vmaxsd(dst, src1, Operand(src2));
}
- void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vmaxsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5f, dst, src1, src2);
}
void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vminsd(dst, src1, Operand(src2));
}
- void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vminsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5d, dst, src1, src2);
}
- void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vaddss(dst, src1, Operand(src2));
}
- void vaddss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vaddss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x58, dst, src1, src2);
}
void vsubss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsubss(dst, src1, Operand(src2));
}
- void vsubss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vsubss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5c, dst, src1, src2);
}
void vmulss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vmulss(dst, src1, Operand(src2));
}
- void vmulss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vmulss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x59, dst, src1, src2);
}
void vdivss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vdivss(dst, src1, Operand(src2));
}
- void vdivss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vdivss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5e, dst, src1, src2);
}
void vmaxss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vmaxss(dst, src1, Operand(src2));
}
- void vmaxss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vmaxss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5f, dst, src1, src2);
}
void vminss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vminss(dst, src1, Operand(src2));
}
- void vminss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vminss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5d, dst, src1, src2);
}
- void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vrcpps(XMMRegister dst, XMMRegister src) { vrcpps(dst, Operand(src)); }
- void vrcpps(XMMRegister dst, const Operand& src) {
+ void vrcpps(XMMRegister dst, Operand src) {
vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG);
}
void vrsqrtps(XMMRegister dst, XMMRegister src) {
vrsqrtps(dst, Operand(src));
}
- void vrsqrtps(XMMRegister dst, const Operand& src) {
+ void vrsqrtps(XMMRegister dst, Operand src) {
vinstr(0x52, dst, xmm0, src, kNone, k0F, kWIG);
}
void vmovaps(XMMRegister dst, XMMRegister src) {
@@ -1409,8 +1402,7 @@ class Assembler : public AssemblerBase {
void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) {
vshufps(dst, src1, Operand(src2), imm8);
}
- void vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
- byte imm8);
+ void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8);
@@ -1422,75 +1414,72 @@ class Assembler : public AssemblerBase {
void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
vpshuflw(dst, Operand(src), shuffle);
}
- void vpshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void vpshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
void vpshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
vpshufd(dst, Operand(src), shuffle);
}
- void vpshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void vpshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void vpextrb(Register dst, XMMRegister src, int8_t offset) {
vpextrb(Operand(dst), src, offset);
}
- void vpextrb(const Operand& dst, XMMRegister src, int8_t offset);
+ void vpextrb(Operand dst, XMMRegister src, int8_t offset);
void vpextrw(Register dst, XMMRegister src, int8_t offset) {
vpextrw(Operand(dst), src, offset);
}
- void vpextrw(const Operand& dst, XMMRegister src, int8_t offset);
+ void vpextrw(Operand dst, XMMRegister src, int8_t offset);
void vpextrd(Register dst, XMMRegister src, int8_t offset) {
vpextrd(Operand(dst), src, offset);
}
- void vpextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void vpextrd(Operand dst, XMMRegister src, int8_t offset);
void vinsertps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
int8_t offset) {
vinsertps(dst, src1, Operand(src2), offset);
}
- void vinsertps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ void vinsertps(XMMRegister dst, XMMRegister src1, Operand src2,
int8_t offset);
void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2,
int8_t offset) {
vpinsrb(dst, src1, Operand(src2), offset);
}
- void vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t offset);
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2,
int8_t offset) {
vpinsrw(dst, src1, Operand(src2), offset);
}
- void vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t offset);
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2,
int8_t offset) {
vpinsrd(dst, src1, Operand(src2), offset);
}
- void vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t offset);
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t offset);
void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
vcvtdq2ps(dst, Operand(src));
}
- void vcvtdq2ps(XMMRegister dst, const Operand& src) {
+ void vcvtdq2ps(XMMRegister dst, Operand src) {
vinstr(0x5B, dst, xmm0, src, kNone, k0F, kWIG);
}
void vcvttps2dq(XMMRegister dst, XMMRegister src) {
vcvttps2dq(dst, Operand(src));
}
- void vcvttps2dq(XMMRegister dst, const Operand& src) {
+ void vcvttps2dq(XMMRegister dst, Operand src) {
vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vmovdqu(XMMRegister dst, const Operand& src) {
+ void vmovdqu(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vmovdqu(const Operand& dst, XMMRegister src) {
+ void vmovdqu(Operand dst, XMMRegister src) {
vinstr(0x7F, src, xmm0, dst, kF3, k0F, kWIG);
}
void vmovd(XMMRegister dst, Register src) { vmovd(dst, Operand(src)); }
- void vmovd(XMMRegister dst, const Operand& src) {
+ void vmovd(XMMRegister dst, Operand src) {
vinstr(0x6E, dst, xmm0, src, k66, k0F, kWIG);
}
void vmovd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
- void vmovd(const Operand& dst, XMMRegister src) {
+ void vmovd(Operand dst, XMMRegister src) {
vinstr(0x7E, src, xmm0, dst, k66, k0F, kWIG);
}
@@ -1498,76 +1487,76 @@ class Assembler : public AssemblerBase {
void andn(Register dst, Register src1, Register src2) {
andn(dst, src1, Operand(src2));
}
- void andn(Register dst, Register src1, const Operand& src2) {
+ void andn(Register dst, Register src1, Operand src2) {
bmi1(0xf2, dst, src1, src2);
}
void bextr(Register dst, Register src1, Register src2) {
bextr(dst, Operand(src1), src2);
}
- void bextr(Register dst, const Operand& src1, Register src2) {
+ void bextr(Register dst, Operand src1, Register src2) {
bmi1(0xf7, dst, src2, src1);
}
void blsi(Register dst, Register src) { blsi(dst, Operand(src)); }
- void blsi(Register dst, const Operand& src) { bmi1(0xf3, ebx, dst, src); }
+ void blsi(Register dst, Operand src) { bmi1(0xf3, ebx, dst, src); }
void blsmsk(Register dst, Register src) { blsmsk(dst, Operand(src)); }
- void blsmsk(Register dst, const Operand& src) { bmi1(0xf3, edx, dst, src); }
+ void blsmsk(Register dst, Operand src) { bmi1(0xf3, edx, dst, src); }
void blsr(Register dst, Register src) { blsr(dst, Operand(src)); }
- void blsr(Register dst, const Operand& src) { bmi1(0xf3, ecx, dst, src); }
+ void blsr(Register dst, Operand src) { bmi1(0xf3, ecx, dst, src); }
void tzcnt(Register dst, Register src) { tzcnt(dst, Operand(src)); }
- void tzcnt(Register dst, const Operand& src);
+ void tzcnt(Register dst, Operand src);
void lzcnt(Register dst, Register src) { lzcnt(dst, Operand(src)); }
- void lzcnt(Register dst, const Operand& src);
+ void lzcnt(Register dst, Operand src);
void popcnt(Register dst, Register src) { popcnt(dst, Operand(src)); }
- void popcnt(Register dst, const Operand& src);
+ void popcnt(Register dst, Operand src);
void bzhi(Register dst, Register src1, Register src2) {
bzhi(dst, Operand(src1), src2);
}
- void bzhi(Register dst, const Operand& src1, Register src2) {
+ void bzhi(Register dst, Operand src1, Register src2) {
bmi2(kNone, 0xf5, dst, src2, src1);
}
void mulx(Register dst1, Register dst2, Register src) {
mulx(dst1, dst2, Operand(src));
}
- void mulx(Register dst1, Register dst2, const Operand& src) {
+ void mulx(Register dst1, Register dst2, Operand src) {
bmi2(kF2, 0xf6, dst1, dst2, src);
}
void pdep(Register dst, Register src1, Register src2) {
pdep(dst, src1, Operand(src2));
}
- void pdep(Register dst, Register src1, const Operand& src2) {
+ void pdep(Register dst, Register src1, Operand src2) {
bmi2(kF2, 0xf5, dst, src1, src2);
}
void pext(Register dst, Register src1, Register src2) {
pext(dst, src1, Operand(src2));
}
- void pext(Register dst, Register src1, const Operand& src2) {
+ void pext(Register dst, Register src1, Operand src2) {
bmi2(kF3, 0xf5, dst, src1, src2);
}
void sarx(Register dst, Register src1, Register src2) {
sarx(dst, Operand(src1), src2);
}
- void sarx(Register dst, const Operand& src1, Register src2) {
+ void sarx(Register dst, Operand src1, Register src2) {
bmi2(kF3, 0xf7, dst, src2, src1);
}
void shlx(Register dst, Register src1, Register src2) {
shlx(dst, Operand(src1), src2);
}
- void shlx(Register dst, const Operand& src1, Register src2) {
+ void shlx(Register dst, Operand src1, Register src2) {
bmi2(k66, 0xf7, dst, src2, src1);
}
void shrx(Register dst, Register src1, Register src2) {
shrx(dst, Operand(src1), src2);
}
- void shrx(Register dst, const Operand& src1, Register src2) {
+ void shrx(Register dst, Operand src1, Register src2) {
bmi2(kF2, 0xf7, dst, src2, src1);
}
void rorx(Register dst, Register src, byte imm8) {
rorx(dst, Operand(src), imm8);
}
- void rorx(Register dst, const Operand& src, byte imm8);
+ void rorx(Register dst, Operand src, byte imm8);
#define PACKED_OP_LIST(V) \
V(and, 0x54) \
@@ -1579,34 +1568,31 @@ class Assembler : public AssemblerBase {
V(div, 0x5e) \
V(max, 0x5f)
-#define AVX_PACKED_OP_DECLARE(name, opcode) \
- void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vps(opcode, dst, src1, Operand(src2)); \
- } \
- void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vps(opcode, dst, src1, src2); \
- } \
- void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vpd(opcode, dst, src1, Operand(src2)); \
- } \
- void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vpd(opcode, dst, src1, src2); \
+#define AVX_PACKED_OP_DECLARE(name, opcode) \
+ void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vps(opcode, dst, src1, Operand(src2)); \
+ } \
+ void v##name##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vps(opcode, dst, src1, src2); \
+ } \
+ void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vpd(opcode, dst, src1, Operand(src2)); \
+ } \
+ void v##name##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vpd(opcode, dst, src1, src2); \
}
PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
- void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
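// For reference, AVX_PACKED_OP_DECLARE(and, 0x54) (instantiated via
// PACKED_OP_LIST above) now expands to declarations like:
//
//   void vandps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
//     vps(0x54, dst, src1, Operand(src2));
//   }
//   void vandps(XMMRegister dst, XMMRegister src1, Operand src2) {
//     vps(0x54, dst, src1, src2);
//   }
//
// plus the analogous vandpd pair forwarding to vpd().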
- void vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t cmp);
-#define AVX_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmpps(dst, src1, Operand(src2), imm8); \
- } \
- void instr##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vcmpps(dst, src1, src2, imm8); \
+ void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp);
+#define AVX_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmpps(dst, src1, Operand(src2), imm8); \
+ } \
+ void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmpps(dst, src1, src2, imm8); \
}
AVX_CMP_P(vcmpeq, 0x0);
@@ -1621,7 +1607,7 @@ class Assembler : public AssemblerBase {
void instruction(XMMRegister dst, XMMRegister src) { \
instruction(dst, Operand(src)); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
}
@@ -1632,8 +1618,7 @@ class Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
v##instruction(dst, src1, Operand(src2)); \
} \
- void v##instruction(XMMRegister dst, XMMRegister src1, \
- const Operand& src2) { \
+ void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
}
@@ -1645,7 +1630,7 @@ class Assembler : public AssemblerBase {
void instruction(XMMRegister dst, XMMRegister src) { \
instruction(dst, Operand(src)); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
}
@@ -1657,7 +1642,7 @@ class Assembler : public AssemblerBase {
void instruction(XMMRegister dst, XMMRegister src) { \
instruction(dst, Operand(src)); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
}
@@ -1669,8 +1654,7 @@ class Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
v##instruction(dst, src1, Operand(src2)); \
} \
- void v##instruction(XMMRegister dst, XMMRegister src1, \
- const Operand& src2) { \
+ void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
}
@@ -1681,7 +1665,7 @@ class Assembler : public AssemblerBase {
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal prefetch.
- void prefetch(const Operand& src, int level);
+ void prefetch(Operand src, int level);
// TODO(lrn): Need SFENCE for movnt?
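// Our annotation (assumed mapping via the x86 "0F 18 /hint" encoding;
// not stated in this patch):
//   prefetch(src, 0)  =>  prefetchnta  (non-temporal, minimal cache pollution)
//   prefetch(src, 1)  =>  prefetcht0   (into all cache levels)
//   prefetch(src, 2)  =>  prefetcht1   (into L2 and up)
//   prefetch(src, 3)  =>  prefetcht2   (into L3 and up)
// Hypothetical call site: prefetch(Operand(esi, 0), 1);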
// Check the code size generated from label to here.
@@ -1735,7 +1719,7 @@ class Assembler : public AssemblerBase {
}
protected:
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister reg, Operand adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
void emit_sse_operand(XMMRegister dst, Register src);
@@ -1774,7 +1758,7 @@ class Assembler : public AssemblerBase {
// sel specifies the /n in the modrm byte (see the Intel PRM).
void emit_arith(int sel, Operand dst, const Immediate& x);
- void emit_operand(Register reg, const Operand& adr);
+ void emit_operand(Register reg, Operand adr);
void emit_label(Label* label);
@@ -1800,18 +1784,17 @@ class Assembler : public AssemblerBase {
inline void emit_disp(Label* L, Displacement::Type type);
inline void emit_near_disp(Label* L);
- void sse2_instr(XMMRegister dst, const Operand& src, byte prefix, byte escape,
+ void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
byte opcode);
- void ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
- byte escape1, byte escape2, byte opcode);
- void sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
- byte escape1, byte escape2, byte opcode);
- void vinstr(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
+ void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+ void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w);
// Most BMI instructions are similar.
- void bmi1(byte op, Register reg, Register vreg, const Operand& rm);
- void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm);
+ void bmi1(byte op, Register reg, Register vreg, Operand rm);
+ void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 697539713a..bdae590078 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -163,7 +163,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() != INTEGER) {
Label fast_power, try_arithmetic_simplification;
__ DoubleToI(exponent, double_exponent, double_scratch,
- TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
&try_arithmetic_simplification,
&try_arithmetic_simplification);
__ jmp(&int_exponent);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 8bd6b5f30c..ffe3c9150b 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -35,9 +35,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@@ -447,8 +447,8 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 6ce62e93bb..ad394020e5 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -2564,6 +2564,11 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(buffer, instruction);
}
+int Disassembler::InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ DisassemblerIA32 d(converter_, true /*crash if unimplemented*/);
+ return d.InstructionDecode(buffer, instruction);
+}
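// A hedged usage sketch for the new entry point (the test scaffolding
// below is assumed, not part of this patch):
//
//   disasm::NameConverter converter;
//   disasm::Disassembler d(converter);
//   v8::internal::EmbeddedVector<char, 128> buffer;
//   // Unlike InstructionDecode, this variant crashes on unimplemented
//   // opcodes, so coverage gaps fail loudly instead of printing garbage.
//   int length = d.InstructionDecodeForTesting(buffer, pc);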
// The IA-32 assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
diff --git a/deps/v8/src/ia32/frame-constants-ia32.h b/deps/v8/src/ia32/frame-constants-ia32.h
index b745a19466..a262b92af9 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/ia32/frame-constants-ia32.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_FRAMES_IA32_H_
-#define V8_IA32_FRAMES_IA32_H_
+#ifndef V8_IA32_FRAME_CONSTANTS_IA32_H_
+#define V8_IA32_FRAME_CONSTANTS_IA32_H_
namespace v8 {
namespace internal {
@@ -50,4 +50,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_IA32_FRAMES_IA32_H_
+#endif // V8_IA32_FRAME_CONSTANTS_IA32_H_
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 9edad9a44c..a4c6894ae8 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -69,13 +69,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // SharedFunctionInfo, vector, slot index.
- Register registers[] = {ebx, ecx, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index ebc8b39ab9..81e91f1e4f 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -14,6 +14,7 @@
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/runtime/runtime.h"
#include "src/ia32/assembler-ia32-inl.h"
@@ -79,9 +80,7 @@ void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
}
}
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> object = isolate()->heap()->root_handle(index);
if (object->IsHeapObject()) {
@@ -183,33 +182,17 @@ void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg) {
}
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch,
- MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan,
- Label* minus_zero, Label::Distance dst) {
+ XMMRegister scratch, Label* lost_precision,
+ Label* is_nan, Label::Distance dst) {
DCHECK(input_reg != scratch);
cvttsd2si(result_reg, Operand(input_reg));
Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
j(not_equal, lost_precision, dst);
j(parity_even, is_nan, dst);
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- test(result_reg, Operand(result_reg));
- j(not_zero, &done, Label::kNear);
- movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // jump to minus_zero.
- and_(result_reg, 1);
- j(not_zero, minus_zero, dst);
- bind(&done);
- }
}
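// What the sequence above computes, as a C++ sketch (our annotation,
// exact for inputs whose truncation fits in int32):
//
//   int32_t result = static_cast<int32_t>(input);     // cvttsd2si, truncates
//   double round_trip = static_cast<double>(result);  // Cvtsi2sd
//   // ucomisd(round_trip, input) then branches:
//   //   NaN input           -> parity flag set -> is_nan
//   //   round_trip != input -> not-equal       -> lost_precision
//   //   otherwise result_reg holds the exact integer value.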
-void TurboAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
+void TurboAssembler::LoadUint32(XMMRegister dst, Operand src) {
Label done;
cmp(src, Immediate(0));
ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
@@ -385,7 +368,7 @@ void MacroAssembler::MaybeDropFrames() {
RelocInfo::CODE_TARGET);
}
-void TurboAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
xorps(dst, dst);
cvtsi2sd(dst, src);
}
@@ -595,10 +578,11 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
push(Immediate(StackFrame::TypeToMarker(type)));
if (type == StackFrame::INTERNAL) {
push(Immediate(CodeObject()));
- }
- if (emit_debug_code()) {
- cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
+ // Check at runtime that this code object was patched correctly.
+ if (emit_debug_code()) {
+ cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
+ Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
+ }
}
}
@@ -862,6 +846,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ Address bytes_address = reinterpret_cast<Address>(stream->bytes());
+ mov(kOffHeapTrampolineRegister, Immediate(bytes_address, RelocInfo::NONE));
+ jmp(kOffHeapTrampolineRegister);
+}
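// Our annotation: kOffHeapTrampolineRegister is ecx (see the
// macro-assembler-ia32.h hunk below), and RelocInfo::NONE is presumably
// correct because the off-heap stream lives outside the GC heap, so the
// embedded address is never visited or relocated.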
+
void TurboAssembler::PrepareForTailCall(
const ParameterCount& callee_args_count, Register caller_args_count_reg,
Register scratch0, Register scratch1,
@@ -1001,11 +991,27 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ cmpb(Operand::StaticVariable(debug_is_active), Immediate(0));
+ j(equal, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
- j(equal, &skip_hook);
+ j(not_equal, &call_hook);
+
+ Register scratch = ecx;
+ mov(scratch, FieldOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ mov(scratch, FieldOperand(scratch, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(scratch, &skip_hook);
+ mov(scratch, FieldOperand(scratch, DebugInfo::kFlagsOffset));
+ test(scratch, Immediate(Smi::FromInt(DebugInfo::kBreakAtEntry)));
+ j(zero, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
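// The new early-outs restated as a C++ sketch (accessor names are
// hypothetical; the logic mirrors the compares and jumps above):
//
//   bool ShouldCallHook(Isolate* isolate, JSFunction* fun) {
//     if (!isolate->debug_is_active()) return false;
//     if (isolate->debug_hook_on_function_call()) return true;
//     Object* info = fun->shared()->debug_info();
//     if (info->IsSmi()) return false;  // no DebugInfo attached
//     return (DebugInfo::cast(info)->flags() & DebugInfo::kBreakAtEntry) != 0;
//   }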
@@ -1064,6 +1070,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
+ static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -1104,14 +1111,6 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFunctionCode(edi, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- Move(edi, function);
- InvokeFunction(edi, expected, actual, flag);
-}
-
void MacroAssembler::LoadGlobalProxy(Register dst) {
mov(dst, NativeContextOperand());
mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
@@ -1159,17 +1158,14 @@ void TurboAssembler::Move(Register dst, Register src) {
}
void TurboAssembler::Move(Register dst, const Immediate& x) {
- if (!x.is_heap_object_request() && x.is_zero() &&
- RelocInfo::IsNone(x.rmode())) {
+ if (!x.is_heap_object_request() && x.is_zero()) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
} else {
mov(dst, x);
}
}
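// Our annotation: "xor reg, reg" takes 2 bytes versus 5 for
// "mov reg, imm32", and CPUs recognize it as a dependency-breaking
// zeroing idiom. The dropped RelocInfo::IsNone() guard suggests a zero
// immediate can no longer carry relocation information here (assumption).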
-void TurboAssembler::Move(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
+void TurboAssembler::Move(Operand dst, const Immediate& x) { mov(dst, x); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> object) {
mov(dst, object);
@@ -1224,7 +1220,9 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
push(eax);
Move(eax, Immediate(lower));
movd(dst, Operand(eax));
- Move(eax, Immediate(upper));
+ if (upper != lower) {
+ Move(eax, Immediate(upper));
+ }
pinsrd(dst, Operand(eax), 1);
pop(eax);
} else {
@@ -1236,8 +1234,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
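// Hypothetical call sites showing what the new upper != lower check saves:
//   Move(xmm0, uint64_t{0x0000002A0000002A});  // halves equal: one mov eax, 42
//   Move(xmm1, uint64_t{0x4045000000000000});  // halves differ: two movs
// When the halves match, eax still holds the value for the pinsrd that
// fills the upper lane.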
-void TurboAssembler::Pshuflw(XMMRegister dst, const Operand& src,
- uint8_t shuffle) {
+void TurboAssembler::Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpshuflw(dst, src, shuffle);
@@ -1246,8 +1243,7 @@ void TurboAssembler::Pshuflw(XMMRegister dst, const Operand& src,
}
}
-void TurboAssembler::Pshufd(XMMRegister dst, const Operand& src,
- uint8_t shuffle) {
+void TurboAssembler::Pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpshufd(dst, src, shuffle);
@@ -1256,7 +1252,7 @@ void TurboAssembler::Pshufd(XMMRegister dst, const Operand& src,
}
}
-void TurboAssembler::Psignb(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpsignb(dst, dst, src);
@@ -1270,7 +1266,7 @@ void TurboAssembler::Psignb(XMMRegister dst, const Operand& src) {
UNREACHABLE();
}
-void TurboAssembler::Psignw(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Psignw(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpsignw(dst, dst, src);
@@ -1284,7 +1280,7 @@ void TurboAssembler::Psignw(XMMRegister dst, const Operand& src) {
UNREACHABLE();
}
-void TurboAssembler::Psignd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpsignd(dst, dst, src);
@@ -1298,7 +1294,7 @@ void TurboAssembler::Psignd(XMMRegister dst, const Operand& src) {
UNREACHABLE();
}
-void TurboAssembler::Pshufb(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpshufb(dst, dst, src);
@@ -1360,7 +1356,7 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
movd(dst, xmm0);
}
-void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8,
bool is_64_bits) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
@@ -1388,7 +1384,7 @@ void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
}
}
-void TurboAssembler::Lzcnt(Register dst, const Operand& src) {
+void TurboAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcnt(dst, src);
@@ -1402,7 +1398,7 @@ void TurboAssembler::Lzcnt(Register dst, const Operand& src) {
xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
}
-void TurboAssembler::Tzcnt(Register dst, const Operand& src) {
+void TurboAssembler::Tzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcnt(dst, src);
@@ -1415,7 +1411,7 @@ void TurboAssembler::Tzcnt(Register dst, const Operand& src) {
bind(&not_zero_src);
}
-void TurboAssembler::Popcnt(Register dst, const Operand& src) {
+void TurboAssembler::Popcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcnt(dst, src);
@@ -1548,6 +1544,59 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
}
+void TurboAssembler::RetpolineCall(Register reg) {
+ Label setup_return, setup_target, inner_indirect_branch, capture_spec;
+
+ jmp(&setup_return); // Jump past the entire retpoline below.
+
+ bind(&inner_indirect_branch);
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ mov(Operand(esp, 0), reg);
+ ret(0);
+
+ bind(&setup_return);
+ call(&inner_indirect_branch); // Callee will return after this instruction.
+}
+
+void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
+ Label setup_return, setup_target, inner_indirect_branch, capture_spec;
+
+ jmp(&setup_return); // Jump past the entire retpoline below.
+
+ bind(&inner_indirect_branch);
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ mov(Operand(esp, 0), destination, rmode);
+ ret(0);
+
+ bind(&setup_return);
+ call(&inner_indirect_branch); // Callee will return after this instruction.
+}
+
+void TurboAssembler::RetpolineJump(Register reg) {
+ Label setup_target, capture_spec;
+
+ call(&setup_target);
+
+ bind(&capture_spec);
+ pause();
+ jmp(&capture_spec);
+
+ bind(&setup_target);
+ mov(Operand(esp, 0), reg);
+ ret(0);
+}
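// Our annotation: the control flow these retpolines emit, sketched as
// assembly (labels as in the code above):
//
//   jmp  setup_return           ; skip over the retpoline body
// inner_indirect_branch:
//   call setup_target           ; return address now points at capture_spec
// capture_spec:
//   pause                       ; any mispredicted speculative "return"
//   jmp  capture_spec           ; spins here instead of attacker-chosen code
// setup_target:
//   mov  [esp], reg             ; replace return address with real target
//   ret                         ; branch to reg via the return predictor
// setup_return:
//   call inner_indirect_branch  ; callee eventually returns to here
//
// The effect is an indirect branch that never consults the indirect branch
// predictor, which is the Spectre v2 "retpoline" mitigation.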
#ifdef DEBUG
bool AreAliased(Register reg1,
@@ -1596,6 +1645,22 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
j(cc, condition_met, condition_met_distance);
}
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // In order to get the address of the current instruction, we use a call
+ // followed by a pop: the call pushes the return address onto the stack,
+ // and the pop moves it into the destination register.
+ Label current;
+ call(&current);
+ int pc = pc_offset();
+ bind(&current);
+ pop(dst);
+ if (pc != 0) {
+ sub(dst, Immediate(pc));
+ }
+}
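// What this emits, sketched (our annotation):
//   call current   ; pushes the address of label "current"
// current:
//   pop  dst       ; dst = address of "current" = code start + pc
//   sub  dst, pc   ; dst = code start (the sub is skipped when pc == 0)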
+
+void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 6242333847..ce299ba5a7 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -20,12 +20,15 @@ constexpr Register kReturnRegister2 = edi;
constexpr Register kJSFunctionRegister = edi;
constexpr Register kContextRegister = esi;
constexpr Register kAllocateSizeRegister = edx;
+constexpr Register kSpeculationPoisonRegister = ebx;
constexpr Register kInterpreterAccumulatorRegister = eax;
-constexpr Register kInterpreterBytecodeOffsetRegister = ecx;
+constexpr Register kInterpreterBytecodeOffsetRegister = edx;
constexpr Register kInterpreterBytecodeArrayRegister = edi;
constexpr Register kInterpreterDispatchTableRegister = esi;
constexpr Register kJavaScriptCallArgCountRegister = eax;
+constexpr Register kJavaScriptCallCodeStartRegister = ecx;
constexpr Register kJavaScriptCallNewTargetRegister = edx;
+constexpr Register kOffHeapTrampolineRegister = ecx;
constexpr Register kRuntimeCallFunctionRegister = ebx;
constexpr Register kRuntimeCallArgCountRegister = eax;
@@ -100,7 +103,7 @@ class TurboAssembler : public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- void Move(const Operand& dst, const Immediate& x);
+ void Move(Operand dst, const Immediate& x);
// Move an immediate into an XMM register.
void Move(XMMRegister dst, uint32_t src);
@@ -113,6 +116,11 @@ class TurboAssembler : public Assembler {
void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
void Call(Label* target) { call(target); }
+ void RetpolineCall(Register reg);
+ void RetpolineCall(Address destination, RelocInfo::Mode rmode);
+
+ void RetpolineJump(Register reg);
+
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
call(target, rmode);
}
@@ -180,13 +188,13 @@ class TurboAssembler : public Assembler {
void Prologue();
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
- void Lzcnt(Register dst, const Operand& src);
+ void Lzcnt(Register dst, Operand src);
void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
- void Tzcnt(Register dst, const Operand& src);
+ void Tzcnt(Register dst, Operand src);
void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
- void Popcnt(Register dst, const Operand& src);
+ void Popcnt(Register dst, Operand src);
void Ret();
@@ -197,11 +205,11 @@ class TurboAssembler : public Assembler {
void Pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
Pshuflw(dst, Operand(src), shuffle);
}
- void Pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle);
void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
Pshufd(dst, Operand(src), shuffle);
}
- void Pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
// SSE/SSE2 instructions with AVX version.
#define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \
@@ -214,12 +222,12 @@ class TurboAssembler : public Assembler {
} \
}
- AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, const Operand&)
- AVX_OP2_WITH_TYPE(Movdqu, movdqu, const Operand&, XMMRegister)
+ AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, Operand)
+ AVX_OP2_WITH_TYPE(Movdqu, movdqu, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
- AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Operand)
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
- AVX_OP2_WITH_TYPE(Movd, movd, const Operand&, XMMRegister)
+ AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
#undef AVX_OP2_WITH_TYPE
@@ -236,27 +244,29 @@ class TurboAssembler : public Assembler {
}
#define AVX_OP3_XO(macro_name, name) \
AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
- AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, const Operand&)
+ AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
AVX_OP3_XO(Psubb, psubb)
AVX_OP3_XO(Psubw, psubw)
AVX_OP3_XO(Psubd, psubd)
AVX_OP3_XO(Pxor, pxor)
+ AVX_OP3_XO(Xorps, xorps)
+ AVX_OP3_XO(Xorpd, xorpd)
#undef AVX_OP3_XO
#undef AVX_OP3_WITH_TYPE
// Non-SSE2 instructions.
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
- void Pshufb(XMMRegister dst, const Operand& src);
+ void Pshufb(XMMRegister dst, Operand src);
void Psignb(XMMRegister dst, XMMRegister src) { Psignb(dst, Operand(src)); }
- void Psignb(XMMRegister dst, const Operand& src);
+ void Psignb(XMMRegister dst, Operand src);
void Psignw(XMMRegister dst, XMMRegister src) { Psignw(dst, Operand(src)); }
- void Psignw(XMMRegister dst, const Operand& src);
+ void Psignw(XMMRegister dst, Operand src);
void Psignd(XMMRegister dst, XMMRegister src) { Psignd(dst, Operand(src)); }
- void Psignd(XMMRegister dst, const Operand& src);
+ void Psignd(XMMRegister dst, Operand src);
void Pextrb(Register dst, XMMRegister src, int8_t imm8);
void Pextrw(Register dst, XMMRegister src, int8_t imm8);
@@ -265,27 +275,27 @@ class TurboAssembler : public Assembler {
bool is_64_bits = false) {
Pinsrd(dst, Operand(src), imm8, is_64_bits);
}
- void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8,
+ void Pinsrd(XMMRegister dst, Operand src, int8_t imm8,
bool is_64_bits = false);
void LoadUint32(XMMRegister dst, Register src) {
LoadUint32(dst, Operand(src));
}
- void LoadUint32(XMMRegister dst, const Operand& src);
+ void LoadUint32(XMMRegister dst, Operand src);
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
// xorps to clear the dst register before cvtsi2sd to solve this issue.
void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
- void Cvtsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtsi2sd(XMMRegister dst, Operand src);
void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
void SlowTruncateToIDelayed(Zone* zone, Register result_reg);
void Push(Register src) { push(src); }
- void Push(const Operand& src) { push(src); }
+ void Push(Operand src) { push(src); }
void Push(Immediate value) { push(value); }
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
void Push(Smi* smi) { Push(Immediate(smi)); }
@@ -320,6 +330,12 @@ class TurboAssembler : public Assembler {
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
private:
bool has_frame_ = false;
Isolate* const isolate_;
@@ -341,7 +357,7 @@ class MacroAssembler : public TurboAssembler {
mov(dst, Immediate(x));
}
}
- void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
+ void Set(Operand dst, int32_t x) { mov(dst, Immediate(x)); }
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
@@ -349,7 +365,7 @@ class MacroAssembler : public TurboAssembler {
// These methods can only be used with constant roots (i.e. non-writable
// and not in new space).
void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
+ void CompareRoot(Operand with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -358,8 +374,7 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
- void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
- Label* if_equal,
+ void JumpIfRoot(Operand with, Heap::RootListIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
@@ -372,7 +387,7 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
- void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ void JumpIfNotRoot(Operand with, Heap::RootListIndex index,
Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
@@ -443,6 +458,7 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& actual, InvokeFlag flag);
// On function call, call into the debugger if necessary.
+ // This may clobber ecx.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
@@ -455,10 +471,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -467,8 +479,7 @@ class MacroAssembler : public TurboAssembler {
void CmpInstanceType(Register map, InstanceType type);
void DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan, Label* minus_zero,
+ XMMRegister scratch, Label* lost_precision, Label* is_nan,
Label::Distance dst = Label::kFar);
// Smi tagging support.
@@ -575,6 +586,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// ---------------------------------------------------------------------------
// Utilities
@@ -584,7 +598,7 @@ class MacroAssembler : public TurboAssembler {
void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
void Pop(Register dst) { pop(dst); }
- void Pop(const Operand& dst) { pop(dst); }
+ void Pop(Operand dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
diff --git a/deps/v8/src/ia32/sse-instr.h b/deps/v8/src/ia32/sse-instr.h
index 82cb0e2e58..7996ee50be 100644
--- a/deps/v8/src/ia32/sse-instr.h
+++ b/deps/v8/src/ia32/sse-instr.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SSE_INSTR_H_
-#define V8_SSE_INSTR_H_
+#ifndef V8_IA32_SSE_INSTR_H_
+#define V8_IA32_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
V(paddb, 66, 0F, FC) \
@@ -60,4 +60,4 @@
V(pmaxud, 66, 0F, 38, 3F) \
V(pmulld, 66, 0F, 38, 40)
-#endif // V8_SSE_INSTR_H_
+#endif // V8_IA32_SSE_INSTR_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index dfd88862bd..9800149ae1 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -300,18 +300,18 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Comment("out of bounds elements access");
Label return_undefined(this);
- // Negative indices aren't valid array indices (according to
- // the ECMAScript specification), and are stored as properties
- // in V8, not elements. So we cannot handle them here.
- GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), miss);
-
// Check if we're allowed to handle OOB accesses.
Node* allow_out_of_bounds =
IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
GotoIfNot(allow_out_of_bounds, miss);
- // For typed arrays we never lookup elements in the prototype chain.
+ // Negative indices aren't valid array indices (according to
+ // the ECMAScript specification), and are stored as properties
+ // in V8, not elements. So we cannot handle them here, except
+ // in the case of typed arrays, where integer-indexed properties
+ // aren't looked up in the prototype chain.
GotoIf(IsJSTypedArray(holder), &return_undefined);
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), miss);
// For all other receivers we need to check that the prototype chain
// doesn't contain any elements.
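// Our annotation: for integer-indexed exotic objects (typed arrays), a
// read of a canonical numeric index that is negative or out of bounds
// yields undefined without consulting the prototype chain, e.g. both
// new Int8Array(4)[-1] and new Int8Array(4)[10] are undefined. Returning
// undefined directly is therefore safe only in the typed-array case.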
@@ -1350,7 +1350,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
BIND(&if_smi_hash);
{
- Node* hash = SmiToWord32(properties);
+ Node* hash = SmiToInt32(properties);
Node* encoded_hash =
Word32Shl(hash, Int32Constant(PropertyArray::HashField::kShift));
var_encoded_hash.Bind(encoded_hash);
@@ -1368,7 +1368,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
Node* length_intptr = ChangeInt32ToIntPtr(
Word32And(length_and_hash_int32,
Int32Constant(PropertyArray::LengthField::kMask)));
- Node* length = WordToParameter(length_intptr, mode);
+ Node* length = IntPtrToParameter(length_intptr, mode);
var_length.Bind(length);
Goto(&extend_store);
}
@@ -1412,11 +1412,11 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
// TODO(gsathya): Clean up the type conversions by creating smarter
// helpers that do the correct op based on the mode.
Node* new_capacity_int32 =
- TruncateWordToWord32(ParameterToWord(new_capacity, mode));
+ TruncateIntPtrToInt32(ParameterToIntPtr(new_capacity, mode));
Node* new_length_and_hash_int32 =
Word32Or(var_encoded_hash.value(), new_capacity_int32);
StoreObjectField(new_properties, PropertyArray::kLengthAndHashOffset,
- SmiFromWord32(new_length_and_hash_int32));
+ SmiFromInt32(new_length_and_hash_int32));
StoreObjectField(object, JSObject::kPropertiesOrHashOffset, new_properties);
Comment("] Extend storage");
Goto(&done);
@@ -1614,26 +1614,22 @@ void AccessorAssembler::EmitElementLoad(
SmiUntag(CAST(LoadObjectField(object, JSTypedArray::kLengthOffset)));
GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
- // Backing store = external_pointer + base_pointer.
- Node* external_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- Node* base_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* backing_store =
- IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
+ Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
Label uint8_elements(this), int8_elements(this), uint16_elements(this),
int16_elements(this), uint32_elements(this), int32_elements(this),
- float32_elements(this), float64_elements(this);
+ float32_elements(this), float64_elements(this), bigint64_elements(this),
+ biguint64_elements(this);
Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements};
+ &uint8_elements, &uint8_elements, &int8_elements,
+ &uint16_elements, &int16_elements, &uint32_elements,
+ &int32_elements, &float32_elements, &float64_elements,
+ &bigint64_elements, &biguint64_elements};
int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
+ UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+ UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
+ INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS,
+ BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS};
const size_t kTypedElementsKindCount =
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
@@ -1645,27 +1641,27 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
- exit_point->Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromInt32(element));
}
BIND(&int8_elements);
{
Comment("INT8_ELEMENTS");
Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
- exit_point->Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromInt32(element));
}
BIND(&uint16_elements);
{
Comment("UINT16_ELEMENTS");
Node* index = WordShl(intptr_index, IntPtrConstant(1));
Node* element = Load(MachineType::Uint16(), backing_store, index);
- exit_point->Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromInt32(element));
}
BIND(&int16_elements);
{
Comment("INT16_ELEMENTS");
Node* index = WordShl(intptr_index, IntPtrConstant(1));
Node* element = Load(MachineType::Int16(), backing_store, index);
- exit_point->Return(SmiFromWord32(element));
+ exit_point->Return(SmiFromInt32(element));
}
BIND(&uint32_elements);
{
@@ -1697,6 +1693,18 @@ void AccessorAssembler::EmitElementLoad(
var_double_value->Bind(element);
Goto(rebox_double);
}
+ BIND(&bigint64_elements);
+ {
+ Comment("BIGINT64_ELEMENTS");
+ exit_point->Return(LoadFixedTypedArrayElementAsTagged(
+ backing_store, intptr_index, BIGINT64_ELEMENTS, INTPTR_PARAMETERS));
+ }
+ BIND(&biguint64_elements);
+ {
+ Comment("BIGUINT64_ELEMENTS");
+ exit_point->Return(LoadFixedTypedArrayElementAsTagged(
+ backing_store, intptr_index, BIGUINT64_ELEMENTS, INTPTR_PARAMETERS));
+ }
}
}
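The BigInt additions above extend two parallel arrays that feed a multi-way Switch; correctness hinges on the label and kind arrays staying index-aligned and matching kTypedElementsKindCount. A self-contained sketch of that parallel-array dispatch pattern:

    #include <cstddef>

    // Two index-aligned arrays: case values and their targets. The
    // static_assert pins the invariant the CSA code relies on.
    enum ElementsKind { UINT8, UINT8_CLAMPED, BIGINT64, BIGUINT64 };
    static const ElementsKind kKinds[] = {UINT8, UINT8_CLAMPED, BIGINT64,
                                          BIGUINT64};
    static const char* const kLabels[] = {"uint8", "uint8", "bigint64",
                                          "biguint64"};
    static_assert(sizeof(kKinds) / sizeof(kKinds[0]) ==
                      sizeof(kLabels) / sizeof(kLabels[0]),
                  "dispatch arrays must stay index-aligned");

    const char* DispatchName(ElementsKind kind) {
      for (size_t i = 0; i < sizeof(kKinds) / sizeof(kKinds[0]); ++i) {
        if (kKinds[i] == kind) return kLabels[i];
      }
      return "unhandled";
    }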
@@ -1718,13 +1726,13 @@ void AccessorAssembler::BranchIfStrictMode(Node* vector, Node* slot,
LoadObjectField(vector, FeedbackVector::kSharedFunctionInfoOffset);
Node* metadata =
LoadObjectField(sfi, SharedFunctionInfo::kFeedbackMetadataOffset);
- Node* slot_int = SmiToWord32(slot);
+ Node* slot_int = SmiToInt32(slot);
// See VectorICComputer::index().
const int kItemsPerWord = FeedbackMetadata::VectorICComputer::kItemsPerWord;
Node* word_index = Int32Div(slot_int, Int32Constant(kItemsPerWord));
Node* word_offset = Int32Mod(slot_int, Int32Constant(kItemsPerWord));
- Node* data = SmiToWord32(LoadFixedArrayElement(
+ Node* data = SmiToInt32(LoadFixedArrayElement(
metadata, ChangeInt32ToIntPtr(word_index),
FeedbackMetadata::kReservedIndexCount * kPointerSize, INTPTR_PARAMETERS));
// See VectorICComputer::decode().
@@ -1803,10 +1811,12 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
BIND(&if_oob);
{
Comment("out of bounds");
- // Negative keys can't take the fast OOB path.
- GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), slow);
// Positive OOB indices are effectively the same as hole loads.
- Goto(&if_element_hole);
+ GotoIf(IntPtrGreaterThanOrEqual(index, IntPtrConstant(0)),
+ &if_element_hole);
+ // Negative keys can't take the fast OOB path, except for typed arrays.
+ GotoIfNot(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE), slow);
+ Return(UndefinedConstant());
}
BIND(&if_element_hole);
@@ -1977,6 +1987,9 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// TODO(jkummerow): Consider supporting JSModuleNamespace.
GotoIfNot(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), slow);
+ // Private field/symbol lookup is not supported.
+ GotoIf(IsPrivateSymbol(p->name), slow);
+
direct_exit.ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
p->context, receiver /*holder is the same as receiver*/, p->name,
@@ -2004,7 +2017,7 @@ Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
- Node* map32 = TruncateWordToWord32(BitcastTaggedToWord(map));
+ Node* map32 = TruncateIntPtrToInt32(BitcastTaggedToWord(map));
// Base the offset on a simple combination of name and map.
Node* hash = Int32Add(hash_field, map32);
uint32_t mask = (StubCache::kPrimaryTableSize - 1)
@@ -2016,8 +2029,8 @@ Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
// See v8::internal::StubCache::SecondaryOffset().
// Use the seed from the primary cache in the secondary cache.
- Node* name32 = TruncateWordToWord32(BitcastTaggedToWord(name));
- Node* hash = Int32Sub(TruncateWordToWord32(seed), name32);
+ Node* name32 = TruncateIntPtrToInt32(BitcastTaggedToWord(name));
+ Node* hash = Int32Sub(TruncateIntPtrToInt32(seed), name32);
hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
int32_t mask = (StubCache::kSecondaryTableSize - 1)
<< StubCache::kCacheIndexShift;
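Both stub-cache offsets mix 32-bit truncations of tagged words and then mask down to the table size, which is why the TruncateIntPtrToInt32 rename here is purely mechanical. A sketch of the arithmetic with stand-in constants (the real sizes, shift, and magic value live in StubCache):

    #include <cstdint>

    // Stand-in constants for illustration only.
    constexpr uint32_t kPrimaryTableSize = 2048;
    constexpr uint32_t kSecondaryTableSize = 512;
    constexpr uint32_t kCacheIndexShift = 3;
    constexpr uint32_t kSecondaryMagic = 0xb16ca6e5;  // hypothetical value

    uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map32) {
      uint32_t mask = (kPrimaryTableSize - 1) << kCacheIndexShift;
      // Low bits only; see the collision comment above.
      return (name_hash_field + map32) & mask;
    }

    uint32_t SecondaryOffset(uint32_t name32, uint32_t seed) {
      uint32_t mask = (kSecondaryTableSize - 1) << kCacheIndexShift;
      return (seed - name32 + kSecondaryMagic) & mask;
    }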
@@ -2340,9 +2353,9 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
Comment("Load lexical variable");
TNode<IntPtrT> lexical_handler = SmiUntag(CAST(maybe_weak_cell));
TNode<IntPtrT> context_index =
- Signed(DecodeWord<GlobalICNexus::ContextIndexBits>(lexical_handler));
+ Signed(DecodeWord<FeedbackNexus::ContextIndexBits>(lexical_handler));
TNode<IntPtrT> slot_index =
- Signed(DecodeWord<GlobalICNexus::SlotIndexBits>(lexical_handler));
+ Signed(DecodeWord<FeedbackNexus::SlotIndexBits>(lexical_handler));
TNode<Context> context = lazy_context();
TNode<Context> script_context = LoadScriptContext(context, context_index);
TNode<Object> result = LoadContextElement(script_context, slot_index);
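A lexical-variable handler is a Smi that packs a script-context index and a slot index into disjoint bit fields; DecodeWord<...> above pulls the two fields back out. A sketch of that packing with an assumed field width (the real layout comes from FeedbackNexus::ContextIndexBits/SlotIndexBits):

    #include <cstdint>

    // Assumed width for illustration; not the real field layout.
    constexpr unsigned kSlotIndexBits = 24;

    uintptr_t EncodeLexicalHandler(uintptr_t context_index,
                                   uintptr_t slot_index) {
      return (context_index << kSlotIndexBits) | slot_index;
    }

    uintptr_t DecodeContextIndex(uintptr_t handler) {
      return handler >> kSlotIndexBits;
    }

    uintptr_t DecodeSlotIndex(uintptr_t handler) {
      return handler & ((uintptr_t{1} << kSlotIndexBits) - 1);
    }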
@@ -2685,9 +2698,9 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
Comment("Store lexical variable");
TNode<IntPtrT> lexical_handler = SmiUntag(maybe_weak_cell);
TNode<IntPtrT> context_index =
- Signed(DecodeWord<GlobalICNexus::ContextIndexBits>(lexical_handler));
+ Signed(DecodeWord<FeedbackNexus::ContextIndexBits>(lexical_handler));
TNode<IntPtrT> slot_index =
- Signed(DecodeWord<GlobalICNexus::SlotIndexBits>(lexical_handler));
+ Signed(DecodeWord<FeedbackNexus::SlotIndexBits>(lexical_handler));
TNode<Context> script_context =
LoadScriptContext(CAST(pp->context), context_index);
StoreContextElement(script_context, slot_index, CAST(pp->value));
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 46376dd6a8..3e4f551c14 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
-#define V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+#ifndef V8_IC_ACCESSOR_ASSEMBLER_H_
+#define V8_IC_ACCESSOR_ASSEMBLER_H_
#include "src/code-stub-assembler.h"
@@ -335,4 +335,4 @@ class ExitPoint {
} // namespace internal
} // namespace v8
-#endif // V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+#endif // V8_IC_ACCESSOR_ASSEMBLER_H_
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index d7afd7b655..420f66c174 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
-#define V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
+#ifndef V8_IC_BINARY_OP_ASSEMBLER_H_
+#define V8_IC_BINARY_OP_ASSEMBLER_H_
#include <functional>
#include "src/code-stub-assembler.h"
@@ -60,4 +60,4 @@ class BinaryOpAssembler : public CodeStubAssembler {
} // namespace internal
} // namespace v8
-#endif // V8_SRC_IC_BINARY_OP_ASSEMBLER_H_
+#endif // V8_IC_BINARY_OP_ASSEMBLER_H_
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index d6fa23611e..83ab9d86b8 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IC_INL_H_
-#define V8_IC_INL_H_
+#ifndef V8_IC_IC_INL_H_
+#define V8_IC_IC_INL_H_
#include "src/ic/ic.h"
@@ -59,4 +59,4 @@ bool IC::AddressIsDeoptimizedCode(Isolate* isolate, Address address) {
} // namespace internal
} // namespace v8
-#endif // V8_IC_INL_H_
+#endif // V8_IC_IC_INL_H_
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 62a2e7cf59..e6fa0b1ceb 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -58,12 +58,18 @@ const char* GetModifier(KeyedAccessLoadMode mode) {
}
const char* GetModifier(KeyedAccessStoreMode mode) {
- if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
- if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- return ".IGNORE_OOB";
+ switch (mode) {
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ return ".COW";
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ return ".STORE+COW";
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ return ".IGNORE_OOB";
+ default:
+ break;
}
- if (IsGrowStoreMode(mode)) return ".GROW";
- return "";
+ DCHECK(!IsCOWHandlingStoreMode(mode));
+ return IsGrowStoreMode(mode) ? ".GROW" : "";
}
} // namespace
@@ -89,12 +95,10 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
const char* modifier = "";
if (IsKeyedLoadIC()) {
- KeyedAccessLoadMode mode =
- casted_nexus<KeyedLoadICNexus>()->GetKeyedAccessLoadMode();
+ KeyedAccessLoadMode mode = nexus()->GetKeyedAccessLoadMode();
modifier = GetModifier(mode);
} else if (IsKeyedStoreIC()) {
- KeyedAccessStoreMode mode =
- casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+ KeyedAccessStoreMode mode = nexus()->GetKeyedAccessStoreMode();
modifier = GetModifier(mode);
}
@@ -147,13 +151,14 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
#define TRACE_IC(type, name) TraceIC(type, name)
-IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
+IC::IC(FrameDepth depth, Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
: isolate_(isolate),
vector_set_(false),
kind_(FeedbackSlotKind::kInvalid),
target_maps_set_(false),
slow_stub_reason_(nullptr),
- nexus_(nexus) {
+ nexus_(vector, slot) {
// To improve the performance of the (much used) IC code, we unfold a few
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
@@ -199,9 +204,8 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
constant_pool_address_ = constant_pool;
}
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
- DCHECK_NOT_NULL(nexus);
- kind_ = nexus->kind();
- state_ = nexus->StateFromFeedback();
+ kind_ = nexus_.kind();
+ state_ = nexus_.StateFromFeedback();
old_state_ = state_;
}
@@ -251,12 +255,12 @@ static void LookupForRead(LookupIterator* it) {
bool IC::ShouldRecomputeHandler(Handle<String> name) {
if (!RecomputeHandlerForName(name)) return false;
- maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
-
// This is a contextual access, always just update the handler and stay
// monomorphic.
if (IsGlobalIC()) return true;
+ maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
+
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
// to a more general elements kind.
@@ -315,6 +319,13 @@ MaybeHandle<Object> IC::ReferenceError(Handle<Name> name) {
isolate(), NewReferenceError(MessageTemplate::kNotDefined, name), Object);
}
+// static
+void IC::OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
+ JSFunction* host_function, const char* reason) {
+ FeedbackVector* vector = nexus->vector();
+ FeedbackSlot slot = nexus->slot();
+ OnFeedbackChanged(isolate, vector, slot, host_function, reason);
+}
// static
void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector* vector,
@@ -385,21 +396,15 @@ bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
vector_set_ = true;
OnFeedbackChanged(
- isolate(), *vector(), slot(), GetHostFunction(),
+ isolate(), nexus(), GetHostFunction(),
new_state == PREMONOMORPHIC ? "Premonomorphic" : "Megamorphic");
return changed;
}
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler) {
- if (IsLoadGlobalIC()) {
- LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
- nexus->ConfigureHandlerMode(handler);
-
- } else if (IsStoreGlobalIC()) {
- StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
- nexus->ConfigureHandlerMode(handler);
-
+ if (IsGlobalIC()) {
+ nexus()->ConfigureHandlerMode(handler);
} else {
// Non-keyed ICs don't track the name explicitly.
if (!is_keyed()) name = Handle<Name>::null();
@@ -407,7 +412,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
}
vector_set_ = true;
- OnFeedbackChanged(isolate(), *vector(), slot(), GetHostFunction(),
+ OnFeedbackChanged(isolate(), nexus(), GetHostFunction(),
IsLoadGlobalIC() ? "LoadGlobal" : "Monomorphic");
}
@@ -419,8 +424,7 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
nexus()->ConfigurePolymorphic(name, maps, handlers);
vector_set_ = true;
- OnFeedbackChanged(isolate(), *vector(), slot(), GetHostFunction(),
- "Polymorphic");
+ OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Polymorphic");
}
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
@@ -451,6 +455,19 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
LookupIterator it(object, name);
LookupForRead(&it);
+ if (name->IsPrivate()) {
+ if (name->IsPrivateField() && !it.IsFound()) {
+ return TypeError(MessageTemplate::kInvalidPrivateFieldAccess, object,
+ name);
+ }
+
+ // IC handling of private symbols/fields lookup on JSProxy is not
+ // supported.
+ if (object->IsJSProxy()) {
+ use_ic = false;
+ }
+ }
+
if (it.IsFound() || !ShouldThrowReferenceError()) {
// Update inline cache and stub cache.
if (use_ic) UpdateCaches(&it);
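The inserted block gives private names their class-fields semantics: loading a missing private field is a TypeError, never undefined, and private lookups on JSProxy skip the IC and take the generic path. A condensed sketch of the decision (the helper's shape is hypothetical):

    // Returns whether the IC may be used; sets *throw_type_error when the
    // missing-private-field error must be raised instead.
    bool MayUseICForPrivateName(bool is_private_field, bool found,
                                bool receiver_is_proxy,
                                bool* throw_type_error) {
      *throw_type_error = false;
      if (is_private_field && !found) {
        *throw_type_error = true;  // kInvalidPrivateFieldAccess
        return false;
      }
      // IC handling of private names on JSProxy is not supported.
      return !receiver_is_proxy;
    }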
@@ -492,9 +509,8 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
}
if (FLAG_use_ic) {
- LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
- if (nexus->ConfigureLexicalVarMode(lookup_result.context_index,
- lookup_result.slot_index)) {
+ if (nexus()->ConfigureLexicalVarMode(lookup_result.context_index,
+ lookup_result.slot_index)) {
TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_LoadScriptContextField);
} else {
// Given combination of indices can't be encoded, so use slow stub.
@@ -638,14 +654,14 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
UpdateMonomorphicIC(handler, name);
break;
}
- // Fall through.
+ V8_FALLTHROUGH;
case POLYMORPHIC:
if (UpdatePolymorphicIC(name, handler)) break;
if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
CopyICToMegamorphicCache(name);
}
ConfigureVectorState(MEGAMORPHIC, name);
- // Fall through.
+ V8_FALLTHROUGH;
case MEGAMORPHIC:
UpdateMegamorphicCache(*receiver_map(), *name, *handler);
// Indicate that we've handled this case.
@@ -685,8 +701,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
// Now update the cell in the feedback vector.
- LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
- nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
+ nexus()->ConfigurePropertyCellMode(lookup->GetPropertyCell());
TRACE_IC("LoadGlobalIC", lookup->name());
return;
}
@@ -1199,8 +1214,14 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
Object);
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded() &&
!object->IsJSValue()) {
- if ((object->IsJSReceiver() || object->IsString()) &&
- key->ToArrayIndex(&index)) {
+ // For regular JSReceiver or String {object}s the {key} must be a positive
+ // array index; for JSTypedArray {object}s we can also support negative
+ // {key}s, which we just map into the [2**31,2**32-1] range (via a bit_cast).
+ // This is valid since JSTypedArray::length is always a Smi.
+ if (((object->IsJSReceiver() || object->IsString()) &&
+ key->ToArrayIndex(&index)) ||
+ (object->IsJSTypedArray() &&
+ key->ToInt32(bit_cast<int32_t*>(&index)))) {
KeyedAccessLoadMode load_mode = GetLoadMode(object, index);
UpdateLoadElement(Handle<HeapObject>::cast(object), load_mode);
if (is_vector_set()) {
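The bit_cast trick rests on simple range arithmetic: a negative int32 key reinterpreted as uint32 lands in [2**31, 2**32-1], while a typed array's length is a non-negative Smi below 2**31, so the reinterpreted index always fails the bounds check and takes the OOB path. A self-contained check:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // bit_cast equivalent: reinterpret the int32 key as uint32.
    uint32_t BitCastKey(int32_t key) {
      uint32_t index;
      std::memcpy(&index, &key, sizeof(index));
      return index;
    }

    int main() {
      const uint32_t kMaxSmiLength = (1u << 31) - 1;  // lengths are Smis
      assert(BitCastKey(-1) == 0xFFFFFFFFu);
      assert(BitCastKey(-5) >= (1u << 31));    // negative keys map high
      assert(BitCastKey(-5) > kMaxSmiLength);  // hence always out of bounds
      return 0;
    }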
@@ -1287,7 +1308,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
}
}
- receiver = it->GetStoreTarget();
+ receiver = it->GetStoreTarget<JSObject>();
if (it->ExtendingNonExtensible(receiver)) return false;
created_new_transition_ =
it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
@@ -1322,9 +1343,8 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
}
if (FLAG_use_ic) {
- StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
- if (nexus->ConfigureLexicalVarMode(lookup_result.context_index,
- lookup_result.slot_index)) {
+ if (nexus()->ConfigureLexicalVarMode(lookup_result.context_index,
+ lookup_result.slot_index)) {
TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_StoreScriptContextField);
} else {
// Given combination of indices can't be encoded, so use slow stub.
@@ -1383,7 +1403,23 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
LookupIterator it = LookupIterator::ForTransitionHandler(
isolate(), object, name, value, cached_handler, transition_map);
- if (FLAG_use_ic) UpdateCaches(&it, value, store_mode, cached_handler);
+
+ bool use_ic = FLAG_use_ic;
+
+ if (name->IsPrivate()) {
+ if (name->IsPrivateField() && !it.IsFound()) {
+ return TypeError(MessageTemplate::kInvalidPrivateFieldAccess, object,
+ name);
+ }
+
+ // IC handling of private fields/symbols stores on JSProxy is not
+ // supported.
+ if (object->IsJSProxy()) {
+ use_ic = false;
+ }
+ }
+
+ if (use_ic) UpdateCaches(&it, value, store_mode, cached_handler);
MAYBE_RETURN_NULL(
Object::SetProperty(&it, value, language_mode(), store_mode));
@@ -1411,8 +1447,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
// Now update the cell in the feedback vector.
- StoreGlobalICNexus* nexus = casted_nexus<StoreGlobalICNexus>();
- nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
+ nexus()->ConfigurePropertyCellMode(lookup->GetPropertyCell());
TRACE_IC("StoreGlobalIC", lookup->name());
return;
}
@@ -1439,7 +1474,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
case LookupIterator::TRANSITION: {
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- Handle<JSObject> store_target = lookup->GetStoreTarget();
+ Handle<JSObject> store_target = lookup->GetStoreTarget<JSObject>();
if (store_target->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalTransitionDH);
@@ -1692,7 +1727,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
if (receiver_map.is_identical_to(previous_receiver_map) &&
old_store_mode == STANDARD_STORE &&
- (store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ (store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
// A "normal" IC that handles stores can switch to a version that can
@@ -1787,10 +1822,10 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
}
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
DCHECK(map->has_fixed_typed_array_elements());
- // Fall through
+ V8_FALLTHROUGH;
case STORE_NO_TRANSITION_HANDLE_COW:
case STANDARD_STORE:
- case STORE_AND_GROW_NO_TRANSITION:
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
return map;
}
UNREACHABLE();
@@ -1799,7 +1834,7 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
Handle<Object> KeyedStoreIC::StoreElementHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
DCHECK(!receiver_map->DictionaryElementsInPrototypeChainOnly());
@@ -1840,7 +1875,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
MapHandles* receiver_maps, ObjectHandles* handlers,
KeyedAccessStoreMode store_mode) {
DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
@@ -1915,7 +1950,7 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
}
- return STORE_AND_GROW_NO_TRANSITION;
+ return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
} else {
// Handle only in-bounds elements accesses.
if (receiver->HasSmiElements()) {
@@ -2005,7 +2040,13 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
old_receiver_map = handle(receiver->map(), isolate());
is_arguments = receiver->IsJSArgumentsObject();
bool is_proxy = receiver->IsJSProxy();
- key_is_valid_index = key->IsSmi() && Smi::ToInt(*key) >= 0;
+ // For JSTypedArray {object}s we can handle negative indices as OOB
+ // accesses, since integer indexed properties are never looked up
+ // on the prototype chain. For this we simply map the negative {key}s
+ // to the [2**31,2**32-1] range, which is safe since JSTypedArray::length
+ // is always an unsigned Smi.
+ key_is_valid_index =
+ key->IsSmi() && (Smi::ToInt(*key) >= 0 || object->IsJSTypedArray());
if (!is_arguments && !is_proxy) {
if (key_is_valid_index) {
uint32_t index = static_cast<uint32_t>(Smi::ToInt(*key));
@@ -2071,29 +2112,26 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
Handle<Name> key = args.at<Name>(1);
Handle<Smi> slot = args.at<Smi>(2);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
FeedbackSlotKind kind = vector->GetKind(vector_slot);
if (IsLoadICKind(kind)) {
- LoadICNexus nexus(vector, vector_slot);
- LoadIC ic(isolate, &nexus);
+ LoadIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
} else if (IsLoadGlobalICKind(kind)) {
DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
receiver = isolate->global_object();
- LoadGlobalICNexus nexus(vector, vector_slot);
- LoadGlobalIC ic(isolate, &nexus);
+ LoadGlobalIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
} else {
DCHECK(IsKeyedLoadICKind(kind));
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(isolate, &nexus);
+ KeyedLoadIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
@@ -2108,10 +2146,9 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
Handle<String> name = args.at<String>(0);
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- LoadGlobalICNexus nexus(vector, vector_slot);
- LoadGlobalIC ic(isolate, &nexus);
+ LoadGlobalIC ic(isolate, vector, vector_slot);
ic.UpdateState(global, name);
Handle<Object> result;
@@ -2150,7 +2187,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
if (!is_found) {
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
// It is actually a LoadGlobalICs here but the predicate handles this case
// properly.
@@ -2171,9 +2208,8 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
Handle<Object> key = args.at(1);
Handle<Smi> slot = args.at<Smi>(2);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
- KeyedLoadICNexus nexus(vector, vector_slot);
- KeyedLoadIC ic(isolate, &nexus);
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+ KeyedLoadIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
@@ -2188,24 +2224,21 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> receiver = args.at(3);
Handle<Name> key = args.at<Name>(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
- StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(isolate, &nexus);
+ StoreIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else if (IsStoreGlobalICKind(kind)) {
DCHECK_EQ(isolate->native_context()->global_proxy(), *receiver);
receiver = isolate->global_object();
- StoreGlobalICNexus nexus(vector, vector_slot);
- StoreGlobalIC ic(isolate, &nexus);
+ StoreGlobalIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
} else {
DCHECK(IsKeyedStoreICKind(kind));
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(isolate, &nexus);
+ KeyedStoreIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
@@ -2219,9 +2252,8 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
Handle<Smi> slot = args.at<Smi>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Name> key = args.at<Name>(3);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
- StoreGlobalICNexus nexus(vector, vector_slot);
- StoreGlobalIC ic(isolate, &nexus);
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+ StoreGlobalIC ic(isolate, vector, vector_slot);
Handle<JSGlobalObject> global = isolate->global_object();
ic.UpdateState(global, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(key, value));
@@ -2238,7 +2270,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
#ifdef DEBUG
{
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
DCHECK(IsStoreGlobalICKind(slot_kind));
Handle<Object> receiver = args.at(3);
@@ -2272,7 +2304,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
return *value;
}
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
RETURN_RESULT_OR_FAILURE(
isolate,
@@ -2289,9 +2321,8 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(isolate, &nexus);
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
+ KeyedStoreIC ic(isolate, vector, vector_slot);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
@@ -2306,7 +2337,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Object> object = args.at(3);
Handle<Object> key = args.at(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
RETURN_RESULT_OR_FAILURE(
isolate,
@@ -2324,7 +2355,7 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
Handle<Map> map = args.at<Map>(3);
Handle<Smi> slot = args.at<Smi>(4);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(5);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
@@ -2336,11 +2367,6 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
}
-RUNTIME_FUNCTION(Runtime_Unreachable) {
- UNREACHABLE();
-}
-
-
RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<JSObject> holder = args.at<JSObject>(1);
@@ -2413,7 +2439,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
Handle<Smi> slot = args.at<Smi>(3);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
// It could actually be any kind of load IC slot here but the predicate
// handles all the cases properly.
@@ -2436,7 +2462,7 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<JSObject> receiver = args.at<JSObject>(3);
Handle<Name> name = args.at<Name>(4);
- FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+ FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
// TODO(ishell): Cache interceptor_holder in the store handler like we do
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index a63202395b..8a47d8d19c 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IC_H_
-#define V8_IC_H_
+#ifndef V8_IC_IC_H_
+#define V8_IC_IC_H_
#include <vector>
@@ -36,7 +36,8 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
- IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = nullptr);
+ IC(FrameDepth depth, Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot);
virtual ~IC() {}
State state() const { return state_; }
@@ -67,6 +68,9 @@ class IC {
FeedbackSlot slot, JSFunction* host_function,
const char* reason);
+ static void OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
+ JSFunction* host_function, const char* reason);
+
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
@@ -151,17 +155,12 @@ class IC {
return !target_maps_.empty() ? *target_maps_[0] : nullptr;
}
- Handle<FeedbackVector> vector() const { return nexus()->vector_handle(); }
- FeedbackSlot slot() const { return nexus()->slot(); }
State saved_state() const {
return state() == RECOMPUTE_HANDLER ? old_state_ : state();
}
- template <class NexusClass>
- NexusClass* casted_nexus() {
- return static_cast<NexusClass*>(nexus_);
- }
- FeedbackNexus* nexus() const { return nexus_; }
+ const FeedbackNexus* nexus() const { return &nexus_; }
+ FeedbackNexus* nexus() { return &nexus_; }
private:
inline Address constant_pool() const;
@@ -200,7 +199,7 @@ class IC {
const char* slow_stub_reason_;
- FeedbackNexus* nexus_;
+ FeedbackNexus nexus_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
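The net effect of this header change: IC stops borrowing a caller-owned FeedbackNexus* (and downcasting it via casted_nexus<>) and instead owns a FeedbackNexus by value, built from the (vector, slot) pair. For call sites the change looks like this (taken from the runtime functions above):

    // Before: the caller stack-allocates a typed nexus and lends it out.
    //   KeyedLoadICNexus nexus(vector, vector_slot);
    //   KeyedLoadIC ic(isolate, &nexus);
    // After: the IC constructs and owns its own nexus.
    //   KeyedLoadIC ic(isolate, vector, vector_slot);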
@@ -208,18 +207,15 @@ class IC {
class CallIC : public IC {
public:
- CallIC(Isolate* isolate, CallICNexus* nexus)
- : IC(EXTRA_CALL_FRAME, isolate, nexus) {
- DCHECK_NOT_NULL(nexus);
- }
+ CallIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : IC(EXTRA_CALL_FRAME, isolate, vector, slot) {}
};
class LoadIC : public IC {
public:
- LoadIC(Isolate* isolate, FeedbackNexus* nexus)
- : IC(NO_EXTRA_FRAME, isolate, nexus) {
- DCHECK_NOT_NULL(nexus);
+ LoadIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : IC(NO_EXTRA_FRAME, isolate, vector, slot) {
DCHECK(IsAnyLoad());
}
@@ -252,8 +248,9 @@ class LoadIC : public IC {
class LoadGlobalIC : public LoadIC {
public:
- LoadGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
- : LoadIC(isolate, nexus) {}
+ LoadGlobalIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : LoadIC(isolate, vector, slot) {}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Name> name);
@@ -265,10 +262,9 @@ class LoadGlobalIC : public LoadIC {
class KeyedLoadIC : public LoadIC {
public:
- KeyedLoadIC(Isolate* isolate, KeyedLoadICNexus* nexus)
- : LoadIC(isolate, nexus) {
- DCHECK_NOT_NULL(nexus);
- }
+ KeyedLoadIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : LoadIC(isolate, vector, slot) {}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Object> key);
@@ -297,14 +293,12 @@ class KeyedLoadIC : public LoadIC {
class StoreIC : public IC {
public:
- StoreIC(Isolate* isolate, FeedbackNexus* nexus)
- : IC(NO_EXTRA_FRAME, isolate, nexus) {
+ StoreIC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : IC(NO_EXTRA_FRAME, isolate, vector, slot) {
DCHECK(IsAnyStore());
}
- LanguageMode language_mode() const {
- return nexus()->vector()->GetLanguageMode(nexus()->slot());
- }
+ LanguageMode language_mode() const { return nexus()->GetLanguageMode(); }
MUST_USE_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
@@ -337,8 +331,9 @@ class StoreIC : public IC {
class StoreGlobalIC : public StoreIC {
public:
- StoreGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
- : StoreIC(isolate, nexus) {}
+ StoreGlobalIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : StoreIC(isolate, vector, slot) {}
MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Name> name,
Handle<Object> value);
@@ -358,11 +353,12 @@ enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
class KeyedStoreIC : public StoreIC {
public:
KeyedAccessStoreMode GetKeyedAccessStoreMode() {
- return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+ return nexus()->GetKeyedAccessStoreMode();
}
- KeyedStoreIC(Isolate* isolate, KeyedStoreICNexus* nexus)
- : StoreIC(isolate, nexus) {}
+ KeyedStoreIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : StoreIC(isolate, vector, slot) {}
MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
Handle<Object> name,
@@ -389,4 +385,4 @@ class KeyedStoreIC : public StoreIC {
} // namespace internal
} // namespace v8
-#endif // V8_IC_H_
+#endif // V8_IC_IC_H_
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index b9a11c2ec7..4997267ddd 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -428,9 +428,10 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
void KeyedStoreGenericAssembler::EmitGenericElementStore(
Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
Node* value, Node* context, Label* slow) {
- Label if_fast(this), if_in_bounds(this), if_increment_length_by_one(this),
- if_bump_length_with_gap(this), if_grow(this), if_nonfast(this),
- if_typed_array(this), if_dictionary(this);
+ Label if_fast(this), if_in_bounds(this), if_out_of_bounds(this),
+ if_increment_length_by_one(this), if_bump_length_with_gap(this),
+ if_grow(this), if_nonfast(this), if_typed_array(this),
+ if_dictionary(this);
Node* elements = LoadElements(receiver);
Node* elements_kind = LoadMapElementsKind(receiver_map);
Branch(IsFastElementsKind(elements_kind), &if_fast, &if_nonfast);
@@ -440,7 +441,8 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &if_array);
{
Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
- Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds, &if_grow);
+ Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds,
+ &if_out_of_bounds);
}
BIND(&if_array);
{
@@ -459,6 +461,16 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
kDontChangeLength);
}
+ BIND(&if_out_of_bounds);
+ {
+ // Integer indexed out-of-bounds accesses to typed arrays are simply
+ // ignored, since we never look up integer indexed properties on the
+ // prototypes of typed arrays. For all other types, we may need to
+ // grow the backing store.
+ GotoIfNot(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE), &if_grow);
+ Return(value);
+ }
+
BIND(&if_increment_length_by_one);
{
StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
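The new if_out_of_bounds split encodes the observable JS behavior: an out-of-bounds integer-indexed store to a typed array is silently dropped (the store expression still evaluates to the value), while every other receiver falls through to the grow path. A plain-C++ sketch of that split:

    enum class OobStoreAction { kIgnoreButReturnValue, kTryGrow };

    // Typed arrays never defer integer-indexed stores to the prototype
    // chain, so OOB stores are no-ops; other receivers may grow their
    // backing store instead.
    OobStoreAction ClassifyOobStore(bool is_typed_array) {
      return is_typed_array ? OobStoreAction::kIgnoreButReturnValue
                            : OobStoreAction::kTryGrow;
    }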
@@ -911,9 +923,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&strict);
{
- Node* message = SmiConstant(MessageTemplate::kNoSetterInCallback);
- TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
- var_accessor_holder.value());
+ ThrowTypeError(p->context, MessageTemplate::kNoSetterInCallback,
+ p->name, var_accessor_holder.value());
}
}
}
@@ -926,10 +937,9 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&strict);
{
- Node* message = SmiConstant(MessageTemplate::kStrictReadOnlyProperty);
Node* type = Typeof(p->receiver);
- TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
- type, p->receiver);
+ ThrowTypeError(p->context, MessageTemplate::kStrictReadOnlyProperty,
+ p->name, type, p->receiver);
}
}
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 4d82840be3..1a0de3b2b4 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SRC_IC_KEYED_STORE_GENERIC_H_
-#define V8_SRC_IC_KEYED_STORE_GENERIC_H_
+#ifndef V8_IC_KEYED_STORE_GENERIC_H_
+#define V8_IC_KEYED_STORE_GENERIC_H_
#include "src/globals.h"
@@ -27,4 +27,4 @@ class StoreICUninitializedGenerator {
} // namespace internal
} // namespace v8
-#endif // V8_SRC_IC_KEYED_STORE_GENERIC_H_
+#endif // V8_IC_KEYED_STORE_GENERIC_H_
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index cd081edfb2..870266eefd 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STUB_CACHE_H_
-#define V8_STUB_CACHE_H_
+#ifndef V8_IC_STUB_CACHE_H_
+#define V8_IC_STUB_CACHE_H_
#include "src/macro-assembler.h"
#include "src/objects/name.h"
@@ -140,4 +140,4 @@ class StubCache {
} // namespace internal
} // namespace v8
-#endif // V8_STUB_CACHE_H_
+#endif // V8_IC_STUB_CACHE_H_
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index b652d6a6db..6a3bd4ca61 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
static const int kInitialIdentityMapSize = 4;
-static const int kResizeFactor = 4;
+static const int kResizeFactor = 2;
IdentityMapBase::~IdentityMapBase() {
// Clear must be called by the subclass to avoid calling the virtual
@@ -87,7 +87,8 @@ void* IdentityMapBase::DeleteIndex(int index) {
size_--;
DCHECK_GE(size_, 0);
- if (size_ * kResizeFactor < capacity_ / kResizeFactor) {
+ if (capacity_ > kInitialIdentityMapSize &&
+ size_ * kResizeFactor < capacity_ / kResizeFactor) {
Resize(capacity_ / kResizeFactor);
return ret_value; // No need to fix collisions as resize reinserts keys.
}
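With kResizeFactor now 2, the shrink condition size * 2 < capacity / 2 means the table halves once it drops below a quarter full, and the added guard keeps it from ever shrinking at the initial capacity of 4. A worked check of those thresholds:

    #include <cassert>

    constexpr int kInitialIdentityMapSize = 4;
    constexpr int kResizeFactor = 2;

    // Shrink rule from DeleteIndex: halve when less than a quarter full,
    // but never at or below the initial capacity.
    bool ShouldShrink(int size, int capacity) {
      return capacity > kInitialIdentityMapSize &&
             size * kResizeFactor < capacity / kResizeFactor;
    }

    int main() {
      assert(ShouldShrink(3, 16));   // 3 * 2 < 16 / 2: halve to 8
      assert(!ShouldShrink(4, 16));  // exactly a quarter full: keep
      assert(!ShouldShrink(0, 4));   // initial capacity: never shrink
      return 0;
    }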
@@ -194,6 +195,14 @@ void* IdentityMapBase::DeleteEntry(Object* key) {
return DeleteIndex(index);
}
+Object* IdentityMapBase::KeyAtIndex(int index) const {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, capacity_);
+ DCHECK_NE(keys_[index], heap_->not_mapped_symbol());
+ CHECK(is_iterable()); // Must be iterable to access by index.
+ return keys_[index];
+}
+
IdentityMapBase::RawEntry IdentityMapBase::EntryAtIndex(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, capacity_);
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/identity-map.h
index cd198e1cb5..4e69d3198a 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -46,6 +46,8 @@ class IdentityMapBase {
void* DeleteEntry(Object* key);
void Clear();
+ Object* KeyAtIndex(int index) const;
+
V8_EXPORT_PRIVATE RawEntry EntryAtIndex(int index) const;
V8_EXPORT_PRIVATE int NextIndex(int index) const;
@@ -126,8 +128,13 @@ class IdentityMap : public IdentityMapBase {
return *this;
}
- V* operator*() { return reinterpret_cast<V*>(map_->EntryAtIndex(index_)); }
- V* operator->() { return reinterpret_cast<V*>(map_->EntryAtIndex(index_)); }
+ Object* key() const { return map_->KeyAtIndex(index_); }
+ V* entry() const {
+ return reinterpret_cast<V*>(map_->EntryAtIndex(index_));
+ }
+
+ V* operator*() { return entry(); }
+ V* operator->() { return entry(); }
bool operator!=(const Iterator& other) { return index_ != other.index_; }
private:
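With key() exposed next to entry(), an iteration can now see both sides of each slot; note that KeyAtIndex CHECKs is_iterable(), so this only works while the map is in an iterable state. A usage sketch, with the iteration protocol assumed from the surrounding header:

    // Hypothetical usage, inside whatever scope makes the map iterable:
    //   IdentityMap<int, ZoneAllocationPolicy> map(heap, ...);
    //   for (auto it = map.begin(); it != map.end(); ++it) {
    //     Object* key = it.key();   // new: the key stored at this slot
    //     int* value = it.entry();  // the mapped value
    //     Process(key, *value);
    //   }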
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 85b506a956..f396d64b99 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -7,6 +7,7 @@ include_rules = [
"+src/base/platform/platform.h",
"+src/conversions.h",
"+src/flags.h",
+ "+src/utils.h",
"+src/unicode-cache.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index a5e981cda5..d13e5f8695 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -49,6 +49,7 @@ namespace v8_inspector {
namespace {
static const char privateKeyName[] = "v8-inspector#injectedScript";
+static const char kGlobalHandleLabel[] = "DevTools console";
} // namespace
using protocol::Array;
@@ -511,6 +512,7 @@ v8::Local<v8::Value> InjectedScript::lastEvaluationResult() const {
void InjectedScript::setLastEvaluationResult(v8::Local<v8::Value> result) {
m_lastEvaluationResult.Reset(m_context->isolate(), result);
+ m_lastEvaluationResult.AnnotateStrongRetainer(kGlobalHandleLabel);
}
Response InjectedScript::resolveCallArgument(
@@ -601,8 +603,10 @@ Response InjectedScript::wrapEvaluateResult(
Response response = wrapObject(resultValue, objectGroup, returnByValue,
generatePreview, result);
if (!response.isSuccess()) return response;
- if (objectGroup == "console")
+ if (objectGroup == "console") {
m_lastEvaluationResult.Reset(m_context->isolate(), resultValue);
+ m_lastEvaluationResult.AnnotateStrongRetainer(kGlobalHandleLabel);
+ }
} else {
v8::Local<v8::Value> exception = tryCatch.Exception();
Response response =
@@ -624,6 +628,7 @@ v8::Local<v8::Object> InjectedScript::commandLineAPI() {
m_context->isolate(),
m_context->inspector()->console()->createCommandLineAPI(
m_context->context(), m_sessionId));
+ m_commandLineAPI.AnnotateStrongRetainer(kGlobalHandleLabel);
}
return m_commandLineAPI.Get(m_context->isolate());
}
@@ -769,6 +774,7 @@ int InjectedScript::bindObject(v8::Local<v8::Value> value,
if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
int id = m_lastBoundObjectId++;
m_idToWrappedObject[id].Reset(m_context->isolate(), value);
+ m_idToWrappedObject[id].AnnotateStrongRetainer(kGlobalHandleLabel);
if (!groupName.isEmpty() && id > 0) {
m_idToObjectGroupName[id] = groupName;
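AnnotateStrongRetainer attaches a human-readable label to a strong global handle so heap snapshots can attribute otherwise anonymous DevTools-held references; the same label is applied to every handle the injected script keeps alive. A minimal sketch of the pattern:

    #include "include/v8.h"

    // Reset a strong global and label it for heap-snapshot attribution.
    void RetainLabeled(v8::Isolate* isolate, v8::Local<v8::Value> value,
                       v8::Global<v8::Value>* out) {
      static const char kLabel[] = "DevTools console";
      out->Reset(isolate, value);
      out->AnnotateStrongRetainer(kLabel);
    }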
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 16938fb317..90a1ed3171 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -28,8 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_INJECTEDSCRIPT_H_
-#define V8_INSPECTOR_INJECTEDSCRIPT_H_
+#ifndef V8_INSPECTOR_INJECTED_SCRIPT_H_
+#define V8_INSPECTOR_INJECTED_SCRIPT_H_
#include <unordered_map>
#include <unordered_set>
@@ -220,4 +220,4 @@ class InjectedScript final {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_INJECTEDSCRIPT_H_
+#endif // V8_INSPECTOR_INJECTED_SCRIPT_H_
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index ac33071f62..ef0a0ca52a 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_INSPECTEDCONTEXT_H_
-#define V8_INSPECTOR_INSPECTEDCONTEXT_H_
+#ifndef V8_INSPECTOR_INSPECTED_CONTEXT_H_
+#define V8_INSPECTOR_INSPECTED_CONTEXT_H_
#include <unordered_map>
#include <unordered_set>
@@ -65,4 +65,4 @@ class InspectedContext {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_INSPECTEDCONTEXT_H_
+#endif // V8_INSPECTOR_INSPECTED_CONTEXT_H_
diff --git a/deps/v8/src/inspector/inspector.gypi b/deps/v8/src/inspector/inspector.gypi
deleted file mode 100644
index d6443283f5..0000000000
--- a/deps/v8/src/inspector/inspector.gypi
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'inspector_generated_sources': [
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Forward.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.h',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.cpp',
- '<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.h',
- '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Debugger.h',
- '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Runtime.h',
- '<(SHARED_INTERMEDIATE_DIR)/include/inspector/Schema.h',
- ],
-
- 'inspector_injected_script_source': 'injected-script-source.js',
- 'inspector_generated_injected_script': '<(SHARED_INTERMEDIATE_DIR)/src/inspector/injected-script-source.h',
-
- 'inspector_all_sources': [
- '<@(inspector_generated_sources)',
- '<(inspector_generated_injected_script)',
- '../include/v8-inspector.h',
- '../include/v8-inspector-protocol.h',
- 'inspector/injected-script.cc',
- 'inspector/injected-script.h',
- 'inspector/inspected-context.cc',
- 'inspector/inspected-context.h',
- 'inspector/remote-object-id.cc',
- 'inspector/remote-object-id.h',
- 'inspector/search-util.cc',
- 'inspector/search-util.h',
- 'inspector/string-16.cc',
- 'inspector/string-16.h',
- 'inspector/string-util.cc',
- 'inspector/string-util.h',
- 'inspector/test-interface.cc',
- 'inspector/test-interface.h',
- 'inspector/v8-console.cc',
- 'inspector/v8-console.h',
- 'inspector/v8-console-agent-impl.cc',
- 'inspector/v8-console-agent-impl.h',
- 'inspector/v8-console-message.cc',
- 'inspector/v8-console-message.h',
- 'inspector/v8-debugger.cc',
- 'inspector/v8-debugger.h',
- 'inspector/v8-debugger-agent-impl.cc',
- 'inspector/v8-debugger-agent-impl.h',
- 'inspector/v8-debugger-script.cc',
- 'inspector/v8-debugger-script.h',
- 'inspector/v8-function-call.cc',
- 'inspector/v8-function-call.h',
- 'inspector/v8-heap-profiler-agent-impl.cc',
- 'inspector/v8-heap-profiler-agent-impl.h',
- 'inspector/v8-injected-script-host.cc',
- 'inspector/v8-injected-script-host.h',
- 'inspector/v8-inspector-impl.cc',
- 'inspector/v8-inspector-impl.h',
- 'inspector/v8-inspector-session-impl.cc',
- 'inspector/v8-inspector-session-impl.h',
- 'inspector/v8-internal-value-type.cc',
- 'inspector/v8-internal-value-type.h',
- 'inspector/v8-profiler-agent-impl.cc',
- 'inspector/v8-profiler-agent-impl.h',
- 'inspector/v8-regex.cc',
- 'inspector/v8-regex.h',
- 'inspector/v8-runtime-agent-impl.cc',
- 'inspector/v8-runtime-agent-impl.h',
- 'inspector/v8-schema-agent-impl.cc',
- 'inspector/v8-schema-agent-impl.h',
- 'inspector/v8-stack-trace-impl.cc',
- 'inspector/v8-stack-trace-impl.h',
- 'inspector/v8-value-utils.cc',
- 'inspector/v8-value-utils.h',
- 'inspector/wasm-translation.cc',
- 'inspector/wasm-translation.h',
- ]
- }
-}
diff --git a/deps/v8/src/inspector/remote-object-id.h b/deps/v8/src/inspector/remote-object-id.h
index 3e6928a87e..923274236d 100644
--- a/deps/v8/src/inspector/remote-object-id.h
+++ b/deps/v8/src/inspector/remote-object-id.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_REMOTEOBJECTID_H_
-#define V8_INSPECTOR_REMOTEOBJECTID_H_
+#ifndef V8_INSPECTOR_REMOTE_OBJECT_ID_H_
+#define V8_INSPECTOR_REMOTE_OBJECT_ID_H_
#include "src/inspector/protocol/Forward.h"
@@ -54,4 +54,4 @@ class RemoteCallFrameId final : public RemoteObjectIdBase {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_REMOTEOBJECTID_H_
+#endif // V8_INSPECTOR_REMOTE_OBJECT_ID_H_
diff --git a/deps/v8/src/inspector/search-util.h b/deps/v8/src/inspector/search-util.h
index 8f5753b620..3c8a9fe31c 100644
--- a/deps/v8/src/inspector/search-util.h
+++ b/deps/v8/src/inspector/search-util.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_SEARCHUTIL_H_
-#define V8_INSPECTOR_SEARCHUTIL_H_
+#ifndef V8_INSPECTOR_SEARCH_UTIL_H_
+#define V8_INSPECTOR_SEARCH_UTIL_H_
#include "src/inspector/protocol/Debugger.h"
#include "src/inspector/string-util.h"
@@ -21,4 +21,4 @@ searchInTextByLinesImpl(V8InspectorSession*, const String16& text,
} // namespace v8_inspector
-#endif // V8_INSPECTOR_SEARCHUTIL_H_
+#endif // V8_INSPECTOR_SEARCH_UTIL_H_
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index dc753fee40..43343c887b 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -136,16 +136,19 @@ ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
result = targetExhausted;
break;
}
- switch (bytesToWrite) { // note: everything falls through.
+ switch (bytesToWrite) {
case 4:
*--target = static_cast<char>((ch | byteMark) & byteMask);
ch >>= 6;
+ V8_FALLTHROUGH;
case 3:
*--target = static_cast<char>((ch | byteMark) & byteMask);
ch >>= 6;
+ V8_FALLTHROUGH;
case 2:
*--target = static_cast<char>((ch | byteMark) & byteMask);
ch >>= 6;
+ V8_FALLTHROUGH;
case 1:
*--target = static_cast<char>(ch | firstByteMark[bytesToWrite]);
}
@@ -210,8 +213,10 @@ static bool isLegalUTF8(const unsigned char* source, int length) {
// Everything else falls through when "true"...
case 4:
if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ V8_FALLTHROUGH;
case 3:
if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ V8_FALLTHROUGH;
case 2:
if ((a = (*--srcptr)) > 0xBF) return false;
@@ -232,6 +237,7 @@ static bool isLegalUTF8(const unsigned char* source, int length) {
default:
if (a < 0x80) return false;
}
+ V8_FALLTHROUGH;
case 1:
if (*source >= 0x80 && *source < 0xC2) return false;
@@ -258,18 +264,23 @@ static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
case 6:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 5:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 4:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 3:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 2:
character += static_cast<unsigned char>(*sequence++);
character <<= 6;
+ V8_FALLTHROUGH;
case 1:
character += static_cast<unsigned char>(*sequence++);
}
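These hunks replace the old "everything falls through" comments with an annotation the compiler can verify; V8_FALLTHROUGH presumably expands to a fallthrough attribute where the toolchain supports one and to nothing otherwise. The pattern, rendered with the standard C++17 attribute:

    // Count how many bytes a cascade writes; each case intentionally
    // falls through to the next, and the attribute documents that.
    int BytesWritten(int bytes_to_write) {
      int written = 0;
      switch (bytes_to_write) {
        case 4:
          ++written;
          [[fallthrough]];  // what V8_FALLTHROUGH marks in the diff
        case 3:
          ++written;
          [[fallthrough]];
        case 2:
          ++written;
          [[fallthrough]];
        case 1:
          ++written;
      }
      return written;
    }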
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 1dc9350e96..7d6867dfc3 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_STRING16_H_
-#define V8_INSPECTOR_STRING16_H_
+#ifndef V8_INSPECTOR_STRING_16_H_
+#define V8_INSPECTOR_STRING_16_H_
#include <stdint.h>
#include <cctype>
@@ -149,4 +149,4 @@ struct hash<v8_inspector::String16> {
#endif // !defined(__APPLE__) || defined(_LIBCPP_VERSION)
-#endif // V8_INSPECTOR_STRING16_H_
+#endif // V8_INSPECTOR_STRING_16_H_
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 8aaf3ce850..0c025ef93a 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_STRINGUTIL_H_
-#define V8_INSPECTOR_STRINGUTIL_H_
+#ifndef V8_INSPECTOR_STRING_UTIL_H_
+#define V8_INSPECTOR_STRING_UTIL_H_
#include <memory>
@@ -92,4 +92,4 @@ String16 stackTraceIdToString(uintptr_t id);
} // namespace v8_inspector
-#endif // V8_INSPECTOR_STRINGUTIL_H_
+#endif // V8_INSPECTOR_STRING_UTIL_H_
diff --git a/deps/v8/src/inspector/test-interface.h b/deps/v8/src/inspector/test-interface.h
index 70fbca186f..946d1f6020 100644
--- a/deps/v8/src/inspector/test-interface.h
+++ b/deps/v8/src/inspector/test-interface.h
@@ -16,4 +16,4 @@ V8_EXPORT void DumpAsyncTaskStacksStateForTest(V8Inspector* inspector);
} // v8_inspector
-#endif // V8_INSPECTOR_TEST_INTERFACE_H_
+#endif // V8_INSPECTOR_TEST_INTERFACE_H_
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.h b/deps/v8/src/inspector/v8-console-agent-impl.h
index db17e54718..f436aa2f5c 100644
--- a/deps/v8/src/inspector/v8-console-agent-impl.h
+++ b/deps/v8/src/inspector/v8-console-agent-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
-#define V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_CONSOLE_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_CONSOLE_AGENT_IMPL_H_
#include "src/base/macros.h"
#include "src/inspector/protocol/Console.h"
@@ -45,4 +45,4 @@ class V8ConsoleAgentImpl : public protocol::Console::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8CONSOLEAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_CONSOLE_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index e96e89c0eb..ea50a8dfee 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -58,6 +58,7 @@ String16 consoleAPITypeValue(ConsoleAPIType type) {
return protocol::Runtime::ConsoleAPICalled::TypeEnum::Log;
}
+const char kGlobalConsoleMessageHandleLabel[] = "DevTools console";
const unsigned maxConsoleMessageCount = 1000;
const int maxConsoleMessageV8Size = 10 * 1024 * 1024;
const unsigned maxArrayItemsLimit = 10000;
@@ -379,8 +380,10 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
message->m_type = type;
message->m_contextId = contextId;
for (size_t i = 0; i < arguments.size(); ++i) {
- message->m_arguments.push_back(std::unique_ptr<v8::Global<v8::Value>>(
- new v8::Global<v8::Value>(isolate, arguments.at(i))));
+ std::unique_ptr<v8::Global<v8::Value>> argument(
+ new v8::Global<v8::Value>(isolate, arguments.at(i)));
+ argument->AnnotateStrongRetainer(kGlobalConsoleMessageHandleLabel);
+ message->m_arguments.push_back(std::move(argument));
message->m_v8Size +=
v8::debug::EstimatedValueSize(isolate, arguments.at(i));
}
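
Note: this hunk labels each strong v8::Global that retains a console-message argument, so heap snapshots attribute the retained objects to the DevTools console rather than to an anonymous global handle. A minimal embedder-side sketch of the same API (assuming a live isolate; the label string must outlive the handle):

#include "include/v8.h"

// Sketch: v8::PersistentBase::AnnotateStrongRetainer tags a strong handle
// with a label that heap-snapshot tooling reports as the retainer.
v8::Global<v8::Value> RetainForDevTools(v8::Isolate* isolate,
                                        v8::Local<v8::Value> value) {
  v8::Global<v8::Value> handle(isolate, value);
  handle.AnnotateStrongRetainer("DevTools console");  // literal: lives forever
  return handle;  // Global is move-only; the caller owns the handle
}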
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index f82f8e5a13..103cb9002b 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8CONSOLEMESSAGE_H_
-#define V8_INSPECTOR_V8CONSOLEMESSAGE_H_
+#ifndef V8_INSPECTOR_V8_CONSOLE_MESSAGE_H_
+#define V8_INSPECTOR_V8_CONSOLE_MESSAGE_H_
#include <deque>
#include <map>
@@ -138,4 +138,4 @@ class V8ConsoleMessageStorage {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8CONSOLEMESSAGE_H_
+#endif // V8_INSPECTOR_V8_CONSOLE_MESSAGE_H_
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index ba4dfe328b..311625efde 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8CONSOLE_H_
-#define V8_INSPECTOR_V8CONSOLE_H_
+#ifndef V8_INSPECTOR_V8_CONSOLE_H_
+#define V8_INSPECTOR_V8_CONSOLE_H_
#include "src/base/macros.h"
@@ -172,4 +172,4 @@ class V8Console : public v8::debug::ConsoleDelegate {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8CONSOLE_H_
+#endif // V8_INSPECTOR_V8_CONSOLE_H_
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 7bfde09b71..78325ef978 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -57,6 +57,8 @@ static const char kDebuggerNotPaused[] =
static const size_t kBreakpointHintMaxLength = 128;
static const intptr_t kBreakpointHintMaxSearchOffset = 80 * 10;
+static const int kMaxScriptFailedToParseScripts = 1000;
+
namespace {
void TranslateLocation(protocol::Debugger::Location* location,
@@ -1416,7 +1418,13 @@ void V8DebuggerAgentImpl::didParseSource(
static_cast<int>(scriptRef->source().length()), std::move(stackTrace));
}
- if (!success) return;
+ if (!success) {
+ if (scriptURL.isEmpty()) {
+ m_failedToParseAnonymousScriptIds.push_back(scriptId);
+ cleanupOldFailedToParseAnonymousScriptsIfNeeded();
+ }
+ return;
+ }
std::vector<protocol::DictionaryValue*> potentialBreakpoints;
if (!scriptURL.isEmpty()) {
@@ -1618,4 +1626,18 @@ void V8DebuggerAgentImpl::reset() {
m_breakpointIdToDebuggerBreakpointIds.clear();
}
+void V8DebuggerAgentImpl::cleanupOldFailedToParseAnonymousScriptsIfNeeded() {
+ if (m_failedToParseAnonymousScriptIds.size() <=
+ kMaxScriptFailedToParseScripts)
+ return;
+ static_assert(kMaxScriptFailedToParseScripts > 100,
+ "kMaxScriptFailedToParseScripts should be greater then 100");
+ while (m_failedToParseAnonymousScriptIds.size() >
+ kMaxScriptFailedToParseScripts - 100 + 1) {
+ String16 scriptId = m_failedToParseAnonymousScriptIds.front();
+ m_failedToParseAnonymousScriptIds.pop_front();
+ m_scripts.erase(scriptId);
+ }
+}
+
} // namespace v8_inspector
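
Note: the new bookkeeping above caps how many anonymous scripts that failed to parse the agent remembers. Ids are appended to a deque, and once the count exceeds 1000 the oldest ~100 entries are evicted in one batch, so the cleanup runs once per batch of insertions rather than on every script. A self-contained sketch of the pattern (names are illustrative, not V8's):

#include <deque>
#include <map>
#include <string>

constexpr size_t kMaxTracked = 1000;
constexpr size_t kEvictBatch = 100;

std::deque<std::string> tracked_ids;  // insertion order
std::map<std::string, int> scripts;   // stand-in for m_scripts

void TrackFailedScript(const std::string& id) {
  tracked_ids.push_back(id);
  if (tracked_ids.size() <= kMaxTracked) return;
  // Drop to kMaxTracked - kEvictBatch + 1 entries so this loop runs only
  // once per ~kEvictBatch insertions (hysteresis, as in the patch above).
  while (tracked_ids.size() > kMaxTracked - kEvictBatch + 1) {
    scripts.erase(tracked_ids.front());
    tracked_ids.pop_front();
  }
}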
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 168c5a7724..6feaeff914 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
-#define V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_
+#include <deque>
#include <vector>
#include "src/base/macros.h"
@@ -192,6 +193,9 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
DebuggerBreakpointIdToBreakpointIdMap m_debuggerBreakpointIdToBreakpointId;
+ std::deque<String16> m_failedToParseAnonymousScriptIds;
+ void cleanupOldFailedToParseAnonymousScriptsIfNeeded();
+
using BreakReason =
std::pair<String16, std::unique_ptr<protocol::DictionaryValue>>;
std::vector<BreakReason> m_breakReason;
@@ -215,4 +219,4 @@ String16 scopeType(v8::debug::ScopeIterator::ScopeType type);
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8DEBUGGERAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_DEBUGGER_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index 6ec7f32c89..c596ee5053 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -7,12 +7,14 @@
#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
#include "src/inspector/wasm-translation.h"
+#include "src/utils.h"
namespace v8_inspector {
namespace {
const char hexDigits[17] = "0123456789ABCDEF";
+const char kGlobalDebuggerScriptHandleLabel[] = "DevTools debugger";
void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
for (size_t i = 0; i < 8; ++i) {
@@ -43,11 +45,12 @@ String16 calculateHash(const String16& str) {
const uint32_t* data = nullptr;
size_t sizeInBytes = sizeof(UChar) * str.length();
data = reinterpret_cast<const uint32_t*>(str.characters16());
- for (size_t i = 0; i < sizeInBytes / 4; i += 4) {
+ for (size_t i = 0; i < sizeInBytes / 4; ++i) {
+ uint32_t d = v8::internal::ReadUnalignedUInt32(data + i);
#if V8_TARGET_LITTLE_ENDIAN
- uint32_t v = data[i];
+ uint32_t v = d;
#else
- uint32_t v = (data[i] << 16) | (data[i] >> 16);
+ uint32_t v = (d << 16) | (d >> 16);
#endif
uint64_t xi = v * randomOdd[current] & 0x7FFFFFFF;
hashes[current] = (hashes[current] + zi[current] * xi) % prime[current];
@@ -56,15 +59,16 @@ String16 calculateHash(const String16& str) {
}
if (sizeInBytes % 4) {
uint32_t v = 0;
+ const uint8_t* data_8b = reinterpret_cast<const uint8_t*>(data);
for (size_t i = sizeInBytes - sizeInBytes % 4; i < sizeInBytes; ++i) {
v <<= 8;
#if V8_TARGET_LITTLE_ENDIAN
- v |= reinterpret_cast<const uint8_t*>(data)[i];
+ v |= data_8b[i];
#else
if (i % 2) {
- v |= reinterpret_cast<const uint8_t*>(data)[i - 1];
+ v |= data_8b[i - 1];
} else {
- v |= reinterpret_cast<const uint8_t*>(data)[i + 1];
+ v |= data_8b[i + 1];
}
#endif
}
@@ -147,6 +151,7 @@ class ActualScript : public V8DebuggerScript {
m_isModule = script->IsModule();
m_script.Reset(m_isolate, script);
+ m_script.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
}
bool isLiveEdit() const override { return m_isLiveEdit; }
@@ -264,6 +269,7 @@ class WasmVirtualScript : public V8DebuggerScript {
: V8DebuggerScript(isolate, std::move(id), std::move(url)),
m_script(isolate, script),
m_wasmTranslation(wasmTranslation) {
+ m_script.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
int num_lines = 0;
int last_newline = -1;
size_t next_newline = source.find('\n', last_newline + 1);
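
Note: two things change in calculateHash above: the main loop now advances one 32-bit word at a time instead of four, and each word is read through ReadUnalignedUInt32, since the UChar buffer backing a String16 is only guaranteed 2-byte alignment. A sketch of the standard portable formulation of such a helper (V8's own lives in src/utils.h):

#include <cstdint>
#include <cstring>

// memcpy is the portable way to load a 32-bit word from a pointer that may
// not be 4-byte aligned; a direct dereference would be undefined behavior
// on strict-alignment targets. Compilers lower this to one load where legal.
inline uint32_t ReadUnalignedUInt32Sketch(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}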
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index f1e28184b5..6badd87c97 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -27,8 +27,8 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
-#define V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
+#ifndef V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_
+#define V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
@@ -111,4 +111,4 @@ class V8DebuggerScript {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8DEBUGGERSCRIPT_H_
+#endif // V8_INSPECTOR_V8_DEBUGGER_SCRIPT_H_
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index c86f320252..9b0ca38018 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -494,7 +494,6 @@ void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
void V8Debugger::BreakProgramRequested(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Object>,
- v8::Local<v8::Value>,
const std::vector<v8::debug::BreakpointId>& break_points_hit) {
handleProgramBreak(pausedContext, v8::Local<v8::Value>(), break_points_hit);
}
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 4828fcad52..a710726581 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8DEBUGGER_H_
-#define V8_INSPECTOR_V8DEBUGGER_H_
+#ifndef V8_INSPECTOR_V8_DEBUGGER_H_
+#define V8_INSPECTOR_V8_DEBUGGER_H_
#include <list>
#include <unordered_map>
@@ -169,7 +169,6 @@ class V8Debugger : public v8::debug::DebugDelegate {
bool has_compile_error) override;
void BreakProgramRequested(
v8::Local<v8::Context> paused_context, v8::Local<v8::Object>,
- v8::Local<v8::Value>,
const std::vector<v8::debug::BreakpointId>& break_points_hit) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object>, v8::Local<v8::Value> exception,
@@ -240,4 +239,4 @@ class V8Debugger : public v8::debug::DebugDelegate {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8DEBUGGER_H_
+#endif // V8_INSPECTOR_V8_DEBUGGER_H_
diff --git a/deps/v8/src/inspector/v8-function-call.h b/deps/v8/src/inspector/v8-function-call.h
index 0337caa339..28a5886c91 100644
--- a/deps/v8/src/inspector/v8-function-call.h
+++ b/deps/v8/src/inspector/v8-function-call.h
@@ -28,8 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_V8FUNCTIONCALL_H_
-#define V8_INSPECTOR_V8FUNCTIONCALL_H_
+#ifndef V8_INSPECTOR_V8_FUNCTION_CALL_H_
+#define V8_INSPECTOR_V8_FUNCTION_CALL_H_
#include "src/inspector/string-16.h"
@@ -62,4 +62,4 @@ class V8FunctionCall {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8FUNCTIONCALL_H_
+#endif // V8_INSPECTOR_V8_FUNCTION_CALL_H_
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
index 7491a80f10..5c2107d573 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
-#define V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
@@ -66,4 +66,4 @@ class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8HEAPPROFILERAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_HEAP_PROFILER_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-injected-script-host.h b/deps/v8/src/inspector/v8-injected-script-host.h
index 18f9139d63..6a3ee3d386 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.h
+++ b/deps/v8/src/inspector/v8-injected-script-host.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
-#define V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
+#ifndef V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
+#define V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
#include "include/v8.h"
@@ -50,4 +50,4 @@ class V8InjectedScriptHost {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8INJECTEDSCRIPTHOST_H_
+#endif // V8_INSPECTOR_V8_INJECTED_SCRIPT_HOST_H_
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 92e7b21960..0627eae317 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -28,8 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_V8INSPECTORIMPL_H_
-#define V8_INSPECTOR_V8INSPECTORIMPL_H_
+#ifndef V8_INSPECTOR_V8_INSPECTOR_IMPL_H_
+#define V8_INSPECTOR_V8_INSPECTOR_IMPL_H_
#include <functional>
#include <map>
@@ -154,4 +154,4 @@ class V8InspectorImpl : public V8Inspector {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8INSPECTORIMPL_H_
+#endif // V8_INSPECTOR_V8_INSPECTOR_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 4fb924f749..8ca0915b66 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
-#define V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
+#ifndef V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_
+#define V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_
#include <vector>
@@ -126,4 +126,4 @@ class V8InspectorSessionImpl : public V8InspectorSession,
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8INSPECTORSESSIONIMPL_H_
+#endif // V8_INSPECTOR_V8_INSPECTOR_SESSION_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-internal-value-type.h b/deps/v8/src/inspector/v8-internal-value-type.h
index e648a0d4a3..991919a82e 100644
--- a/deps/v8/src/inspector/v8-internal-value-type.h
+++ b/deps/v8/src/inspector/v8-internal-value-type.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8INTERNALVALUETYPE_H_
-#define V8_INSPECTOR_V8INTERNALVALUETYPE_H_
+#ifndef V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
+#define V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
#include "include/v8.h"
@@ -20,4 +20,4 @@ v8::Local<v8::Value> v8InternalValueTypeFrom(v8::Local<v8::Context>,
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8INTERNALVALUETYPE_H_
+#endif // V8_INSPECTOR_V8_INTERNAL_VALUE_TYPE_H_
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index e758a900fa..a68ea1144c 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
-#define V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_
#include <vector>
@@ -85,4 +85,4 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8PROFILERAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_PROFILER_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-regex.h b/deps/v8/src/inspector/v8-regex.h
index b4b1f8ce13..0c4136fc8b 100644
--- a/deps/v8/src/inspector/v8-regex.h
+++ b/deps/v8/src/inspector/v8-regex.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8REGEX_H_
-#define V8_INSPECTOR_V8REGEX_H_
+#ifndef V8_INSPECTOR_V8_REGEX_H_
+#define V8_INSPECTOR_V8_REGEX_H_
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
@@ -34,4 +34,4 @@ class V8Regex {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8REGEX_H_
+#endif // V8_INSPECTOR_V8_REGEX_H_
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 22d48e23bf..6975f35e71 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -255,13 +255,12 @@ void V8RuntimeAgentImpl::evaluate(
if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(true);
v8::MaybeLocal<v8::Value> maybeResultValue;
- v8::Local<v8::Script> script;
- if (m_inspector->compileScript(scope.context(), expression, String16())
- .ToLocal(&script)) {
+ {
v8::MicrotasksScope microtasksScope(m_inspector->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
- maybeResultValue = script->Run(scope.context());
- }
+ maybeResultValue = v8::debug::EvaluateGlobal(
+ m_inspector->isolate(), toV8String(m_inspector->isolate(), expression));
+ } // Run microtasks before returning result.
if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(false);
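
Note: Runtime.evaluate now delegates to v8::debug::EvaluateGlobal instead of compiling and running a v8::Script by hand; the MicrotasksScope still guarantees that microtasks queued by the evaluated expression (promise reactions, for example) run before the protocol response is produced. For reference, a public-API sketch of the compile-and-run shape the removed branch had (an embedder-level illustration, not the inspector's exact code):

#include "include/v8.h"

// Compile and run a source string, flushing microtasks when the scope exits.
v8::MaybeLocal<v8::Value> EvaluateSketch(v8::Local<v8::Context> context,
                                         v8::Local<v8::String> source) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::Local<v8::Script> script;
  if (!v8::Script::Compile(context, source).ToLocal(&script))
    return v8::MaybeLocal<v8::Value>();  // compilation failed
  v8::MicrotasksScope microtasks(isolate,
                                 v8::MicrotasksScope::kRunMicrotasks);
  return script->Run(context);  // microtasks run when `microtasks` unwinds
}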
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index cc63b697c9..790654da08 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -28,8 +28,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
-#define V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
@@ -129,4 +129,4 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8RUNTIMEAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.h b/deps/v8/src/inspector/v8-schema-agent-impl.h
index e733aa0d5a..b96cce1401 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.h
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
-#define V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
+#ifndef V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_
+#define V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
@@ -33,4 +33,4 @@ class V8SchemaAgentImpl : public protocol::Schema::Backend {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8SCHEMAAGENTIMPL_H_
+#endif // V8_INSPECTOR_V8_SCHEMA_AGENT_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 08d98110ae..87d2b0f027 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8STACKTRACEIMPL_H_
-#define V8_INSPECTOR_V8STACKTRACEIMPL_H_
+#ifndef V8_INSPECTOR_V8_STACK_TRACE_IMPL_H_
+#define V8_INSPECTOR_V8_STACK_TRACE_IMPL_H_
#include <memory>
#include <vector>
@@ -145,4 +145,4 @@ class AsyncStackTrace {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8STACKTRACEIMPL_H_
+#endif // V8_INSPECTOR_V8_STACK_TRACE_IMPL_H_
diff --git a/deps/v8/src/inspector/v8-value-utils.h b/deps/v8/src/inspector/v8-value-utils.h
index 4d7b77077f..029fee224b 100644
--- a/deps/v8/src/inspector/v8-value-utils.h
+++ b/deps/v8/src/inspector/v8-value-utils.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_V8VALUEUTILS_H_
-#define V8_INSPECTOR_V8VALUEUTILS_H_
+#ifndef V8_INSPECTOR_V8_VALUE_UTILS_H_
+#define V8_INSPECTOR_V8_VALUE_UTILS_H_
#include "src/inspector/protocol/Protocol.h"
@@ -23,4 +23,4 @@ protocol::Response toProtocolValue(v8::Local<v8::Context>, v8::Local<v8::Value>,
} // namespace v8_inspector
-#endif // V8_INSPECTOR_V8VALUEUTILS_H_
+#endif // V8_INSPECTOR_V8_VALUE_UTILS_H_
diff --git a/deps/v8/src/inspector/wasm-translation.h b/deps/v8/src/inspector/wasm-translation.h
index 2162edee67..9bd33c0bc8 100644
--- a/deps/v8/src/inspector/wasm-translation.h
+++ b/deps/v8/src/inspector/wasm-translation.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INSPECTOR_WASMTRANSLATION_H_
-#define V8_INSPECTOR_WASMTRANSLATION_H_
+#ifndef V8_INSPECTOR_WASM_TRANSLATION_H_
+#define V8_INSPECTOR_WASM_TRANSLATION_H_
#include <unordered_map>
@@ -72,4 +72,4 @@ class WasmTranslation {
} // namespace v8_inspector
-#endif // V8_INSPECTOR_WASMTRANSLATION_H_
+#endif // V8_INSPECTOR_WASM_TRANSLATION_H_
diff --git a/deps/v8/src/instruction-stream.cc b/deps/v8/src/instruction-stream.cc
new file mode 100644
index 0000000000..7d00ea5434
--- /dev/null
+++ b/deps/v8/src/instruction-stream.cc
@@ -0,0 +1,66 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/instruction-stream.h"
+
+#include "src/builtins/builtins.h"
+#include "src/heap/heap.h"
+#include "src/objects-inl.h"
+#include "src/objects/code-inl.h"
+
+namespace v8 {
+namespace internal {
+
+InstructionStream::InstructionStream(Code* code)
+ : builtin_index_(code->builtin_index()) {
+ DCHECK(Builtins::IsOffHeapBuiltin(code));
+ const size_t page_size = AllocatePageSize();
+ byte_length_ =
+ RoundUp(static_cast<size_t>(code->instruction_size()), page_size);
+
+ bytes_ = static_cast<uint8_t*>(AllocatePages(
+ GetRandomMmapAddr(), byte_length_, page_size, PageAllocator::kReadWrite));
+ CHECK_NOT_NULL(bytes_);
+
+ std::memcpy(bytes_, code->instruction_start(), code->instruction_size());
+ CHECK(SetPermissions(bytes_, byte_length_, PageAllocator::kReadExecute));
+}
+
+InstructionStream::~InstructionStream() {
+ CHECK(FreePages(bytes_, byte_length_));
+}
+
+// static
+Code* InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
+ DCHECK(FLAG_stress_off_heap_code);
+ // TODO(jgruber,v8:6666): Replace with binary search through range checks
+ // once off-heap code is mapped into a contiguous memory space.
+ for (const InstructionStream* stream : isolate->off_heap_code_) {
+ if (stream->Contains(address)) {
+ return isolate->builtins()->builtin(stream->builtin_index());
+ }
+ }
+ return nullptr;
+}
+
+// static
+InstructionStream* InstructionStream::TryLookupInstructionStream(
+ Isolate* isolate, Code* code) {
+ DCHECK(FLAG_stress_off_heap_code);
+ // TODO(jgruber,v8:6666): Replace with binary search through range checks
+ // once off-heap code is mapped into a contiguous memory space.
+ const int builtin_index = code->builtin_index();
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ for (InstructionStream* stream : isolate->off_heap_code_) {
+ if (stream->builtin_index() == builtin_index) return stream;
+ }
+ return nullptr;
+}
+
+bool InstructionStream::Contains(Address address) const {
+ return bytes_ <= address && address < bytes_ + byte_length_;
+}
+
+} // namespace internal
+} // namespace v8
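
Note: the constructor above is the usual W^X dance for materializing code outside the managed heap: allocate pages read-write, copy the instructions in, then flip the protection to read-execute so the mapping is never writable and executable at the same time. V8 routes this through its PageAllocator abstraction; a POSIX sketch of the same sequence (illustrative only, and it assumes size is already page-rounded, as in the constructor):

#include <cstring>
#include <sys/mman.h>

void* MapCodeSketch(const void* code, size_t size) {
  void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return nullptr;
  std::memcpy(mem, code, size);  // populate while the pages are writable
  if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0) {  // the W^X flip
    munmap(mem, size);
    return nullptr;
  }
  return mem;
}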
diff --git a/deps/v8/src/instruction-stream.h b/deps/v8/src/instruction-stream.h
new file mode 100644
index 0000000000..750e94a955
--- /dev/null
+++ b/deps/v8/src/instruction-stream.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSTRUCTION_STREAM_H_
+#define V8_INSTRUCTION_STREAM_H_
+
+#include "src/base/macros.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class Isolate;
+
+// Wraps an mmap'ed off-heap instruction stream. This class will likely become
+// unneeded once --stress-off-heap-code is removed.
+class InstructionStream final {
+ public:
+ explicit InstructionStream(Code* code);
+ ~InstructionStream();
+
+ // Returns the corresponding Code object if it exists, and nullptr otherwise.
+ static Code* TryLookupCode(Isolate* isolate, Address address);
+
+ // Returns the corresponding stream if it exists, and nullptr otherwise.
+ static InstructionStream* TryLookupInstructionStream(Isolate* isolate,
+ Code* code);
+
+ bool Contains(Address address) const;
+
+ int builtin_index() const { return builtin_index_; }
+ size_t byte_length() const { return byte_length_; }
+ uint8_t* bytes() const { return bytes_; }
+
+ private:
+ size_t byte_length_;
+ uint8_t* bytes_;
+ int builtin_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionStream);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INSTRUCTION_STREAM_H_
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 3b466aceb9..9771f0e00c 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -284,6 +284,21 @@ void StringAtDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void StringSubstringDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kString, kFrom, kTo
+ // TODO(turbofan): Allow builtins to return untagged values.
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::IntPtr(), MachineType::IntPtr()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void StringSubstringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void TypeConversionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ArgumentRegister()};
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 12b25a510a..dd704144de 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CALL_INTERFACE_DESCRIPTOR_H_
-#define V8_CALL_INTERFACE_DESCRIPTOR_H_
+#ifndef V8_INTERFACE_DESCRIPTORS_H_
+#define V8_INTERFACE_DESCRIPTORS_H_
#include <memory>
@@ -30,7 +30,6 @@ class PlatformInterfaceDescriptor;
V(StoreTransition) \
V(StoreGlobal) \
V(StoreGlobalWithVector) \
- V(FastNewClosure) \
V(FastNewFunctionContext) \
V(FastNewObject) \
V(FastNewArguments) \
@@ -63,6 +62,7 @@ class PlatformInterfaceDescriptor;
V(BinaryOp) \
V(StringAdd) \
V(StringAt) \
+ V(StringSubstring) \
V(ForInPrepare) \
V(GetProperty) \
V(ArgumentAdaptor) \
@@ -80,6 +80,7 @@ class PlatformInterfaceDescriptor;
V(FrameDropperTrampoline) \
V(WasmRuntimeCall) \
V(RunMicrotasks) \
+ V(PromiseReactionHandler) \
BUILTIN_LIST_TFS(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
@@ -514,12 +515,6 @@ class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
}
};
-class FastNewClosureDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kSharedFunctionInfo, kVector, kSlot)
- DECLARE_DESCRIPTOR(FastNewClosureDescriptor, CallInterfaceDescriptor)
-};
-
class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kSlots)
@@ -770,6 +765,13 @@ class StringAtDescriptor final : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
+class StringSubstringDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kString, kFrom, kTo)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringSubstringDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
@@ -884,6 +886,13 @@ class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
0)
};
+class PromiseReactionHandlerDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kArgument, kGenerator)
+ DECLARE_DEFAULT_DESCRIPTOR(PromiseReactionHandlerDescriptor,
+ CallInterfaceDescriptor, 2)
+};
+
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor : public CallInterfaceDescriptor { \
public: \
@@ -917,4 +926,4 @@ INTERFACE_DESCRIPTOR_LIST(DEF_KEY)
#include "src/arm/interface-descriptors-arm.h"
#endif
-#endif // V8_CALL_INTERFACE_DESCRIPTOR_H_
+#endif // V8_INTERFACE_DESCRIPTORS_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index 784bb14eb6..2a5923b2a4 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/feedback-vector.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
@@ -125,6 +126,11 @@ uint32_t BytecodeArrayAccessor::GetIndexOperand(int operand_index) const {
return GetUnsignedOperand(operand_index, operand_type);
}
+FeedbackSlot BytecodeArrayAccessor::GetSlotOperand(int operand_index) const {
+ int index = GetIndexOperand(operand_index);
+ return FeedbackVector::ToSlot(index);
+}
+
Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
@@ -206,12 +212,18 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
JumpTableTargetOffsets BytecodeArrayAccessor::GetJumpTableTargetOffsets()
const {
- DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
-
- uint32_t table_start = GetIndexOperand(0);
- uint32_t table_size = GetUnsignedImmediateOperand(1);
- int32_t case_value_base = GetImmediateOperand(2);
-
+ uint32_t table_start, table_size;
+ int32_t case_value_base;
+ if (current_bytecode() == Bytecode::kSwitchOnGeneratorState) {
+ table_start = GetIndexOperand(1);
+ table_size = GetUnsignedImmediateOperand(2);
+ case_value_base = 0;
+ } else {
+ DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
+ table_start = GetIndexOperand(0);
+ table_size = GetUnsignedImmediateOperand(1);
+ case_value_base = GetImmediateOperand(2);
+ }
return JumpTableTargetOffsets(this, table_start, table_size, case_value_base);
}
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index d585e6dc33..f31d2d0e7f 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -83,6 +83,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
uint32_t GetUnsignedImmediateOperand(int operand_index) const;
int32_t GetImmediateOperand(int operand_index) const;
uint32_t GetIndexOperand(int operand_index) const;
+ FeedbackSlot GetSlotOperand(int operand_index) const;
uint32_t GetRegisterCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
int GetRegisterOperandRange(int operand_index) const;
@@ -130,4 +131,4 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETER_BYTECODE_GRAPH_ACCESSOR_H_
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index dcbe8029f9..2d156e4095 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -92,7 +92,7 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
register_count = register_optimizer_->maxiumum_register_index() + 1;
}
- Handle<FixedArray> handler_table =
+ Handle<ByteArray> handler_table =
handler_table_builder()->ToHandlerTable(isolate);
return bytecode_array_writer_.ToBytecodeArray(
isolate, register_count, parameter_count(), handler_table);
@@ -973,8 +973,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEmptyObjectLiteral() {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::GetTemplateObject(
- size_t template_object_description_entry) {
- OutputGetTemplateObject(template_object_description_entry);
+ size_t template_object_description_entry, int feedback_slot) {
+ OutputGetTemplateObject(template_object_description_entry, feedback_slot);
return *this;
}
@@ -1271,16 +1271,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::RestoreGeneratorState(
- Register generator) {
- OutputRestoreGeneratorState(generator);
+BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnGeneratorState(
+ Register generator, BytecodeJumpTable* jump_table) {
+ DCHECK_EQ(jump_table->case_value_base(), 0);
+ BytecodeNode node(CreateSwitchOnGeneratorStateNode(
+ generator, jump_table->constant_pool_index(), jump_table->size()));
+ WriteSwitch(&node, jump_table);
+ LeaveBasicBlock();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
- Register generator, Register generator_state, RegisterList registers) {
- OutputResumeGenerator(generator, generator_state, registers,
- registers.register_count());
+ Register generator, RegisterList registers) {
+ OutputResumeGenerator(generator, registers, registers.register_count());
return *this;
}
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 021222abe5..05086bf714 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -240,7 +240,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Gets or creates the template for a TemplateObjectDescription which will
// be inserted at constant pool index |template_object_description_entry|.
BytecodeArrayBuilder& GetTemplateObject(
- size_t template_object_description_entry);
+ size_t template_object_description_entry, int feedback_slot);
// Push the context in accumulator as the new context, and store in register
// |context|.
@@ -354,6 +354,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// the key to be deleted and the register contains a reference to the object.
BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
+ // JavaScript defines two kinds of 'nil'.
+ enum NilValue { kNullValue, kUndefinedValue };
+
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
int feedback_slot);
@@ -430,9 +433,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& SuspendGenerator(Register generator,
RegisterList registers,
int suspend_id);
- BytecodeArrayBuilder& RestoreGeneratorState(Register generator);
+ BytecodeArrayBuilder& SwitchOnGeneratorState(Register generator,
+ BytecodeJumpTable* jump_table);
BytecodeArrayBuilder& ResumeGenerator(Register generator,
- Register generator_state,
RegisterList registers);
// Exception handling.
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 9aea3d83fa..81f49baeea 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -38,7 +38,7 @@ BytecodeArrayWriter::BytecodeArrayWriter(
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) {
+ Handle<ByteArray> handler_table) {
DCHECK_EQ(0, unbound_jumps_);
int bytecode_size = static_cast<int>(bytecodes()->size());
@@ -158,6 +158,7 @@ void BytecodeArrayWriter::UpdateExitSeenInBlock(Bytecode bytecode) {
case Bytecode::kAbort:
case Bytecode::kJump:
case Bytecode::kJumpConstant:
+ case Bytecode::kSuspendGenerator:
exit_seen_in_block_ = true;
break;
default:
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index c53df10129..9700d2c1cf 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -43,7 +43,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
void BindJumpTableEntry(BytecodeJumpTable* jump_table, int case_value);
Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count,
int parameter_count,
- Handle<FixedArray> handler_table);
+ Handle<ByteArray> handler_table);
private:
// Maximum sized packed bytecode is comprised of a prefix bytecode,
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index ee94e7a2e2..997c5a8da8 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -853,8 +853,22 @@ class BytecodeGenerator::IteratorRecord final {
Register next_;
};
+#ifdef DEBUG
+
+static bool IsInEagerLiterals(
+ FunctionLiteral* literal,
+ const ZoneVector<FunctionLiteral*>& eager_literals) {
+ for (FunctionLiteral* eager_literal : eager_literals) {
+ if (literal == eager_literal) return true;
+ }
+ return false;
+}
+
+#endif // DEBUG
+
BytecodeGenerator::BytecodeGenerator(
- CompilationInfo* info, const AstStringConstants* ast_string_constants)
+ CompilationInfo* info, const AstStringConstants* ast_string_constants,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals)
: zone_(info->zone()),
builder_(zone(), info->num_parameters_including_this(),
info->scope()->num_stack_slots(), info->feedback_vector_spec(),
@@ -863,6 +877,7 @@ BytecodeGenerator::BytecodeGenerator(
ast_string_constants_(ast_string_constants),
closure_scope_(info->scope()),
current_scope_(info->scope()),
+ eager_inner_literals_(eager_inner_literals),
feedback_slot_cache_(new (zone()) FeedbackSlotCache(zone())),
globals_builder_(new (zone()) GlobalDeclarationsBuilder(zone())),
block_coverage_builder_(nullptr),
@@ -878,7 +893,7 @@ BytecodeGenerator::BytecodeGenerator(
execution_result_(nullptr),
incoming_new_target_or_generator_(),
generator_jump_table_(nullptr),
- generator_state_(),
+ suspend_count_(0),
loop_depth_(0),
catch_prediction_(HandlerTable::UNCAUGHT) {
DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
@@ -1091,8 +1106,6 @@ void BytecodeGenerator::GenerateBytecodeBody() {
void BytecodeGenerator::AllocateTopLevelRegisters() {
if (info()->literal()->CanSuspend()) {
- // Allocate a register for generator_state_.
- generator_state_ = register_allocator()->NewRegister();
// Either directly use generator_object_var or allocate a new register for
// the incoming generator object.
Variable* generator_object_var = closure_scope()->generator_object_var();
@@ -1115,81 +1128,19 @@ void BytecodeGenerator::AllocateTopLevelRegisters() {
}
}
-void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
- LoopBuilder* loop_builder) {
- VisitIterationHeader(stmt->first_suspend_id(), stmt->suspend_count(),
- loop_builder);
-}
-
-void BytecodeGenerator::VisitIterationHeader(int first_suspend_id,
- int suspend_count,
- LoopBuilder* loop_builder) {
- // Recall that suspend_count is always zero inside ordinary (i.e.
- // non-generator) functions.
- if (suspend_count == 0) {
- loop_builder->LoopHeader();
- } else {
- loop_builder->LoopHeaderInGenerator(&generator_jump_table_,
- first_suspend_id, suspend_count);
-
- // Perform state dispatch on the generator state, assuming this is a resume.
- builder()
- ->LoadAccumulatorWithRegister(generator_state_)
- .SwitchOnSmiNoFeedback(generator_jump_table_);
-
- // We fall through when the generator state is not in the jump table. If we
- // are not resuming, we want to fall through to the loop body.
- // TODO(leszeks): Only generate this test for debug builds, we can skip it
- // entirely in release assuming that the generator states is always valid.
- BytecodeLabel not_resuming;
- builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
- .CompareOperation(Token::Value::EQ_STRICT, generator_state_)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &not_resuming);
-
- // Otherwise this is an error.
- builder()->Abort(AbortReason::kInvalidJumpTableIndex);
-
- builder()->Bind(&not_resuming);
- }
-}
-
void BytecodeGenerator::BuildGeneratorPrologue() {
DCHECK_GT(info()->literal()->suspend_count(), 0);
- DCHECK(generator_state_.is_valid());
DCHECK(generator_object().is_valid());
generator_jump_table_ =
builder()->AllocateJumpTable(info()->literal()->suspend_count(), 0);
- BytecodeLabel regular_call;
- builder()
- ->LoadAccumulatorWithRegister(generator_object())
- .JumpIfUndefined(&regular_call);
-
- // This is a resume call. Restore the current context and the registers,
- // then perform state dispatch.
- {
- RegisterAllocationScope register_scope(this);
- Register generator_context = register_allocator()->NewRegister();
- builder()
- ->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object())
- .PushContext(generator_context)
- .RestoreGeneratorState(generator_object())
- .StoreAccumulatorInRegister(generator_state_)
- .SwitchOnSmiNoFeedback(generator_jump_table_);
- }
- // We fall through when the generator state is not in the jump table.
- // TODO(leszeks): Only generate this for debug builds.
- builder()->Abort(AbortReason::kInvalidJumpTableIndex);
+ // If the generator is not undefined, this is a resume, so perform state
+ // dispatch.
+ builder()->SwitchOnGeneratorState(generator_object(), generator_jump_table_);
- // This is a regular call.
- builder()
- ->Bind(&regular_call)
- .LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
- .StoreAccumulatorInRegister(generator_state_);
- // Now fall through to the ordinary function prologue, after which we will run
- // into the generator object creation and other extra code inserted by the
- // parser.
+ // Otherwise, fall through to the ordinary function prologue, after which we
+ // will run into the generator object creation and other extra code inserted
+ // by the parser.
}
void BytecodeGenerator::VisitBlock(Block* stmt) {
@@ -1274,6 +1225,7 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
FeedbackSlot literal_slot = GetCachedCreateClosureSlot(decl->fun());
globals_builder()->AddFunctionDeclaration(variable->raw_name(), slot,
literal_slot, decl->fun());
+ AddToEagerLiteralsIfEager(decl->fun());
break;
}
case VariableLocation::PARAMETER:
@@ -1306,6 +1258,8 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
BuildVariableAssignment(variable, Token::INIT, HoleCheckMode::kElided);
break;
}
+ DCHECK_IMPLIES(decl->fun()->ShouldEagerCompile(),
+ IsInEagerLiterals(decl->fun(), *eager_inner_literals_));
}
void BytecodeGenerator::VisitModuleNamespaceImports() {
@@ -1505,11 +1459,11 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
if (stmt->cond()->ToBooleanIsFalse()) {
VisitIterationBody(stmt, &loop_builder);
} else if (stmt->cond()->ToBooleanIsTrue()) {
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader(loop_depth_);
} else {
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
VisitIterationBody(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_backbranch(zone());
@@ -1528,7 +1482,7 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
return;
}
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
if (!stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_body(zone());
@@ -1552,7 +1506,7 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
return;
}
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
BytecodeLabels loop_body(zone());
@@ -1670,7 +1624,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The loop
{
LoopBuilder loop_builder(builder(), block_coverage_builder_, stmt);
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
builder()->SetExpressionAsStatementPosition(stmt->each());
builder()->ForInContinue(index, cache_length);
loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
@@ -1694,7 +1648,7 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
VisitForEffect(stmt->assign_iterator());
VisitForEffect(stmt->assign_next());
- VisitIterationHeader(stmt, &loop_builder);
+ loop_builder.LoopHeader();
builder()->SetExpressionAsStatementPosition(stmt->next_result());
VisitForEffect(stmt->next_result());
TypeHint type_hint = VisitForAccumulatorValue(stmt->result_done());
@@ -1832,6 +1786,14 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
FeedbackSlot slot = GetCachedCreateClosureSlot(expr);
builder()->CreateClosure(entry, feedback_index(slot), flags);
function_literals_.push_back(std::make_pair(expr, entry));
+ AddToEagerLiteralsIfEager(expr);
+}
+
+void BytecodeGenerator::AddToEagerLiteralsIfEager(FunctionLiteral* literal) {
+ if (eager_inner_literals_ && literal->ShouldEagerCompile()) {
+ DCHECK(!IsInEagerLiterals(literal, *eager_inner_literals_));
+ eager_inner_literals_->push_back(literal);
+ }
}
void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
@@ -1867,6 +1829,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
for (int i = 0; i < expr->properties()->length(); i++) {
ClassLiteral::Property* property = expr->properties()->at(i);
if (property->is_computed_name()) {
+ DCHECK_NE(property->kind(), ClassLiteral::Property::PRIVATE_FIELD);
Register key = register_allocator()->GrowRegisterList(&args);
BuildLoadPropertyKey(property, key);
@@ -1884,7 +1847,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
.Bind(&done);
}
- if (property->kind() == ClassLiteral::Property::FIELD) {
+ if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) {
// Initialize field's name variable with the computed name.
DCHECK_NOT_NULL(property->computed_name_var());
builder()->LoadAccumulatorWithRegister(key);
@@ -1892,11 +1855,19 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
HoleCheckMode::kElided);
}
}
- if (property->kind() == ClassLiteral::Property::FIELD) {
+
+ if (property->kind() == ClassLiteral::Property::PUBLIC_FIELD) {
// We don't compute field's value here, but instead do it in the
// initializer function.
continue;
+ } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) {
+ builder()->CallRuntime(Runtime::kCreatePrivateFieldSymbol);
+ DCHECK_NOT_NULL(property->private_field_name_var());
+ BuildVariableAssignment(property->private_field_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ continue;
}
+
Register value = register_allocator()->GrowRegisterList(&args);
VisitForRegisterValue(property->value(), value);
}
@@ -1976,12 +1947,18 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
ClassLiteral::Property* property = expr->fields()->at(i);
if (property->is_computed_name()) {
+ DCHECK_EQ(property->kind(), ClassLiteral::Property::PUBLIC_FIELD);
Variable* var = property->computed_name_var();
DCHECK_NOT_NULL(var);
// The computed name is already evaluated and stored in a
// variable at class definition time.
BuildVariableLoad(var, HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(key);
+ } else if (property->kind() == ClassLiteral::Property::PRIVATE_FIELD) {
+ Variable* private_field_name_var = property->private_field_name_var();
+ DCHECK_NOT_NULL(private_field_name_var);
+ BuildVariableLoad(private_field_name_var, HoleCheckMode::kElided);
+ builder()->StoreAccumulatorInRegister(key);
} else {
BuildLoadPropertyKey(property, key);
}
@@ -1989,7 +1966,11 @@ void BytecodeGenerator::VisitInitializeClassFieldsStatement(
VisitForRegisterValue(property->value(), value);
VisitSetHomeObject(value, constructor, property);
- builder()->CallRuntime(Runtime::kCreateDataProperty, args);
+ Runtime::FunctionId function_id =
+ property->kind() == ClassLiteral::Property::PUBLIC_FIELD
+ ? Runtime::kCreateDataProperty
+ : Runtime::kAddPrivateField;
+ builder()->CallRuntime(function_id, args);
}
}
@@ -2140,7 +2121,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
+ V8_FALLTHROUGH;
case ObjectLiteral::Property::COMPUTED: {
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
@@ -2563,7 +2544,7 @@ void BytecodeGenerator::BuildAsyncReturn(int source_position) {
BuildVariableLoad(var_promise, HoleCheckMode::kElided);
builder()
->StoreAccumulatorInRegister(promise)
- .CallJSRuntime(Context::PROMISE_RESOLVE_INDEX, args)
+ .CallRuntime(Runtime::kInlineResolvePromise, args)
.LoadAccumulatorWithRegister(promise);
}
@@ -2863,32 +2844,33 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
VisitAssignment(expr);
}
-// Suspends the generator to resume at |suspend_id|, with output stored in the
-// accumulator. When the generator is resumed, the sent value is loaded in the
-// accumulator.
-void BytecodeGenerator::BuildSuspendPoint(int suspend_id) {
+// Suspends the generator to resume at the next suspend_id, with output stored
+// in the accumulator. When the generator is resumed, the sent value is loaded
+// in the accumulator.
+void BytecodeGenerator::BuildSuspendPoint(Expression* suspend_expr) {
+ const int suspend_id = suspend_count_++;
+
RegisterList registers = register_allocator()->AllLiveRegisters();
- // Save context, registers, and state. Then return.
+ // Save context, registers, and state. This bytecode then returns the value
+ // in the accumulator.
+ builder()->SetExpressionPosition(suspend_expr);
builder()->SuspendGenerator(generator_object(), registers, suspend_id);
- builder()->SetReturnPosition(kNoSourcePosition, info()->literal());
- builder()->Return(); // Hard return (ignore any finally blocks).
-
// Upon resume, we continue here.
builder()->Bind(generator_jump_table_, suspend_id);
- // Clobbers all registers, updating the state to indicate that we have
- // finished resuming and setting the accumulator to the [[input_or_debug_pos]]
- // slot of the generator object.
- builder()->ResumeGenerator(generator_object(), generator_state_, registers);
+ // Clobbers all registers and sets the accumulator to the
+ // [[input_or_debug_pos]] slot of the generator object.
+ builder()->ResumeGenerator(generator_object(), registers);
}
void BytecodeGenerator::VisitYield(Yield* expr) {
builder()->SetExpressionPosition(expr);
VisitForAccumulatorValue(expr->expression());
- if (!expr->IsInitialYield()) {
+ // If this is not the first yield
+ if (suspend_count_ > 0) {
if (IsAsyncGeneratorFunction(function_kind())) {
// AsyncGenerator yields (with the exception of the initial yield)
// delegate work to the AsyncGeneratorYield stub, which Awaits the operand
@@ -2914,7 +2896,7 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
}
}
- BuildSuspendPoint(expr->suspend_id());
+ BuildSuspendPoint(expr);
// At this point, the generator has been resumed, with the received value in
// the accumulator.
@@ -3053,10 +3035,16 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// visible to the user, and we therefore neither pass the block coverage
// builder nor the expression.
//
- // YieldStar in AsyncGenerator functions includes 3 suspend points, rather
- // than 1. These are documented in the YieldStar AST node.
+ // In addition to the normal suspend for yield*, a yield* in an async
+ // generator has 2 additional suspends:
+ // - One for awaiting the iterator result of closing the generator when
+ // resumed with a "throw" completion, and a throw method is not
+ // present on the delegated iterator
+ // - One for awaiting the iterator result yielded by the delegated
+ // iterator
+
LoopBuilder loop(builder(), nullptr, nullptr);
- VisitIterationHeader(expr->suspend_id(), expr->suspend_count(), &loop);
+ loop.LoopHeader();
{
BytecodeLabels after_switch(zone());
@@ -3110,7 +3098,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
// If there is no "throw" method, perform IteratorClose, and finally
// throw a TypeError.
no_throw_method.Bind(builder());
- BuildIteratorClose(iterator, expr->await_iterator_close_suspend_id());
+ BuildIteratorClose(iterator, expr);
builder()->CallRuntime(Runtime::kThrowThrowMethodMissing);
}
@@ -3119,7 +3107,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
if (iterator_type == IteratorType::kAsync) {
// Await the result of the method invocation.
- BuildAwait(expr->await_delegated_iterator_output_suspend_id());
+ BuildAwait(expr);
}
// Check that output is an object.
@@ -3159,7 +3147,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
.CallRuntime(Runtime::kInlineAsyncGeneratorYield, args);
}
- BuildSuspendPoint(expr->suspend_id());
+ BuildSuspendPoint(expr);
builder()->StoreAccumulatorInRegister(input);
builder()
->CallRuntime(Runtime::kInlineGeneratorGetResumeMode,
@@ -3195,7 +3183,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
builder()->LoadAccumulatorWithRegister(output_value);
}
-void BytecodeGenerator::BuildAwait(int suspend_id) {
+void BytecodeGenerator::BuildAwait(Expression* await_expr) {
// Rather than HandlerTable::UNCAUGHT, async functions use
// HandlerTable::ASYNC_AWAIT to communicate that top-level exceptions are
// transformed into promise rejections. This is necessary to prevent emitting
@@ -3208,22 +3196,20 @@ void BytecodeGenerator::BuildAwait(int suspend_id) {
// Await(operand) and suspend.
RegisterAllocationScope register_scope(this);
- int await_builtin_context_index;
+ Runtime::FunctionId id;
RegisterList args;
if (IsAsyncGeneratorFunction(function_kind())) {
- await_builtin_context_index =
- catch_prediction() == HandlerTable::ASYNC_AWAIT
- ? Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT
- : Context::ASYNC_GENERATOR_AWAIT_CAUGHT;
+ id = catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Runtime::kInlineAsyncGeneratorAwaitUncaught
+ : Runtime::kInlineAsyncGeneratorAwaitCaught;
args = register_allocator()->NewRegisterList(2);
builder()
->MoveRegister(generator_object(), args[0])
.StoreAccumulatorInRegister(args[1]);
} else {
- await_builtin_context_index =
- catch_prediction() == HandlerTable::ASYNC_AWAIT
- ? Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX
- : Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX;
+ id = catch_prediction() == HandlerTable::ASYNC_AWAIT
+ ? Runtime::kInlineAsyncFunctionAwaitUncaught
+ : Runtime::kInlineAsyncFunctionAwaitCaught;
args = register_allocator()->NewRegisterList(3);
builder()
->MoveRegister(generator_object(), args[0])
@@ -3236,10 +3222,10 @@ void BytecodeGenerator::BuildAwait(int suspend_id) {
builder()->StoreAccumulatorInRegister(args[2]);
}
- builder()->CallJSRuntime(await_builtin_context_index, args);
+ builder()->CallRuntime(id, args);
}
- BuildSuspendPoint(suspend_id);
+ BuildSuspendPoint(await_expr);
Register input = register_allocator()->NewRegister();
Register resume_mode = register_allocator()->NewRegister();
@@ -3267,7 +3253,7 @@ void BytecodeGenerator::BuildAwait(int suspend_id) {
void BytecodeGenerator::VisitAwait(Await* expr) {
builder()->SetExpressionPosition(expr);
VisitForAccumulatorValue(expr->expression());
- BuildAwait(expr->suspend_id());
+ BuildAwait(expr);
BuildIncrementBlockCoverageCounterIfEnabled(expr,
SourceRangeKind::kContinuation);
}
@@ -3914,7 +3900,8 @@ void BytecodeGenerator::VisitNaryOperation(NaryOperation* expr) {
}
}
-void BytecodeGenerator::BuildLiteralCompareNil(Token::Value op, NilValue nil) {
+void BytecodeGenerator::BuildLiteralCompareNil(
+ Token::Value op, BytecodeArrayBuilder::NilValue nil) {
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
switch (test_result->fallthrough()) {
@@ -3953,11 +3940,11 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
} else if (expr->IsLiteralCompareUndefined(&sub_expr)) {
VisitForAccumulatorValue(sub_expr);
builder()->SetExpressionPosition(expr);
- BuildLiteralCompareNil(expr->op(), kUndefinedValue);
+ BuildLiteralCompareNil(expr->op(), BytecodeArrayBuilder::kUndefinedValue);
} else if (expr->IsLiteralCompareNull(&sub_expr)) {
VisitForAccumulatorValue(sub_expr);
builder()->SetExpressionPosition(expr);
- BuildLiteralCompareNil(expr->op(), kNullValue);
+ BuildLiteralCompareNil(expr->op(), BytecodeArrayBuilder::kNullValue);
} else {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
@@ -4154,7 +4141,7 @@ void BytecodeGenerator::BuildCallIteratorMethod(Register iterator,
}
void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
- int suspend_id) {
+ Expression* expr) {
RegisterAllocationScope register_scope(this);
BytecodeLabels done(zone());
BytecodeLabel if_called;
@@ -4165,8 +4152,8 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
builder()->Bind(&if_called);
if (iterator.type() == IteratorType::kAsync) {
- DCHECK_GE(suspend_id, 0);
- BuildAwait(suspend_id);
+ DCHECK_NOT_NULL(expr);
+ BuildAwait(expr);
}
builder()->JumpIfJSReceiver(done.New());
@@ -4190,7 +4177,8 @@ void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) {
builder()->SetExpressionPosition(expr);
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
template_objects_.push_back(std::make_pair(expr, entry));
- builder()->GetTemplateObject(entry);
+ FeedbackSlot literal_slot = feedback_spec()->AddLiteralSlot();
+ builder()->GetTemplateObject(entry, feedback_index(literal_slot));
}
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index f9de9550fe..c96e5e9e83 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -28,8 +28,9 @@ class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
- explicit BytecodeGenerator(CompilationInfo* info,
- const AstStringConstants* ast_string_constants);
+ explicit BytecodeGenerator(
+ CompilationInfo* info, const AstStringConstants* ast_string_constants,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals);
void GenerateBytecode(uintptr_t stack_limit);
Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate,
@@ -126,7 +127,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildVariableAssignment(
Variable* variable, Token::Value op, HoleCheckMode hole_check_mode,
LookupHoistingMode lookup_hoisting_mode = LookupHoistingMode::kNormal);
- void BuildLiteralCompareNil(Token::Value compare_op, NilValue nil);
+ void BuildLiteralCompareNil(Token::Value compare_op,
+ BytecodeArrayBuilder::NilValue nil);
void BuildReturn(int source_position = kNoSourcePosition);
void BuildAsyncReturn(int source_position = kNoSourcePosition);
void BuildAsyncGeneratorReturn();
@@ -146,9 +148,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildNewLocalWithContext(Scope* scope);
void BuildGeneratorPrologue();
- void BuildSuspendPoint(int suspend_id);
+ void BuildSuspendPoint(Expression* suspend_expr);
- void BuildAwait(int suspend_id);
+ void BuildAwait(Expression* await_expr);
void BuildGetIterator(Expression* iterable, IteratorType hint);
@@ -164,7 +166,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
IteratorRecord BuildGetIteratorRecord(Expression* iterable,
IteratorType hint);
void BuildIteratorNext(const IteratorRecord& iterator, Register next_result);
- void BuildIteratorClose(const IteratorRecord& iterator, int suspend_id = -1);
+ void BuildIteratorClose(const IteratorRecord& iterator,
+ Expression* expr = nullptr);
void BuildCallIteratorMethod(Register iterator, const AstRawString* method,
RegisterList receiver_and_args,
BytecodeLabel* if_called,
@@ -212,11 +215,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
BytecodeLabels* end_labels,
int coverage_slot);
- // Visit the header/body of a loop iteration.
- void VisitIterationHeader(IterationStatement* stmt,
- LoopBuilder* loop_builder);
- void VisitIterationHeader(int first_suspend_id, int suspend_count,
- LoopBuilder* loop_builder);
+ // Visit the body of a loop iteration.
void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
// Visit a statement and switch scopes, the context is in the accumulator.
@@ -263,6 +262,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Variable* variable);
FeedbackSlot GetCachedCreateClosureSlot(FunctionLiteral* literal);
+ void AddToEagerLiteralsIfEager(FunctionLiteral* literal);
+
static constexpr ToBooleanMode ToBooleanModeFromTypeHint(TypeHint type_hint) {
return type_hint == TypeHint::kBoolean ? ToBooleanMode::kAlreadyBoolean
: ToBooleanMode::kConvertToBoolean;
@@ -324,6 +325,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
DeclarationScope* closure_scope_;
Scope* current_scope_;
+ // External vector of literals to be eagerly compiled.
+ ZoneVector<FunctionLiteral*>* eager_inner_literals_;
+
FeedbackSlotCache* feedback_slot_cache_;
GlobalDeclarationsBuilder* globals_builder_;
@@ -344,7 +348,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Register incoming_new_target_or_generator_;
BytecodeJumpTable* generator_jump_table_;
- Register generator_state_;
+ int suspend_count_;
int loop_depth_;
HandlerTable::CatchPrediction catch_prediction_;
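
A minimal sketch (standalone class, assumed for illustration) of what replacing the explicit suspend ids and Register generator_state_ with the int suspend_count_ field implies: suspend ids are now handed out in bytecode-emission order rather than pre-assigned on AST nodes:

    class SuspendIdAllocator {
     public:
      // Called once per emitted suspend point; ids follow emission order.
      int NextSuspendId() { return suspend_count_++; }
      int suspend_count() const { return suspend_count_; }

     private:
      int suspend_count_ = 0;
    };
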
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 61173a8341..88cdae6ce5 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -200,6 +200,17 @@ bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
return false;
}
+// static
+bool Bytecodes::IsRegisterListOperandType(OperandType operand_type) {
+ switch (operand_type) {
+ case OperandType::kRegList:
+ case OperandType::kRegOutList:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) {
if (IsCallOrConstruct(bytecode) || IsCallRuntime(bytecode)) return true;
switch (bytecode) {
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index ce01566d52..293c0562e9 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -233,7 +233,8 @@ namespace interpreter {
V(CreateEmptyObjectLiteral, AccumulatorUse::kWrite) \
\
/* Tagged templates */ \
- V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx) \
\
/* Closure allocation */ \
V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
@@ -314,11 +315,12 @@ namespace interpreter {
V(ThrowSuperAlreadyCalledIfNotHole, AccumulatorUse::kRead) \
\
/* Generators */ \
- V(RestoreGeneratorState, AccumulatorUse::kWrite, OperandType::kReg) \
- V(SuspendGenerator, AccumulatorUse::kNone, OperandType::kReg, \
+ V(SwitchOnGeneratorState, AccumulatorUse::kNone, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kUImm) \
+ V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kUImm) \
V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kRegOut, OperandType::kRegOutList, OperandType::kRegCount) \
+ OperandType::kRegOutList, OperandType::kRegCount) \
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
@@ -432,6 +434,10 @@ namespace interpreter {
JUMP_FORWARD_BYTECODE_LIST(V) \
V(JumpLoop)
+#define RETURN_BYTECODE_LIST(V) \
+ V(Return) \
+ V(SuspendGenerator)
+
// Enumeration of interpreter bytecodes.
enum class Bytecode : uint8_t {
#define DECLARE_BYTECODE(Name, ...) k##Name,
@@ -613,11 +619,6 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
bytecode <= Bytecode::kJumpIfJSReceiver;
}
- // Returns true if the bytecode is a conditional jump, a jump, or a return.
- static constexpr bool IsJumpOrReturn(Bytecode bytecode) {
- return bytecode == Bytecode::kReturn || IsJump(bytecode);
- }
-
// Return true if |bytecode| is a jump without effects,
// e.g. any jump excluding those that include type coercion like
// JumpIfTrueToBoolean.
@@ -627,7 +628,8 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Returns true if the bytecode is a switch.
static constexpr bool IsSwitch(Bytecode bytecode) {
- return bytecode == Bytecode::kSwitchOnSmiNoFeedback;
+ return bytecode == Bytecode::kSwitchOnSmiNoFeedback ||
+ bytecode == Bytecode::kSwitchOnGeneratorState;
}
// Returns true if |bytecode| has no effects. These bytecodes only manipulate
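
A minimal sketch (plain C++, encoding simplified by assumption) of the control flow the new SwitchOnGeneratorState bytecode expresses: a fresh call falls through to the function prologue, while a resuming call jumps straight to the recorded suspend point:

    #include <vector>

    // state < 0 models "no generator yet / first call"; the real bytecode
    // tests the generator register rather than a signed state (assumption).
    int NextOffset(int state, const std::vector<int>& resume_offsets,
                   int fallthrough_offset) {
      if (state < 0) return fallthrough_offset;  // first call: run prologue
      return resume_offsets[state];              // resume at suspend point
    }
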
@@ -681,9 +683,16 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
return true;
}
+ // Returns true if the bytecode returns.
+ static constexpr bool Returns(Bytecode bytecode) {
+#define OR_BYTECODE(NAME) || bytecode == Bytecode::k##NAME
+ return false RETURN_BYTECODE_LIST(OR_BYTECODE);
+#undef OR_BYTECODE
+ }
+
// Returns the number of values which |bytecode| returns.
static constexpr size_t ReturnCount(Bytecode bytecode) {
- return bytecode == Bytecode::kReturn ? 1 : 0;
+ return Returns(bytecode) ? 1 : 0;
}
// Returns the number of operands expected by |bytecode|.
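
The Returns() predicate above is built by X-macro expansion; as a minimal standalone sketch (toy enum rather than the full bytecode list), the macro unfolds into a chain of comparisons:

    #include <cstdint>

    enum class Bytecode : uint8_t { kReturn, kSuspendGenerator, kLdaZero };

    #define RETURN_BYTECODE_LIST(V) \
      V(Return)                     \
      V(SuspendGenerator)

    constexpr bool Returns(Bytecode bytecode) {
    #define OR_BYTECODE(NAME) || bytecode == Bytecode::k##NAME
      // Expands to:
      //   return false || bytecode == Bytecode::kReturn
      //                || bytecode == Bytecode::kSuspendGenerator;
      return false RETURN_BYTECODE_LIST(OR_BYTECODE);
    #undef OR_BYTECODE
    }

    static_assert(Returns(Bytecode::kSuspendGenerator), "suspend returns");
    static_assert(!Returns(Bytecode::kLdaZero), "lda does not return");
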
@@ -812,6 +821,9 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Returns true if |operand_type| represents a register used as an output.
static bool IsRegisterOutputOperandType(OperandType operand_type);
+ // Returns true if |operand_type| represents a register list operand.
+ static bool IsRegisterListOperandType(OperandType operand_type);
+
// Returns true if the handler for |bytecode| should look ahead and inline a
// dispatch to a Star bytecode.
static bool IsStarLookahead(Bytecode bytecode, OperandScale operand_scale);
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index ea316f286f..bada935e4a 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -47,10 +47,6 @@ void BreakableControlFlowBuilder::EmitJumpIfNull(BytecodeLabels* sites) {
LoopBuilder::~LoopBuilder() {
DCHECK(continue_labels_.empty() || continue_labels_.is_bound());
- // Restore the parent jump table.
- if (generator_jump_table_location_ != nullptr) {
- *generator_jump_table_location_ = parent_generator_jump_table_;
- }
}
void LoopBuilder::LoopHeader() {
@@ -62,26 +58,6 @@ void LoopBuilder::LoopHeader() {
builder()->Bind(&loop_header_);
}
-void LoopBuilder::LoopHeaderInGenerator(
- BytecodeJumpTable** generator_jump_table, int first_resume_id,
- int resume_count) {
- // Bind all the resume points that are inside the loop to be at the loop
- // header.
- for (int id = first_resume_id; id < first_resume_id + resume_count; ++id) {
- builder()->Bind(*generator_jump_table, id);
- }
-
- // Create the loop header.
- LoopHeader();
-
- // Create a new jump table for after the loop header for only these
- // resume points.
- generator_jump_table_location_ = generator_jump_table;
- parent_generator_jump_table_ = *generator_jump_table;
- *generator_jump_table =
- builder()->AllocateJumpTable(resume_count, first_resume_id);
-}
-
void LoopBuilder::LoopBody() {
if (block_coverage_builder_ != nullptr) {
block_coverage_builder_->IncrementBlockCounter(block_coverage_body_slot_);
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 4a81b1f205..405e81bc76 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -105,9 +105,7 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
LoopBuilder(BytecodeArrayBuilder* builder,
BlockCoverageBuilder* block_coverage_builder, AstNode* node)
: BreakableControlFlowBuilder(builder, block_coverage_builder, node),
- continue_labels_(builder->zone()),
- generator_jump_table_location_(nullptr),
- parent_generator_jump_table_(nullptr) {
+ continue_labels_(builder->zone()) {
if (block_coverage_builder_ != nullptr) {
set_needs_continuation_counter();
block_coverage_body_slot_ =
@@ -118,8 +116,6 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
~LoopBuilder();
void LoopHeader();
- void LoopHeaderInGenerator(BytecodeJumpTable** parent_generator_jump_table,
- int first_resume_id, int resume_count);
void LoopBody();
void JumpToHeader(int loop_depth);
void BindContinueTarget();
@@ -138,13 +134,6 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
// jumps from checking the loop condition to the header for do-while loops.
BytecodeLabels continue_labels_;
- // While we're in the loop, we want to have a different jump table for
- // generator switch statements. We restore it at the end of the loop.
- // TODO(leszeks): Storing a pointer to the BytecodeGenerator's jump table
- // field is ugly, figure out a better way to do this.
- BytecodeJumpTable** generator_jump_table_location_;
- BytecodeJumpTable* parent_generator_jump_table_;
-
int block_coverage_body_slot_;
};
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index 4b6c44b95d..93db1e969a 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -15,20 +15,20 @@ namespace interpreter {
HandlerTableBuilder::HandlerTableBuilder(Zone* zone) : entries_(zone) {}
-Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
+Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
int handler_table_size = static_cast<int>(entries_.size());
- Handle<HandlerTable> table =
- Handle<HandlerTable>::cast(isolate->factory()->NewFixedArray(
- HandlerTable::LengthForRange(handler_table_size), TENURED));
+ Handle<ByteArray> table_byte_array = isolate->factory()->NewByteArray(
+ HandlerTable::LengthForRange(handler_table_size), TENURED);
+ HandlerTable table(*table_byte_array);
for (int i = 0; i < handler_table_size; ++i) {
Entry& entry = entries_[i];
HandlerTable::CatchPrediction pred = entry.catch_prediction_;
- table->SetRangeStart(i, static_cast<int>(entry.offset_start));
- table->SetRangeEnd(i, static_cast<int>(entry.offset_end));
- table->SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
- table->SetRangeData(i, entry.context.index());
+ table.SetRangeStart(i, static_cast<int>(entry.offset_start));
+ table.SetRangeEnd(i, static_cast<int>(entry.offset_end));
+ table.SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
+ table.SetRangeData(i, entry.context.index());
}
- return table;
+ return table_byte_array;
}
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 381606f98b..021fefad29 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE HandlerTableBuilder final BASE_EMBEDDED {
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
- Handle<HandlerTable> ToHandlerTable(Isolate* isolate);
+ Handle<ByteArray> ToHandlerTable(Isolate* isolate);
// Creates a new handler table entry and returns a {handler_id} identifying the
// entry, so that it can be referenced by the setter functions below.
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 846b69281e..b2c4ba2309 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -48,6 +48,8 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
made_call_(false),
reloaded_frame_ptr_(false),
bytecode_array_valid_(true),
+ speculation_poison_(FLAG_untrusted_code_mitigations ? SpeculationPoison()
+ : nullptr),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
#ifdef V8_TRACE_IGNITION
@@ -59,7 +61,7 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
// Save the bytecode offset immediately if bytecode will make a call along the
// critical path, or it is a return bytecode.
if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
- bytecode_ == Bytecode::kReturn) {
+ Bytecodes::Returns(bytecode)) {
SaveBytecodeOffset();
}
}
@@ -72,6 +74,24 @@ InterpreterAssembler::~InterpreterAssembler() {
UnregisterCallGenerationCallbacks();
}
+Node* InterpreterAssembler::PoisonOnSpeculationTagged(Node* value) {
+ if (speculation_poison_ == nullptr) return value;
+ return BitcastWordToTagged(
+ WordAnd(speculation_poison_, BitcastTaggedToWord(value)));
+}
+
+Node* InterpreterAssembler::PoisonOnSpeculationWord(Node* value) {
+ if (speculation_poison_ == nullptr) return value;
+ return WordAnd(speculation_poison_, value);
+}
+
+Node* InterpreterAssembler::PoisonOnSpeculationInt32(Node* value) {
+ if (speculation_poison_ == nullptr) return value;
+ Node* truncated_speculation_poison =
+ Is64() ? TruncateInt64ToInt32(speculation_poison_) : speculation_poison_;
+ return Word32And(truncated_speculation_poison, value);
+}
+
Node* InterpreterAssembler::GetInterpretedFramePointer() {
if (!interpreted_frame_pointer_.IsBound()) {
interpreted_frame_pointer_.Bind(LoadParentFramePointer());
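
The three PoisonOnSpeculation* helpers above all follow one masking pattern. A minimal sketch (plain integers instead of CSA nodes) of the idea: the poison word is all-ones on the architecturally correct path and all-zeros under misspeculation, so ANDing it into a loaded value neutralizes speculatively loaded data:

    #include <cstdint>

    uint64_t PoisonOnSpeculation(uint64_t poison, uint64_t loaded_value) {
      // poison == ~0ull on the correct path, 0 on a mispredicted path, so
      // the load result survives only when speculation was right.
      return poison & loaded_value;
    }
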
@@ -139,7 +159,7 @@ Node* InterpreterAssembler::GetAccumulatorUnchecked() {
Node* InterpreterAssembler::GetAccumulator() {
DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
- return GetAccumulatorUnchecked();
+ return PoisonOnSpeculationTagged(GetAccumulatorUnchecked());
}
void InterpreterAssembler::SetAccumulator(Node* value) {
@@ -222,22 +242,27 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
- return IntPtrAdd(GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index));
+ return PoisonOnSpeculationWord(
+ IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
+}
+
+Node* InterpreterAssembler::RegisterLocation(Register reg) {
+ return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
return TimesPointerSize(index);
}
-Node* InterpreterAssembler::LoadRegister(Register reg) {
- return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
+Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+ Node* value = Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
+ RegisterFrameOffset(reg_index));
+ return PoisonOnSpeculationTagged(value);
}
-Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+Node* InterpreterAssembler::LoadRegister(Register reg) {
return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index));
+ IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
}
Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
@@ -245,22 +270,92 @@ Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
<< kPointerSizeLog2);
}
-Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
- return StoreNoWriteBarrier(
+Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
+ return LoadRegister(BytecodeOperandRegUnpoisoned(operand_index));
+}
+
+std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
+ int operand_index) {
+ DCHECK_EQ(OperandType::kRegPair,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ Node* second_reg_index = NextRegister(first_reg_index);
+ return std::make_pair(LoadRegister(first_reg_index),
+ LoadRegister(second_reg_index));
+}
+
+InterpreterAssembler::RegListNodePair
+InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
+ DCHECK(Bytecodes::IsRegisterListOperandType(
+ Bytecodes::GetOperandType(bytecode_, operand_index)));
+ DCHECK_EQ(OperandType::kRegCount,
+ Bytecodes::GetOperandType(bytecode_, operand_index + 1));
+ Node* base_reg =
+ RegisterLocation(BytecodeOperandRegUnpoisoned(operand_index));
+ Node* reg_count = BytecodeOperandCount(operand_index + 1);
+ return RegListNodePair(base_reg, reg_count);
+}
+
+Node* InterpreterAssembler::LoadRegisterFromRegisterList(
+ const RegListNodePair& reg_list, int index) {
+ Node* location = RegisterLocationInRegisterList(reg_list, index);
+ // Location is already poisoned on speculation, so no need to poison here.
+ return Load(MachineType::AnyTagged(), location);
+}
+
+Node* InterpreterAssembler::RegisterLocationInRegisterList(
+ const RegListNodePair& reg_list, int index) {
+ CSA_ASSERT(this,
+ Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
+ Node* offset = RegisterFrameOffset(IntPtrConstant(index));
+ // Register indexes are negative, so subtract index from base location to get
+ // location.
+ return IntPtrSub(reg_list.base_reg_location(), offset);
+}
+
+void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
+ StoreNoWriteBarrier(
MachineRepresentation::kTagged, GetInterpretedFramePointer(),
IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
}
-Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
- return StoreNoWriteBarrier(MachineRepresentation::kTagged,
- GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index), value);
+void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ GetInterpretedFramePointer(),
+ RegisterFrameOffset(reg_index), value);
}
-Node* InterpreterAssembler::StoreAndTagRegister(compiler::Node* value,
- Register reg) {
+void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
int offset = reg.ToOperand() << kPointerSizeLog2;
- return StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
+ StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
+}
+
+void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
+ int operand_index) {
+ StoreRegister(value, BytecodeOperandRegUnpoisoned(operand_index));
+}
+
+void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
+ Node* value2,
+ int operand_index) {
+ DCHECK_EQ(OperandType::kRegOutPair,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ StoreRegister(value1, first_reg_index);
+ Node* second_reg_index = NextRegister(first_reg_index);
+ StoreRegister(value2, second_reg_index);
+}
+
+void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
+ Node* value1, Node* value2, Node* value3, int operand_index) {
+ DCHECK_EQ(OperandType::kRegOutTriple,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ StoreRegister(value1, first_reg_index);
+ Node* second_reg_index = NextRegister(first_reg_index);
+ StoreRegister(value2, second_reg_index);
+ Node* third_reg_index = NextRegister(second_reg_index);
+ StoreRegister(value3, third_reg_index);
}
Node* InterpreterAssembler::NextRegister(Node* reg_index) {
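
A minimal sketch (plain pointer arithmetic, 8-byte registers assumed) of the layout behind RegisterLocationInRegisterList: interpreter registers grow downward from the base location, which is why the code above uses IntPtrSub rather than an add:

    #include <cstdint>

    intptr_t RegisterLocationInList(intptr_t base_reg_location, int index) {
      const intptr_t kPointerSize = 8;  // assumed 64-bit target
      // Register i of the list sits i slots *below* the base location.
      return base_reg_location - index * kPointerSize;
    }
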
@@ -273,7 +368,8 @@ Node* InterpreterAssembler::OperandOffset(int operand_index) {
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}
-Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedByteUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -282,7 +378,8 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) {
IntPtrAdd(BytecodeOffset(), operand_offset));
}
-Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedByteUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -291,7 +388,7 @@ Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
IntPtrAdd(BytecodeOffset(), operand_offset));
}
-compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
+Node* InterpreterAssembler::BytecodeOperandReadUnalignedUnpoisoned(
int relative_offset, MachineType result_type) {
static const int kMaxCount = 4;
DCHECK(!TargetSupportsUnalignedAccess());
@@ -324,7 +421,7 @@ compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
// Read the most significant byte into bytes[0] and then in order
// down to least significant in bytes[count - 1].
DCHECK_LE(count, kMaxCount);
- compiler::Node* bytes[kMaxCount];
+ Node* bytes[kMaxCount];
for (int i = 0; i < count; i++) {
MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
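
A minimal sketch (plain C++ over a byte buffer, the architecture-specific msb_offset/kStep handling omitted) of the byte-assembly loop above: bytes are fetched most significant first and shifted into place:

    #include <cstddef>
    #include <cstdint>

    uint32_t ReadUnalignedUint32(const uint8_t* bytes) {
      uint32_t result = 0;
      for (size_t i = 0; i < 4; i++) {
        // bytes[0] is most significant, matching the comment above.
        result = (result << 8) | bytes[i];
      }
      return result;
    }
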
@@ -342,7 +439,8 @@ compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
return result;
}
-Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedShortUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -353,11 +451,13 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) {
return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16());
+ return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
+ MachineType::Uint16());
}
}
-Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedShortUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -368,11 +468,13 @@ Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
+ return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
+ MachineType::Int16());
}
}
-Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedQuadUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -382,11 +484,13 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32());
+ return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
+ MachineType::Uint32());
}
}
-Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedQuadUnpoisoned(
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -396,44 +500,57 @@ Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
+ return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
+ MachineType::Int32());
}
}
-Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
- OperandSize operand_size) {
+Node* InterpreterAssembler::BytecodeSignedOperandUnpoisoned(
+ int operand_index, OperandSize operand_size) {
DCHECK(!Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandSignedByte(operand_index);
+ return BytecodeOperandSignedByteUnpoisoned(operand_index);
case OperandSize::kShort:
- return BytecodeOperandSignedShort(operand_index);
+ return BytecodeOperandSignedShortUnpoisoned(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandSignedQuad(operand_index);
+ return BytecodeOperandSignedQuadUnpoisoned(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
-Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
- OperandSize operand_size) {
+Node* InterpreterAssembler::BytecodeUnsignedOperandUnpoisoned(
+ int operand_index, OperandSize operand_size) {
DCHECK(Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandUnsignedByte(operand_index);
+ return BytecodeOperandUnsignedByteUnpoisoned(operand_index);
case OperandSize::kShort:
- return BytecodeOperandUnsignedShort(operand_index);
+ return BytecodeOperandUnsignedShortUnpoisoned(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandUnsignedQuad(operand_index);
+ return BytecodeOperandUnsignedQuadUnpoisoned(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
+Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
+ OperandSize operand_size) {
+ return PoisonOnSpeculationInt32(
+ BytecodeSignedOperandUnpoisoned(operand_index, operand_size));
+}
+
+Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
+ OperandSize operand_size) {
+ return PoisonOnSpeculationInt32(
+ BytecodeUnsignedOperandUnpoisoned(operand_index, operand_size));
+}
+
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
DCHECK_EQ(OperandType::kRegCount,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -464,7 +581,7 @@ Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
}
Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
- return SmiFromWord32(BytecodeOperandUImm(operand_index));
+ return SmiFromInt32(BytecodeOperandUImm(operand_index));
}
Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
@@ -480,7 +597,7 @@ Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
}
Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
- return SmiFromWord32(BytecodeOperandImm(operand_index));
+ return SmiFromInt32(BytecodeOperandImm(operand_index));
}
Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
@@ -499,13 +616,23 @@ Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
return SmiTag(BytecodeOperandIdx(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandConstantPoolIdxUnpoisoned(
+ int operand_index) {
+ DCHECK_EQ(OperandType::kIdx,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ return ChangeUint32ToWord(
+ BytecodeUnsignedOperand(operand_index, operand_size));
+}
+
+Node* InterpreterAssembler::BytecodeOperandRegUnpoisoned(int operand_index) {
DCHECK(Bytecodes::IsRegisterOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeInt32ToIntPtr(
- BytecodeSignedOperand(operand_index, operand_size));
+ BytecodeSignedOperandUnpoisoned(operand_index, operand_size));
}
Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
@@ -539,18 +666,27 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
- return LoadFixedArrayElement(constant_pool, index);
+ return PoisonOnSpeculationTagged(LoadFixedArrayElement(constant_pool, index));
}
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
return SmiUntag(LoadConstantPoolEntry(index));
}
+Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
+ int operand_index) {
+ Node* index = BytecodeOperandConstantPoolIdxUnpoisoned(operand_index);
+ return LoadConstantPoolEntry(index);
+}
+
+Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
+ int operand_index) {
+ return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
+}
+
Node* InterpreterAssembler::LoadFeedbackVector() {
Node* function = LoadRegister(Register::function_closure());
- Node* cell = LoadObjectField(function, JSFunction::kFeedbackVectorOffset);
- Node* vector = LoadObjectField(cell, Cell::kValueOffset);
- return vector;
+ return CodeStubAssembler::LoadFeedbackVector(function);
}
void InterpreterAssembler::CallPrologue() {
@@ -586,11 +722,11 @@ void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
Comment("increment call count");
Node* call_count =
LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize);
- // The lowest {CallICNexus::CallCountField::kShift} bits of the call
+ // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
// count are used as flags. To increment the call count by 1 we hence
- // have to increment by 1 << {CallICNexus::CallCountField::kShift}.
- Node* new_count =
- SmiAdd(call_count, SmiConstant(1 << CallICNexus::CallCountField::kShift));
+ // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
+ Node* new_count = SmiAdd(
+ call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
// Count is Smi, so we don't need a write barrier.
StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
SKIP_WRITE_BARRIER, kPointerSize);
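
A minimal sketch (standalone arithmetic, flag width assumed) of the increment above: because the low CallCountField::kShift bits of the packed count are flags, adding 1 << kShift bumps the count while leaving the flag bits intact:

    #include <cstdint>

    constexpr uint32_t kCallCountShift = 1;  // assumed flag width

    uint32_t IncrementCallCount(uint32_t packed_count) {
      // Flags live in bits [0, kCallCountShift); the count sits above them.
      return packed_count + (1u << kCallCountShift);
    }
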
@@ -707,18 +843,30 @@ void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
}
void InterpreterAssembler::CallJSAndDispatch(
- Node* function, Node* context, Node* first_arg, Node* arg_count,
+ Node* function, Node* context, const RegListNodePair& args,
ConvertReceiverMode receiver_mode) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
+
+ Node* args_count;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // The receiver is implied, so it is not in the argument list.
+ args_count = args.reg_count();
+ } else {
+ // Subtract the receiver from the argument count.
+ Node* receiver_count = Int32Constant(1);
+ args_count = Int32Sub(args.reg_count(), receiver_count);
+ }
+
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
Node* code_target = HeapConstant(callable.code());
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
- arg_count, first_arg, function);
+ args_count, args.base_reg_location(),
+ function);
// TailCallStubThenDispatch updates accumulator with result.
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
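
A minimal sketch (plain ints instead of CSA nodes) of the args_count fixup in CallJSAndDispatch: with an implied null/undefined receiver the register list holds only the arguments, otherwise the receiver is part of the list and must be subtracted out:

    enum class ConvertReceiverMode { kNullOrUndefined, kAny };

    int JSArgumentCount(int reg_count, ConvertReceiverMode mode) {
      if (mode == ConvertReceiverMode::kNullOrUndefined) {
        return reg_count;    // receiver implied, not in the register list
      }
      return reg_count - 1;  // register list includes the receiver
    }
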
@@ -764,8 +912,8 @@ template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
ConvertReceiverMode receiver_mode, Node*, Node*, Node*);
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
- Node* function, Node* context, Node* first_arg, Node* arg_count,
- Node* slot_id, Node* feedback_vector) {
+ Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
+ Node* feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
CollectCallFeedback(function, context, feedback_vector, slot_id);
@@ -775,16 +923,19 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
InterpreterPushArgsMode::kWithFinalSpread);
Node* code_target = HeapConstant(callable.code());
+ Node* receiver_count = Int32Constant(1);
+ Node* args_count = Int32Sub(args.reg_count(), receiver_count);
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
- arg_count, first_arg, function);
+ args_count, args.base_reg_location(),
+ function);
// TailCallStubThenDispatch updates accumulator with result.
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
Node* InterpreterAssembler::Construct(Node* target, Node* context,
- Node* new_target, Node* first_arg,
- Node* arg_count, Node* slot_id,
- Node* feedback_vector) {
+ Node* new_target,
+ const RegListNodePair& args,
+ Node* slot_id, Node* feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
VARIABLE(var_result, MachineRepresentation::kTagged);
VARIABLE(var_site, MachineRepresentation::kTagged);
@@ -937,8 +1088,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
isolate(), InterpreterPushArgsMode::kJSFunction);
Node* code_target = HeapConstant(callable.code());
var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- arg_count, new_target, target, var_site.value(),
- first_arg));
+ args.reg_count(), new_target, target,
+ var_site.value(), args.base_reg_location()));
Goto(&return_result);
}
@@ -950,8 +1101,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
isolate(), InterpreterPushArgsMode::kOther);
Node* code_target = HeapConstant(callable.code());
var_result.Bind(CallStub(callable.descriptor(), code_target, context,
- arg_count, new_target, target, UndefinedConstant(),
- first_arg));
+ args.reg_count(), new_target, target,
+ UndefinedConstant(), args.base_reg_location()));
Goto(&return_result);
}
@@ -961,8 +1112,8 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
Node* new_target,
- Node* first_arg,
- Node* arg_count, Node* slot_id,
+ const RegListNodePair& args,
+ Node* slot_id,
Node* feedback_vector) {
// TODO(bmeurer): Unify this with the Construct bytecode feedback
// above once we have a way to pass the AllocationSite to the Array
@@ -1075,12 +1226,13 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
Node* code_target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), code_target, context, arg_count,
- new_target, target, UndefinedConstant(), first_arg);
+ return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
+ new_target, target, UndefinedConstant(),
+ args.base_reg_location());
}
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
- Node* first_arg, Node* arg_count,
+ const RegListNodePair& args,
int result_size) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallRuntime(bytecode_));
@@ -1099,7 +1251,7 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
IntPtrConstant(offsetof(Runtime::Function, entry)));
return CallStubR(callable.descriptor(), result_size, code_target, context,
- arg_count, first_arg, function_entry);
+ args.reg_count(), args.base_reg_location(), function_entry);
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
@@ -1132,7 +1284,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
BIND(&interrupt_check);
{
CallRuntime(Runtime::kInterrupt, GetContext());
- new_budget.Bind(Int32Constant(Interpreter::kInterruptBudget));
+ new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
Goto(&ok);
}
@@ -1169,7 +1321,7 @@ Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
- UpdateInterruptBudget(TruncateWordToWord32(delta), backward);
+ UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
Node* new_bytecode_offset = Advance(delta, backward);
Node* target_bytecode = LoadBytecode(new_bytecode_offset);
return DispatchToBytecode(target_bytecode, new_bytecode_offset);
@@ -1200,7 +1352,7 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
JumpConditional(WordNotEqual(lhs, rhs), delta);
}
-Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
+Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
Node* bytecode =
Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
return ChangeUint32ToWord(bytecode);
@@ -1236,7 +1388,7 @@ void InterpreterAssembler::InlineStar() {
#ifdef V8_TRACE_IGNITION
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
- StoreRegister(GetAccumulator(), BytecodeOperandReg(0));
+ StoreRegister(GetAccumulator(), BytecodeOperandRegUnpoisoned(0));
DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
@@ -1267,24 +1419,29 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
Load(MachineType::Pointer(), DispatchTableRawPointer(),
TimesPointerSize(target_bytecode));
- return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
+ return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
+ target_bytecode);
}
Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
- Node* bytecode_offset) {
+ Node* bytecode_offset,
+ Node* target_bytecode) {
// TODO(ishell): Add CSA::CodeEntryPoint(code).
Node* handler_entry =
IntPtrAdd(BitcastTaggedToWord(handler),
IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
- return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
+ return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
+ target_bytecode);
}
Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
- Node* handler_entry, Node* bytecode_offset) {
+ Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
InterpreterDispatchDescriptor descriptor(isolate());
+ // Propagate speculation poisoning.
+ Node* poisoned_handler_entry = PoisonOnSpeculationWord(handler_entry);
return TailCallBytecodeDispatch(
- descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset,
- BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
+ descriptor, poisoned_handler_entry, GetAccumulatorUnchecked(),
+ bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1319,7 +1476,8 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
Load(MachineType::Pointer(), DispatchTableRawPointer(),
TimesPointerSize(target_index));
- DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
+ DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
+ next_bytecode);
}
void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
@@ -1342,7 +1500,7 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// of the first bytecode.
const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
- Node* profiling_weight = Int32Sub(TruncateWordToWord32(BytecodeOffset()),
+ Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
Int32Constant(kFirstBytecodeOffset));
UpdateInterruptBudget(profiling_weight, true);
}
@@ -1451,9 +1609,12 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid(Node* register_file,
BIND(&ok);
}
-Node* InterpreterAssembler::ExportRegisterFile(Node* array,
- Node* register_count) {
+Node* InterpreterAssembler::ExportRegisterFile(
+ Node* array, const RegListNodePair& registers) {
+ Node* register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
+ CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
+ RegisterLocation(Register(0))));
AbortIfRegisterCountInvalid(array, register_count);
}
@@ -1483,9 +1644,12 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array,
return array;
}
-Node* InterpreterAssembler::ImportRegisterFile(Node* array,
- Node* register_count) {
+Node* InterpreterAssembler::ImportRegisterFile(
+ Node* array, const RegListNodePair& registers) {
+ Node* register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
+ CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
+ RegisterLocation(Register(0))));
AbortIfRegisterCountInvalid(array, register_count);
}
@@ -1587,8 +1751,7 @@ void InterpreterAssembler::DeserializeLazyAndDispatch() {
Node* target_handler =
CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
SmiTag(bytecode), SmiConstant(operand_scale()));
-
- DispatchToBytecodeHandler(target_handler, bytecode_offset);
+ DispatchToBytecodeHandler(target_handler, bytecode_offset, bytecode);
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 63d1709145..cb622d0b2d 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -56,9 +56,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the smi immediate for bytecode operand |operand_index| in the
// current bytecode.
compiler::Node* BytecodeOperandImmSmi(int operand_index);
- // Returns the word-size sign-extended register index for bytecode operand
- // |operand_index| in the current bytecode.
- compiler::Node* BytecodeOperandReg(int operand_index);
// Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandRuntimeId(int operand_index);
@@ -86,31 +83,58 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
compiler::Node* depth, Label* target);
+ // A RegListNodePair provides an abstraction over lists of registers.
+ class RegListNodePair {
+ public:
+ RegListNodePair(Node* base_reg_location, Node* reg_count)
+ : base_reg_location_(base_reg_location), reg_count_(reg_count) {}
+
+ compiler::Node* reg_count() const { return reg_count_; }
+ compiler::Node* base_reg_location() const { return base_reg_location_; }
+
+ private:
+ compiler::Node* base_reg_location_;
+ compiler::Node* reg_count_;
+ };
+
// Backup/restore register file to/from a fixed array of the correct length.
compiler::Node* ExportRegisterFile(compiler::Node* array,
- compiler::Node* register_count);
+ const RegListNodePair& registers);
compiler::Node* ImportRegisterFile(compiler::Node* array,
- compiler::Node* register_count);
+ const RegListNodePair& registers);
// Loads from and stores to the interpreter register file.
compiler::Node* LoadRegister(Register reg);
- compiler::Node* LoadRegister(compiler::Node* reg_index);
compiler::Node* LoadAndUntagRegister(Register reg);
- compiler::Node* StoreRegister(compiler::Node* value, Register reg);
- compiler::Node* StoreRegister(compiler::Node* value,
- compiler::Node* reg_index);
- compiler::Node* StoreAndTagRegister(compiler::Node* value, Register reg);
-
- // Returns the next consecutive register.
- compiler::Node* NextRegister(compiler::Node* reg_index);
-
- // Returns the location in memory of the register |reg_index| in the
- // interpreter register file.
- compiler::Node* RegisterLocation(compiler::Node* reg_index);
-
+ compiler::Node* LoadRegisterAtOperandIndex(int operand_index);
+ std::pair<compiler::Node*, compiler::Node*> LoadRegisterPairAtOperandIndex(
+ int operand_index);
+ void StoreRegister(compiler::Node* value, Register reg);
+ void StoreAndTagRegister(compiler::Node* value, Register reg);
+ void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
+ void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
+ compiler::Node* value2,
+ int operand_index);
+ void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
+ compiler::Node* value2,
+ compiler::Node* value3,
+ int operand_index);
+
+ RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
+ Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
+ int index);
+ Node* RegisterLocationInRegisterList(const RegListNodePair& reg_list,
+ int index);
+
+ // Load constant at the index specified in operand |operand_index| from the
+ // constant pool.
+ compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
+ // Load and untag constant at the index specified in operand |operand_index|
+ // from the constant pool.
+ compiler::Node* LoadAndUntagConstantPoolEntryAtOperandIndex(
+ int operand_index);
// Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
-
// Load and untag constant at |index| in the constant pool.
compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
@@ -135,12 +159,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* feedback_vector,
compiler::Node* slot_id);
- // Call JSFunction or Callable |function| with |arg_count| arguments (not
- // including receiver) and the first argument located at |first_arg|, possibly
+ // Call JSFunction or Callable |function| with |args| arguments, possibly
// including the receiver depending on |receiver_mode|. After the call
// returns, it dispatches directly to the next bytecode.
void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
- compiler::Node* first_arg, compiler::Node* arg_count,
+ const RegListNodePair& args,
ConvertReceiverMode receiver_mode);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
@@ -151,46 +174,41 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
ConvertReceiverMode receiver_mode, TArgs... args);
- // Call JSFunction or Callable |function| with |arg_count|
- // arguments (not including receiver) and the first argument
- // located at |first_arg|, and the final argument being spread. After the call
- // returns directly dispatches to the next bytecode.
+ // Call JSFunction or Callable |function| with |args|
+ // arguments (not including receiver), the final argument being a spread.
+ // After the call returns, it dispatches directly to the next bytecode.
void CallJSWithSpreadAndDispatch(compiler::Node* function,
compiler::Node* context,
- compiler::Node* first_arg,
- compiler::Node* arg_count,
+ const RegListNodePair& args,
compiler::Node* slot_id,
compiler::Node* feedback_vector);
- // Call constructor |target| with |arg_count| arguments (not
- // including receiver) and the first argument located at
- // |first_arg|. The |new_target| is the same as the
- // |target| for the new keyword, but differs for the super
- // keyword.
+ // Call constructor |target| with |args| arguments (not including receiver).
+ // The |new_target| is the same as the |target| for the new keyword, but
+ // differs for the super keyword.
compiler::Node* Construct(compiler::Node* target, compiler::Node* context,
compiler::Node* new_target,
- compiler::Node* first_arg,
- compiler::Node* arg_count, compiler::Node* slot_id,
+ const RegListNodePair& args,
+ compiler::Node* slot_id,
compiler::Node* feedback_vector);
- // Call constructor |target| with |arg_count| arguments (not including
- // receiver) and the first argument located at |first_arg|. The last argument
- // is always a spread. The |new_target| is the same as the |target| for
- // the new keyword, but differs for the super keyword.
+ // Call constructor |target| with |args| arguments (not including
+ // receiver). The last argument is always a spread. The |new_target| is the
+ // same as the |target| for the new keyword, but differs for the super
+ // keyword.
compiler::Node* ConstructWithSpread(compiler::Node* target,
compiler::Node* context,
compiler::Node* new_target,
- compiler::Node* first_arg,
- compiler::Node* arg_count,
+ const RegListNodePair& args,
compiler::Node* slot_id,
compiler::Node* feedback_vector);
- // Call runtime function with |arg_count| arguments and the first argument
- // located at |first_arg|.
+ // Call runtime function with |args| arguments, which will return
+ // |return_size| values.
compiler::Node* CallRuntimeN(compiler::Node* function_id,
compiler::Node* context,
- compiler::Node* first_arg,
- compiler::Node* arg_count, int return_size = 1);
+ const RegListNodePair& args,
+ int return_size = 1);
// Jump forward relative to the current bytecode by the |jump_offset|.
compiler::Node* Jump(compiler::Node* jump_offset);
@@ -217,14 +235,14 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Dispatch to the bytecode.
compiler::Node* Dispatch();
- // Dispatch to bytecode handler.
- compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
- return DispatchToBytecodeHandler(handler, BytecodeOffset());
- }
-
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
+ // Dispatch to |target_bytecode| at |new_bytecode_offset|.
+ // |target_bytecode| should be equivalent to loading from the offset.
+ compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
+ compiler::Node* new_bytecode_offset);
+
// Abort with the given abort reason.
void Abort(AbortReason abort_reason);
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
@@ -264,6 +282,18 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// interpreted.
compiler::Node* GetInterpretedFramePointer();
+ // Operations on registers.
+ compiler::Node* RegisterLocation(Register reg);
+ compiler::Node* RegisterLocation(compiler::Node* reg_index);
+ compiler::Node* NextRegister(compiler::Node* reg_index);
+ compiler::Node* LoadRegister(Node* reg_index);
+ void StoreRegister(compiler::Node* value, compiler::Node* reg_index);
+
+ // Poison |value| on speculative paths.
+ compiler::Node* PoisonOnSpeculationTagged(Node* value);
+ compiler::Node* PoisonOnSpeculationWord(Node* value);
+ compiler::Node* PoisonOnSpeculationInt32(Node* value);
+
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
void CallPrologue();
@@ -291,16 +321,21 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// The |result_type| determines the size and signedness of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
- compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
- MachineType result_type);
-
- // Returns zero- or sign-extended to word32 value of the operand.
- compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
- compiler::Node* BytecodeOperandSignedByte(int operand_index);
- compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
- compiler::Node* BytecodeOperandSignedShort(int operand_index);
- compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
- compiler::Node* BytecodeOperandSignedQuad(int operand_index);
+ compiler::Node* BytecodeOperandReadUnalignedUnpoisoned(
+ int relative_offset, MachineType result_type);
+
+ // Returns zero- or sign-extended to word32 value of the operand. Values are
+ // not poisoned on speculation - should be used with care.
+ compiler::Node* BytecodeOperandUnsignedByteUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandSignedByteUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandUnsignedShortUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandSignedShortUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandUnsignedQuadUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandSignedQuadUnpoisoned(int operand_index);
+ compiler::Node* BytecodeSignedOperandUnpoisoned(int operand_index,
+ OperandSize operand_size);
+ compiler::Node* BytecodeUnsignedOperandUnpoisoned(int operand_index,
+ OperandSize operand_size);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
@@ -309,6 +344,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeUnsignedOperand(int operand_index,
OperandSize operand_size);
+ // Returns the word-size sign-extended register index for bytecode operand
+ // |operand_index| in the current bytecode. Value is not poisoned on
+ // speculation since the value loaded from the register is poisoned instead.
+ compiler::Node* BytecodeOperandRegUnpoisoned(int operand_index);
+
+ // Returns the word zero-extended index immediate for bytecode operand
+ // |operand_index| in the current bytecode for use when loading a constant pool entry.
+ compiler::Node* BytecodeOperandConstantPoolIdxUnpoisoned(int operand_index);
+
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
// offset). Helper function for Jump and JumpBackward.
@@ -344,18 +388,15 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// next dispatch offset.
void InlineStar();
- // Dispatch to |target_bytecode| at |new_bytecode_offset|.
- // |target_bytecode| should be equivalent to loading from the offset.
- compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
- compiler::Node* new_bytecode_offset);
-
// Dispatch to the bytecode handler with code offset |handler|.
compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
- compiler::Node* bytecode_offset);
+ compiler::Node* bytecode_offset,
+ compiler::Node* target_bytecode);
// Dispatch to the bytecode handler with code entry point |handler_entry|.
compiler::Node* DispatchToBytecodeHandlerEntry(
- compiler::Node* handler_entry, compiler::Node* bytecode_offset);
+ compiler::Node* handler_entry, compiler::Node* bytecode_offset,
+ compiler::Node* target_bytecode);
int CurrentBytecodeSize() const;
@@ -373,6 +414,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
bool reloaded_frame_ptr_;
bool bytecode_array_valid_;
+ Node* speculation_poison_;
+
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 5dabc13ea0..65af249ea7 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -73,8 +73,7 @@ IGNITION_HANDLER(LdaSmi, InterpreterAssembler) {
//
// Load constant literal at |idx| in the constant pool into the accumulator.
IGNITION_HANDLER(LdaConstant, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* constant = LoadConstantPoolEntry(index);
+ Node* constant = LoadConstantPoolEntryAtOperandIndex(0);
SetAccumulator(constant);
Dispatch();
}
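The handler rewrites in this file all follow the same pattern: the two-step BytecodeOperandIdx + LoadConstantPoolEntry sequence collapses into a single accessor. A plausible sketch of the helper, assuming it pairs the unpoisoned index read with the entry load:

Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  // The index itself stays unpoisoned; the constant pool load is the
  // point where speculation poisoning is applied.
  Node* index = BytecodeOperandConstantPoolIdxUnpoisoned(operand_index);
  return LoadConstantPoolEntry(index);
}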
@@ -123,8 +122,7 @@ IGNITION_HANDLER(LdaFalse, InterpreterAssembler) {
//
// Load accumulator with value from register <src>.
IGNITION_HANDLER(Ldar, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* value = LoadRegister(reg_index);
+ Node* value = LoadRegisterAtOperandIndex(0);
SetAccumulator(value);
Dispatch();
}
@@ -133,9 +131,8 @@ IGNITION_HANDLER(Ldar, InterpreterAssembler) {
//
// Store accumulator to register <dst>.
IGNITION_HANDLER(Star, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
Node* accumulator = GetAccumulator();
- StoreRegister(accumulator, reg_index);
+ StoreRegisterAtOperandIndex(accumulator, 0);
Dispatch();
}
@@ -143,10 +140,8 @@ IGNITION_HANDLER(Star, InterpreterAssembler) {
//
// Stores the value of register <src> to register <dst>.
IGNITION_HANDLER(Mov, InterpreterAssembler) {
- Node* src_index = BytecodeOperandReg(0);
- Node* src_value = LoadRegister(src_index);
- Node* dst_index = BytecodeOperandReg(1);
- StoreRegister(src_value, dst_index);
+ Node* src_value = LoadRegisterAtOperandIndex(0);
+ StoreRegisterAtOperandIndex(src_value, 1);
Dispatch();
}
@@ -169,8 +164,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
LazyNode<Context> lazy_context = [=] { return CAST(GetContext()); };
LazyNode<Name> lazy_name = [=] {
- Node* name_index = BytecodeOperandIdx(name_operand_index);
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(name_operand_index);
return CAST(name);
};
@@ -214,8 +208,7 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
Node* context = GetContext();
// Store the global via the StoreGlobalIC.
- Node* constant_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(constant_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
@@ -230,8 +223,7 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* context = LoadRegister(reg_index);
+ Node* context = LoadRegisterAtOperandIndex(0);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
Node* slot_context = GetContextAtDepth(context, depth);
@@ -245,8 +237,7 @@ IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) {
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* context = LoadRegister(reg_index);
+ Node* context = LoadRegisterAtOperandIndex(0);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
Node* slot_context = GetContextAtDepth(context, depth);
@@ -283,8 +274,7 @@ IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) {
// |depth| in the context chain starting at |context|.
IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* reg_index = BytecodeOperandReg(0);
- Node* context = LoadRegister(reg_index);
+ Node* context = LoadRegisterAtOperandIndex(0);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
Node* slot_context = GetContextAtDepth(context, depth);
@@ -309,8 +299,7 @@ IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
- Node* name_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* context = GetContext();
Node* result = CallRuntime(Runtime::kLoadLookupSlot, context, name);
SetAccumulator(result);
@@ -322,8 +311,7 @@ IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
IGNITION_HANDLER(LdaLookupSlotInsideTypeof, InterpreterAssembler) {
- Node* name_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* context = GetContext();
Node* result =
CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, context, name);
@@ -340,7 +328,6 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
void LookupContextSlot(Runtime::FunctionId function_id) {
Node* context = GetContext();
- Node* name_index = BytecodeOperandIdx(0);
Node* slot_index = BytecodeOperandIdx(1);
Node* depth = BytecodeOperandUImm(2);
@@ -360,7 +347,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
// Slow path when we have to call out to the runtime.
BIND(&slowpath);
{
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* result = CallRuntime(function_id, context, name);
SetAccumulator(result);
Dispatch();
@@ -416,8 +403,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
// Slow path when we have to call out to the runtime
BIND(&slowpath);
{
- Node* name_index = BytecodeOperandIdx(0);
- Node* name = LoadConstantPoolEntry(name_index);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* result = CallRuntime(function_id, context, name);
SetAccumulator(result);
Dispatch();
@@ -448,9 +434,8 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof,
// pool entry |name_index|.
IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
Node* bytecode_flags = BytecodeOperandFlag(1);
- Node* name = LoadConstantPoolEntry(index);
Node* context = GetContext();
Variable var_result(this, MachineRepresentation::kTagged);
@@ -510,14 +495,11 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
Node* smi_slot = SmiTag(feedback_slot);
// Load receiver.
- Node* register_index = BytecodeOperandReg(0);
- Node* recv = LoadRegister(register_index);
+ Node* recv = LoadRegisterAtOperandIndex(0);
// Load the name.
// TODO(jgruber): Not needed for monomorphic smi handler constant/field case.
- Node* constant_index = BytecodeOperandIdx(1);
- Node* name = LoadConstantPoolEntry(constant_index);
-
+ Node* name = LoadConstantPoolEntryAtOperandIndex(1);
Node* context = GetContext();
Label done(this);
@@ -543,8 +525,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC);
Node* code_target = HeapConstant(ic.code());
- Node* reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(reg_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
Node* name = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
@@ -565,10 +546,8 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
void StaNamedProperty(Callable ic) {
Node* code_target = HeapConstant(ic.code());
- Node* object_reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(object_reg_index);
- Node* constant_index = BytecodeOperandIdx(1);
- Node* name = LoadConstantPoolEntry(constant_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(1);
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
@@ -611,10 +590,8 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
Node* code_target = HeapConstant(ic.code());
- Node* object_reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(object_reg_index);
- Node* name_reg_index = BytecodeOperandReg(1);
- Node* name = LoadRegister(name_reg_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadRegisterAtOperandIndex(1);
Node* value = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(2);
Node* smi_slot = SmiTag(raw_slot);
@@ -638,10 +615,10 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
// This definition is not observable and is used only for definitions
// in object or class literals.
IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) {
- Node* object = LoadRegister(BytecodeOperandReg(0));
- Node* name = LoadRegister(BytecodeOperandReg(1));
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadRegisterAtOperandIndex(1);
Node* value = GetAccumulator();
- Node* flags = SmiFromWord32(BytecodeOperandFlag(2));
+ Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
Node* vector_index = SmiTag(BytecodeOperandIdx(3));
Node* feedback_vector = LoadFeedbackVector();
@@ -749,10 +726,9 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
// Saves the current context in <context>, and pushes the accumulator as the
// new current context.
IGNITION_HANDLER(PushContext, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
Node* new_context = GetAccumulator();
Node* old_context = GetContext();
- StoreRegister(old_context, reg_index);
+ StoreRegisterAtOperandIndex(old_context, 0);
SetContext(new_context);
Dispatch();
}
@@ -761,8 +737,7 @@ IGNITION_HANDLER(PushContext, InterpreterAssembler) {
//
// Pops the current context and sets <context> as the new context.
IGNITION_HANDLER(PopContext, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* context = LoadRegister(reg_index);
+ Node* context = LoadRegisterAtOperandIndex(0);
SetContext(context);
Dispatch();
}
@@ -780,8 +755,7 @@ class InterpreterBinaryOpAssembler : public InterpreterAssembler {
bool lhs_is_smi);
void BinaryOpWithFeedback(BinaryOpGenerator generator) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* lhs = LoadRegister(reg_index);
+ Node* lhs = LoadRegisterAtOperandIndex(0);
Node* rhs = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
@@ -902,8 +876,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}
void BitwiseBinaryOpWithFeedback(Operation bitwise_op) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* left = LoadRegister(reg_index);
+ Node* left = LoadRegisterAtOperandIndex(0);
Node* right = GetAccumulator();
Node* context = GetContext();
Node* slot_index = BytecodeOperandIdx(1);
@@ -969,7 +942,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
&var_left_bigint, &var_left_feedback);
BIND(&do_smi_op);
Node* result =
- BitwiseOp(var_left_word32.value(), SmiToWord32(right), bitwise_op);
+ BitwiseOp(var_left_word32.value(), SmiToInt32(right), bitwise_op);
Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
@@ -1279,7 +1252,7 @@ IGNITION_HANDLER(ToName, InterpreterAssembler) {
Node* object = GetAccumulator();
Node* context = GetContext();
Node* result = ToName(context, object);
- StoreRegister(result, BytecodeOperandReg(0));
+ StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1306,7 +1279,7 @@ IGNITION_HANDLER(ToObject, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* context = GetContext();
Node* result = CallStub(callable.descriptor(), target, context, accumulator);
- StoreRegister(result, BytecodeOperandReg(0));
+ StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1449,8 +1422,7 @@ IGNITION_HANDLER(TypeOf, InterpreterAssembler) {
// Delete the property specified in the accumulator from the object
// referenced by the register operand following strict mode semantics.
IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(reg_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
Node* key = GetAccumulator();
Node* context = GetContext();
Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
@@ -1464,8 +1436,7 @@ IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
// Delete the property specified in the accumulator from the object
// referenced by the register operand following sloppy mode semantics.
IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(reg_index);
+ Node* object = LoadRegisterAtOperandIndex(0);
Node* key = GetAccumulator();
Node* context = GetContext();
Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
@@ -1482,8 +1453,7 @@ IGNITION_HANDLER(GetSuperConstructor, InterpreterAssembler) {
Node* active_function = GetAccumulator();
Node* context = GetContext();
Node* result = GetSuperConstructor(active_function, context);
- Node* reg = BytecodeOperandReg(0);
- StoreRegister(result, reg);
+ StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1495,20 +1465,8 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
// Generates code to perform a JS call that collects type feedback.
void JSCall(ConvertReceiverMode receiver_mode) {
- Node* function_reg = BytecodeOperandReg(0);
- Node* function = LoadRegister(function_reg);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* arg_list_count = BytecodeOperandCount(2);
- Node* args_count;
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // The receiver is implied, so it is not in the argument list.
- args_count = arg_list_count;
- } else {
- // Subtract the receiver from the argument count.
- Node* receiver_count = Int32Constant(1);
- args_count = Int32Sub(arg_list_count, receiver_count);
- }
+ Node* function = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
@@ -1517,7 +1475,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
CollectCallFeedback(function, context, feedback_vector, slot_id);
// Call the function and dispatch to the next handler.
- CallJSAndDispatch(function, context, first_arg, args_count, receiver_mode);
+ CallJSAndDispatch(function, context, args, receiver_mode);
}
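Note the receiver arithmetic that disappears above: with an implicit (null-or-undefined) receiver every register in the list is an argument, otherwise the first register holds the receiver. A stand-alone model of the count the old code computed inline (the adjustment is assumed to now live behind the RegListNodePair-based CallJSAndDispatch):

int JSCallArgumentCount(int register_list_count, bool receiver_is_implicit) {
  // Implicit receiver: it is not in the register list, so nothing to drop.
  // Explicit receiver: the first register is the receiver, not an argument.
  return receiver_is_implicit ? register_list_count : register_list_count - 1;
}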
// Generates code to perform a JS call with a known number of arguments that
@@ -1531,8 +1489,7 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
const int kSlotOperandIndex =
kFirstArgumentOperandIndex + kRecieverAndArgOperandCount;
- Node* function_reg = BytecodeOperandReg(0);
- Node* function = LoadRegister(function_reg);
+ Node* function = LoadRegisterAtOperandIndex(0);
Node* slot_id = BytecodeOperandIdx(kSlotOperandIndex);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
@@ -1548,20 +1505,20 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
case 1:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
break;
case 2:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)),
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 1)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
break;
case 3:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)),
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 1)),
- LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 2)));
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
+ LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
break;
default:
UNREACHABLE();
@@ -1617,11 +1574,9 @@ IGNITION_HANDLER(CallUndefinedReceiver2, InterpreterJSCallAssembler) {
// registers.
IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
Node* function_id = BytecodeOperandRuntimeId(0);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* args_count = BytecodeOperandCount(2);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* context = GetContext();
- Node* result = CallRuntimeN(function_id, context, first_arg, args_count);
+ Node* result = CallRuntimeN(function_id, context, args);
SetAccumulator(result);
Dispatch();
}
@@ -1633,11 +1588,9 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
// arguments in subsequent registers.
IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
Node* function_id = BytecodeOperandIntrinsicId(0);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* arg_count = BytecodeOperandCount(2);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* context = GetContext();
- Node* result = GenerateInvokeIntrinsic(this, function_id, context,
- first_arg_reg, arg_count);
+ Node* result = GenerateInvokeIntrinsic(this, function_id, context, args);
SetAccumulator(result);
Dispatch();
}
@@ -1651,19 +1604,13 @@ IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) {
IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// Call the runtime function.
Node* function_id = BytecodeOperandRuntimeId(0);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* args_count = BytecodeOperandCount(2);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* context = GetContext();
- Node* result_pair =
- CallRuntimeN(function_id, context, first_arg, args_count, 2);
+ Node* result_pair = CallRuntimeN(function_id, context, args, 2);
// Store the results in <first_return> and <first_return + 1>
- Node* first_return_reg = BytecodeOperandReg(3);
- Node* second_return_reg = NextRegister(first_return_reg);
Node* result0 = Projection(0, result_pair);
Node* result1 = Projection(1, result_pair);
- StoreRegister(result0, first_return_reg);
- StoreRegister(result1, second_return_reg);
+ StoreRegisterPairAtOperandIndex(result0, result1, 3);
Dispatch();
}
@@ -1673,9 +1620,7 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
// in register |receiver| and |arg_count| arguments in subsequent registers.
IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
Node* context_index = BytecodeOperandNativeContextIndex(0);
- Node* receiver_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(receiver_reg);
- Node* args_count = BytecodeOperandCount(2);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
// Get the function to call from the native context.
Node* context = GetContext();
@@ -1683,7 +1628,7 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
Node* function = LoadContextElement(native_context, context_index);
// Call the function.
- CallJSAndDispatch(function, context, first_arg, args_count,
+ CallJSAndDispatch(function, context, args,
ConvertReceiverMode::kNullOrUndefined);
}
@@ -1694,20 +1639,15 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
// final argument is always a spread.
//
IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
- Node* callable_reg = BytecodeOperandReg(0);
- Node* callable = LoadRegister(callable_reg);
- Node* receiver_reg = BytecodeOperandReg(1);
- Node* receiver_arg = RegisterLocation(receiver_reg);
- Node* receiver_args_count = BytecodeOperandCount(2);
- Node* receiver_count = Int32Constant(1);
- Node* args_count = Int32Sub(receiver_args_count, receiver_count);
+ Node* callable = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
// Call into Runtime function CallWithSpread which does everything.
- CallJSWithSpreadAndDispatch(callable, context, receiver_arg, args_count,
- slot_id, feedback_vector);
+ CallJSWithSpreadAndDispatch(callable, context, args, slot_id,
+ feedback_vector);
}
// ConstructWithSpread <first_arg> <arg_count>
@@ -1718,17 +1658,13 @@ IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
//
IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
Node* new_target = GetAccumulator();
- Node* constructor_reg = BytecodeOperandReg(0);
- Node* constructor = LoadRegister(constructor_reg);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* args_count = BytecodeOperandCount(2);
+ Node* constructor = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- Node* result =
- ConstructWithSpread(constructor, context, new_target, first_arg,
- args_count, slot_id, feedback_vector);
+ Node* result = ConstructWithSpread(constructor, context, new_target, args,
+ slot_id, feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -1741,16 +1677,13 @@ IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) {
//
IGNITION_HANDLER(Construct, InterpreterAssembler) {
Node* new_target = GetAccumulator();
- Node* constructor_reg = BytecodeOperandReg(0);
- Node* constructor = LoadRegister(constructor_reg);
- Node* first_arg_reg = BytecodeOperandReg(1);
- Node* first_arg = RegisterLocation(first_arg_reg);
- Node* args_count = BytecodeOperandCount(2);
+ Node* constructor = LoadRegisterAtOperandIndex(0);
+ RegListNodePair args = GetRegisterListAtOperandIndex(1);
Node* slot_id = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- Node* result = Construct(constructor, context, new_target, first_arg,
- args_count, slot_id, feedback_vector);
+ Node* result = Construct(constructor, context, new_target, args, slot_id,
+ feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -1762,8 +1695,7 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}
void CompareOpWithFeedback(Operation compare_op) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* lhs = LoadRegister(reg_index);
+ Node* lhs = LoadRegisterAtOperandIndex(0);
Node* rhs = GetAccumulator();
Node* context = GetContext();
@@ -1844,8 +1776,7 @@ IGNITION_HANDLER(TestGreaterThanOrEqual, InterpreterCompareOpAssembler) {
// Test if the value in the <src> register is strictly equal to the accumulator.
// Type feedback is not collected.
IGNITION_HANDLER(TestEqualStrictNoFeedback, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* lhs = LoadRegister(reg_index);
+ Node* lhs = LoadRegisterAtOperandIndex(0);
Node* rhs = GetAccumulator();
// TODO(5310): This is called only when lhs and rhs are Smis (for ex:
// try-finally or generators) or strings (only when visiting
@@ -1861,8 +1792,7 @@ IGNITION_HANDLER(TestEqualStrictNoFeedback, InterpreterAssembler) {
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
IGNITION_HANDLER(TestIn, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* property = LoadRegister(reg_index);
+ Node* property = LoadRegisterAtOperandIndex(0);
Node* object = GetAccumulator();
Node* context = GetContext();
@@ -1875,8 +1805,7 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) {
// Test if the object referenced by the <src> register is an instance of the type
// referenced by the accumulator.
IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) {
- Node* object_reg = BytecodeOperandReg(0);
- Node* object = LoadRegister(object_reg);
+ Node* object = LoadRegisterAtOperandIndex(0);
Node* callable = GetAccumulator();
Node* slot_id = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
@@ -2063,8 +1992,7 @@ IGNITION_HANDLER(Jump, InterpreterAssembler) {
// Jump by the number of bytes in the Smi in the |idx| entry in the constant
// pool.
IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Jump(relative_jump);
}
@@ -2088,8 +2016,7 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
CSA_ASSERT(this, IsBoolean(accumulator));
JumpIfWordEqual(accumulator, TrueConstant(), relative_jump);
@@ -2115,8 +2042,7 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
CSA_ASSERT(this, TaggedIsNotSmi(accumulator));
CSA_ASSERT(this, IsBoolean(accumulator));
JumpIfWordEqual(accumulator, FalseConstant(), relative_jump);
@@ -2144,8 +2070,7 @@ IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -2176,8 +2101,7 @@ IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
Node* value = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
@@ -2202,8 +2126,7 @@ IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
// pool if the object referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
JumpIfWordEqual(accumulator, NullConstant(), relative_jump);
}
@@ -2223,8 +2146,7 @@ IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
// pool if the object referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
JumpIfWordNotEqual(accumulator, NullConstant(), relative_jump);
}
@@ -2244,8 +2166,7 @@ IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
// pool if the object referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
JumpIfWordEqual(accumulator, UndefinedConstant(), relative_jump);
}
@@ -2266,8 +2187,7 @@ IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
// constant.
IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
JumpIfWordNotEqual(accumulator, UndefinedConstant(), relative_jump);
}
@@ -2297,8 +2217,7 @@ IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
// pool if the object referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
- Node* index = BytecodeOperandIdx(0);
- Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+ Node* relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_object(this), if_notobject(this), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
@@ -2378,11 +2297,10 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
- Node* pattern_index = BytecodeOperandIdx(0);
- Node* pattern = LoadConstantPoolEntry(pattern_index);
+ Node* pattern = LoadConstantPoolEntryAtOperandIndex(0);
Node* feedback_vector = LoadFeedbackVector();
Node* slot_id = BytecodeOperandIdx(1);
- Node* flags = SmiFromWord32(BytecodeOperandFlag(2));
+ Node* flags = SmiFromInt32(BytecodeOperandFlag(2));
Node* context = GetContext();
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitCreateRegExpLiteral(
@@ -2421,8 +2339,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
Node* flags_raw = DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
bytecode_flags);
Node* flags = SmiTag(flags_raw);
- Node* index = BytecodeOperandIdx(0);
- Node* constant_elements = LoadConstantPoolEntry(index);
+ Node* constant_elements = LoadConstantPoolEntryAtOperandIndex(0);
Node* result =
CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
SmiTag(slot_id), constant_elements, flags);
@@ -2466,15 +2383,14 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitCreateShallowObjectLiteral(
feedback_vector, slot_id, &if_not_fast_clone);
- StoreRegister(result, BytecodeOperandReg(3));
+ StoreRegisterAtOperandIndex(result, 3);
Dispatch();
}
BIND(&if_not_fast_clone);
{
// If we can't do a fast clone, call into the runtime.
- Node* index = BytecodeOperandIdx(0);
- Node* boilerplate_description = LoadConstantPoolEntry(index);
+ Node* boilerplate_description = LoadConstantPoolEntryAtOperandIndex(0);
Node* context = GetContext();
Node* flags_raw = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
@@ -2484,7 +2400,7 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
Node* result =
CallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
SmiTag(slot_id), boilerplate_description, flags);
- StoreRegister(result, BytecodeOperandReg(3));
+ StoreRegisterAtOperandIndex(result, 3);
// TODO(klaasb) build a single dispatch once the call is inlined
Dispatch();
}
@@ -2501,19 +2417,34 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
Dispatch();
}
-// GetTemplateObject
+// GetTemplateObject <descriptor_idx> <literal_idx>
//
// Creates the template to pass for tagged templates and returns it in the
// accumulator, creating and caching the site object on-demand as per the
// specification.
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
- Node* description_index = BytecodeOperandIdx(0);
- Node* description = LoadConstantPoolEntry(description_index);
- Node* context = GetContext();
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* slot = BytecodeOperandIdx(1);
+ Node* cached_value =
+ LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS);
- Node* result = CallRuntime(Runtime::kGetTemplateObject, context, description);
- SetAccumulator(result);
+ Label call_runtime(this, Label::kDeferred);
+ GotoIf(WordEqual(cached_value, SmiConstant(0)), &call_runtime);
+
+ SetAccumulator(cached_value);
Dispatch();
+
+ BIND(&call_runtime);
+ {
+ Node* description = LoadConstantPoolEntryAtOperandIndex(0);
+ Node* context = GetContext();
+ Node* result =
+ CallRuntime(Runtime::kCreateTemplateObject, context, description);
+ StoreFeedbackVectorSlot(feedback_vector, slot, result, UPDATE_WRITE_BARRIER,
+ 0, INTPTR_PARAMETERS);
+ SetAccumulator(result);
+ Dispatch();
+ }
}
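The new handler is per-call-site memoization: consult the feedback slot and only build (and cache) the template object on a miss, with Smi 0 as the "not yet created" sentinel. A self-contained model of the pattern, using illustrative stand-in types:

#include <string>
#include <unordered_map>

using TemplateObject = std::string;  // stand-in for the cached JSArray

TemplateObject CreateTemplateObject(const std::string& description) {
  return "template(" + description + ")";  // models the runtime call
}

TemplateObject GetTemplateObject(
    std::unordered_map<int, TemplateObject>& feedback_slots, int slot,
    const std::string& description) {
  auto it = feedback_slots.find(slot);
  if (it != feedback_slots.end()) return it->second;  // cache hit: fast path
  TemplateObject result = CreateTemplateObject(description);  // slow path
  feedback_slots.emplace(slot, result);  // cache on demand for the next call
  return result;
}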
// CreateClosure <index> <slot> <tenured>
@@ -2521,35 +2452,47 @@ IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* shared = LoadConstantPoolEntry(index);
+ Node* shared = LoadConstantPoolEntryAtOperandIndex(0);
Node* flags = BytecodeOperandFlag(2);
Node* context = GetContext();
-
- Label call_runtime(this, Label::kDeferred);
- GotoIfNot(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
- &call_runtime);
- ConstructorBuiltinsAssembler constructor_assembler(state());
- Node* vector_index = BytecodeOperandIdx(1);
- vector_index = SmiTag(vector_index);
+ Node* slot = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
- SetAccumulator(constructor_assembler.EmitFastNewClosure(
- shared, feedback_vector, vector_index, context));
- Dispatch();
+ Node* feedback_cell = LoadFeedbackVectorSlot(feedback_vector, slot);
- BIND(&call_runtime);
+ Label if_fast(this), if_slow(this, Label::kDeferred);
+ Branch(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags), &if_fast,
+ &if_slow);
+
+ BIND(&if_fast);
{
- Node* tenured_raw =
- DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
- Node* tenured = SmiTag(tenured_raw);
- feedback_vector = LoadFeedbackVector();
- vector_index = BytecodeOperandIdx(1);
- vector_index = SmiTag(vector_index);
- Node* result = CallRuntime(Runtime::kInterpreterNewClosure, context, shared,
- feedback_vector, vector_index, tenured);
+ Node* result =
+ CallBuiltin(Builtins::kFastNewClosure, context, shared, feedback_cell);
SetAccumulator(result);
Dispatch();
}
+
+ BIND(&if_slow);
+ {
+ Label if_newspace(this), if_oldspace(this);
+ Branch(IsSetWord32<CreateClosureFlags::PretenuredBit>(flags), &if_oldspace,
+ &if_newspace);
+
+ BIND(&if_newspace);
+ {
+ Node* result =
+ CallRuntime(Runtime::kNewClosure, context, shared, feedback_cell);
+ SetAccumulator(result);
+ Dispatch();
+ }
+
+ BIND(&if_oldspace);
+ {
+ Node* result = CallRuntime(Runtime::kNewClosure_Tenured, context, shared,
+ feedback_cell);
+ SetAccumulator(result);
+ Dispatch();
+ }
+ }
}
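The rewritten CreateClosure is a three-way dispatch on two flag bits: fast path through a builtin, otherwise a runtime call chosen by the pretenure bit. A minimal sketch with an assumed flag layout (the real bits come from CreateClosureFlags):

#include <cstdint>

constexpr uint32_t kFastNewClosureBit = 1u << 0;  // assumed layout
constexpr uint32_t kPretenuredBit = 1u << 1;      // assumed layout

const char* ClosureAllocationPath(uint32_t flags) {
  if (flags & kFastNewClosureBit) return "Builtins::kFastNewClosure";
  if (flags & kPretenuredBit) return "Runtime::kNewClosure_Tenured";  // old space
  return "Runtime::kNewClosure";  // new space
}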
// CreateBlockContext <index>
@@ -2557,8 +2500,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
// Creates a new block context with the scope info constant at |index| and the
// closure in the accumulator.
IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
- Node* index = BytecodeOperandIdx(0);
- Node* scope_info = LoadConstantPoolEntry(index);
+ Node* scope_info = LoadConstantPoolEntryAtOperandIndex(0);
Node* closure = GetAccumulator();
Node* context = GetContext();
SetAccumulator(
@@ -2572,12 +2514,9 @@ IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
// the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the
// closure in the accumulator.
IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
- Node* exception_reg = BytecodeOperandReg(0);
- Node* exception = LoadRegister(exception_reg);
- Node* name_idx = BytecodeOperandIdx(1);
- Node* name = LoadConstantPoolEntry(name_idx);
- Node* scope_info_idx = BytecodeOperandIdx(2);
- Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+ Node* exception = LoadRegisterAtOperandIndex(0);
+ Node* name = LoadConstantPoolEntryAtOperandIndex(1);
+ Node* scope_info = LoadConstantPoolEntryAtOperandIndex(2);
Node* closure = GetAccumulator();
Node* context = GetContext();
SetAccumulator(CallRuntime(Runtime::kPushCatchContext, context, name,
@@ -2617,10 +2556,8 @@ IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
// with-statement with the object in |register| and the closure in the
// accumulator.
IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) {
- Node* reg_index = BytecodeOperandReg(0);
- Node* object = LoadRegister(reg_index);
- Node* scope_info_idx = BytecodeOperandIdx(1);
- Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+ Node* object = LoadRegisterAtOperandIndex(0);
+ Node* scope_info = LoadConstantPoolEntryAtOperandIndex(1);
Node* closure = GetAccumulator();
Node* context = GetContext();
SetAccumulator(CallRuntime(Runtime::kPushWithContext, context, object,
@@ -2771,7 +2708,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
BIND(&throw_error);
{
- Node* name = LoadConstantPoolEntry(BytecodeOperandIdx(0));
+ Node* name = LoadConstantPoolEntryAtOperandIndex(0);
CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
// We shouldn't ever return from a throw.
Abort(AbortReason::kUnexpectedReturnFromThrow);
@@ -2834,10 +2771,10 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
Node* result_pair = \
CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
Node* return_value = Projection(0, result_pair); \
- Node* original_handler = Projection(1, result_pair); \
+ Node* original_bytecode = SmiUntag(Projection(1, result_pair)); \
MaybeDropFrames(context); \
SetAccumulator(return_value); \
- DispatchToBytecodeHandler(original_handler); \
+ DispatchToBytecode(original_bytecode, BytecodeOffset()); \
}
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
@@ -2856,30 +2793,13 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
Dispatch();
}
-class InterpreterForInPrepareAssembler : public InterpreterAssembler {
- public:
- InterpreterForInPrepareAssembler(CodeAssemblerState* state, Bytecode bytecode,
- OperandScale operand_scale)
- : InterpreterAssembler(state, bytecode, operand_scale) {}
-
- void BuildForInPrepareResult(Node* output_register, Node* cache_type,
- Node* cache_array, Node* cache_length) {
- StoreRegister(cache_type, output_register);
- output_register = NextRegister(output_register);
- StoreRegister(cache_array, output_register);
- output_register = NextRegister(output_register);
- StoreRegister(cache_length, output_register);
- }
-};
-
// ForInEnumerate <receiver>
//
// Enumerates the enumerable keys of the |receiver| and either returns the
// map of the |receiver| if it has a usable enum cache or a fixed array
// with the keys to enumerate in the accumulator.
IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
- Node* receiver_register = BytecodeOperandReg(0);
- Node* receiver = LoadRegister(receiver_register);
+ Node* receiver = LoadRegisterAtOperandIndex(0);
Node* context = GetContext();
Label if_empty(this), if_runtime(this, Label::kDeferred);
@@ -2910,9 +2830,8 @@ IGNITION_HANDLER(ForInEnumerate, InterpreterAssembler) {
// The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
-IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
+IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
Node* enumerator = GetAccumulator();
- Node* output_register = BytecodeOperandReg(0);
Node* vector_index = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
@@ -2946,8 +2865,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
Node* cache_type = enumerator;
Node* cache_array = enum_keys;
Node* cache_length = SmiTag(enum_length);
- BuildForInPrepareResult(output_register, cache_type, cache_array,
- cache_length);
+ StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
}
@@ -2964,8 +2882,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
Node* cache_type = enumerator;
Node* cache_array = enumerator;
Node* cache_length = LoadFixedArrayBaseLength(enumerator);
- BuildForInPrepareResult(output_register, cache_type, cache_array,
- cache_length);
+ StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
}
}
@@ -2974,14 +2891,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
//
// Returns the next enumerable property in the accumulator.
IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
- Node* receiver_reg = BytecodeOperandReg(0);
- Node* receiver = LoadRegister(receiver_reg);
- Node* index_reg = BytecodeOperandReg(1);
- Node* index = LoadRegister(index_reg);
- Node* cache_type_reg = BytecodeOperandReg(2);
- Node* cache_type = LoadRegister(cache_type_reg);
- Node* cache_array_reg = NextRegister(cache_type_reg);
- Node* cache_array = LoadRegister(cache_array_reg);
+ Node* receiver = LoadRegisterAtOperandIndex(0);
+ Node* index = LoadRegisterAtOperandIndex(1);
+ Node* cache_type;
+ Node* cache_array;
+ std::tie(cache_type, cache_array) = LoadRegisterPairAtOperandIndex(2);
Node* vector_index = BytecodeOperandIdx(3);
Node* feedback_vector = LoadFeedbackVector();
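LoadRegisterPairAtOperandIndex is assumed to return two adjacent registers at once (here cache_type and cache_array), which the caller unpacks with std::tie. A trivial stand-alone analogue of that contract:

#include <utility>

std::pair<int, int> LoadRegisterPair(const int* register_file, int first) {
  // The operand names the first register of a consecutive pair.
  return {register_file[first], register_file[first + 1]};
}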
@@ -3017,10 +2931,8 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
//
// Returns false if the end of the enumerable properties has been reached.
IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
- Node* index_reg = BytecodeOperandReg(0);
- Node* index = LoadRegister(index_reg);
- Node* cache_length_reg = BytecodeOperandReg(1);
- Node* cache_length = LoadRegister(cache_length_reg);
+ Node* index = LoadRegisterAtOperandIndex(0);
+ Node* cache_length = LoadRegisterAtOperandIndex(1);
// Check if {index} is at {cache_length} already.
Label if_true(this), if_false(this), end(this);
@@ -3044,8 +2956,7 @@ IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
IGNITION_HANDLER(ForInStep, InterpreterAssembler) {
- Node* index_reg = BytecodeOperandReg(0);
- Node* index = LoadRegister(index_reg);
+ Node* index = LoadRegisterAtOperandIndex(0);
Node* one = SmiConstant(1);
Node* result = SmiAdd(index, one);
SetAccumulator(result);
@@ -3078,34 +2989,16 @@ IGNITION_HANDLER(Illegal, InterpreterAssembler) {
//
// Exports the register file and stores it into the generator. Also stores the
// current context, |suspend_id|, and the current bytecode offset (for debugging
-// purposes) into the generator.
+// purposes) into the generator. Then, returns the value in the accumulator.
IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
- Node* generator_reg = BytecodeOperandReg(0);
-
- Node* generator = LoadRegister(generator_reg);
-
- Label if_stepping(this, Label::kDeferred), ok(this);
- Node* step_action_address = ExternalConstant(
- ExternalReference::debug_last_step_action_address(isolate()));
- Node* step_action = Load(MachineType::Int8(), step_action_address);
- STATIC_ASSERT(StepIn > StepNext);
- STATIC_ASSERT(LastStepAction == StepIn);
- Node* step_next = Int32Constant(StepNext);
- Branch(Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
- BIND(&ok);
-
+ Node* generator = LoadRegisterAtOperandIndex(0);
Node* array =
LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
Node* context = GetContext();
+ RegListNodePair registers = GetRegisterListAtOperandIndex(1);
Node* suspend_id = BytecodeOperandUImmSmi(3);
- // Bytecode operand 1 should be always 0 (we are always store registers
- // from the beginning).
- CSA_ASSERT(this, WordEqual(BytecodeOperandReg(1),
- IntPtrConstant(Register(0).ToOperand())));
- // Bytecode operand 2 is the number of registers to store to the generator.
- Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(2));
- ExportRegisterFile(array, register_count);
+ ExportRegisterFile(array, registers);
StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
suspend_id);
@@ -3115,59 +3008,66 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Node* offset = SmiTag(BytecodeOffset());
StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
offset);
- Dispatch();
- BIND(&if_stepping);
- {
- Node* context = GetContext();
- CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
- Goto(&ok);
- }
+ UpdateInterruptBudgetOnReturn();
+ Return(GetAccumulator());
}
-// RestoreGeneratorState <generator>
+// SwitchOnGeneratorState <generator> <table_start> <table_length>
//
-// Loads the generator's state and stores it in the accumulator,
-// before overwriting it with kGeneratorExecuting.
-IGNITION_HANDLER(RestoreGeneratorState, InterpreterAssembler) {
- Node* generator_reg = BytecodeOperandReg(0);
- Node* generator = LoadRegister(generator_reg);
+// If |generator| is undefined, falls through. Otherwise, loads the
+// generator's state (overwriting it with kGeneratorExecuting), sets the context
+// to the generator's resume context, and performs state dispatch on the
+// generator's state by looking up the generator state in a jump table in the
+// constant pool, starting at |table_start|, and of length |table_length|.
+IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
+ Node* generator = LoadRegisterAtOperandIndex(0);
+
+ Label fallthrough(this);
+ GotoIf(WordEqual(generator, UndefinedConstant()), &fallthrough);
- Node* old_state =
+ Node* state =
LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
- Node* new_state = Int32Constant(JSGeneratorObject::kGeneratorExecuting);
+ Node* new_state = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
- SmiTag(new_state));
- SetAccumulator(old_state);
+ new_state);
+
+ Node* context = LoadObjectField(generator, JSGeneratorObject::kContextOffset);
+ SetContext(context);
+
+ Node* table_start = BytecodeOperandIdx(1);
+ // TODO(leszeks): table_length is only used for a CSA_ASSERT, we don't
+ // actually need it otherwise.
+ Node* table_length = BytecodeOperandUImmWord(2);
+
+ // The state must be a Smi.
+ CSA_ASSERT(this, TaggedIsSmi(state));
+ Node* case_value = SmiUntag(state);
+
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(case_value, IntPtrConstant(0)));
+ CSA_ASSERT(this, IntPtrLessThan(case_value, table_length));
+ USE(table_length);
+
+ Node* entry = IntPtrAdd(table_start, case_value);
+ Node* relative_jump = LoadAndUntagConstantPoolEntry(entry);
+ Jump(relative_jump);
+
+ BIND(&fallthrough);
Dispatch();
}
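The state dispatch is a bounds-checked jump table stored in the constant pool: the generator state indexes a run of |table_length| entries starting at |table_start|, each holding a relative jump offset. A self-contained model of the lookup:

#include <cassert>
#include <vector>

int ResumeJumpOffset(const std::vector<int>& constant_pool, int table_start,
                     int table_length, int state) {
  assert(0 <= state && state < table_length);  // mirrors the CSA_ASSERTs
  return constant_pool[table_start + state];   // relative jump to resume point
}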
-// ResumeGenerator <generator> <generator_state> <first output
-// register> <register count>
+// ResumeGenerator <generator> <first output register> <register count>
//
// Imports the register file stored in the generator and marks the generator
// state as executing.
IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
- Node* generator_reg = BytecodeOperandReg(0);
- Node* generator_state_reg = BytecodeOperandReg(1);
- // Bytecode operand 2 is the start register. It should always be 0, so let's
- // ignore it.
- CSA_ASSERT(this, WordEqual(BytecodeOperandReg(2),
- IntPtrConstant(Register(0).ToOperand())));
- // Bytecode operand 3 is the number of registers to store to the generator.
- Node* register_count = ChangeUint32ToWord(BytecodeOperandCount(3));
-
- Node* generator = LoadRegister(generator_reg);
+ Node* generator = LoadRegisterAtOperandIndex(0);
+ RegListNodePair registers = GetRegisterListAtOperandIndex(1);
ImportRegisterFile(
LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset),
- register_count);
-
- // Since we're resuming, update the generator state to indicate that the
- // generator is now executing.
- StoreRegister(SmiConstant(JSGeneratorObject::kGeneratorExecuting),
- generator_state_reg);
+ registers);
// Return the generator's input_or_debug_pos in the accumulator.
SetAccumulator(
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 7ad8d49b63..e44289bb6c 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/factory-inl.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
@@ -27,8 +28,8 @@ class IntrinsicsGenerator {
zone_(assembler->zone()),
assembler_(assembler) {}
- Node* InvokeIntrinsic(Node* function_id, Node* context, Node* first_arg_reg,
- Node* arg_count);
+ Node* InvokeIntrinsic(Node* function_id, Node* context,
+ const InterpreterAssembler::RegListNodePair& args);
private:
enum InstanceTypeCompareMode {
@@ -38,18 +39,21 @@ class IntrinsicsGenerator {
Node* IsInstanceType(Node* input, int type);
Node* CompareInstanceType(Node* map, int type, InstanceTypeCompareMode mode);
- Node* IntrinsicAsStubCall(Node* input, Node* context,
- Callable const& callable);
- Node* IntrinsicAsBuiltinCall(Node* input, Node* context, Builtins::Name name);
+ Node* IntrinsicAsStubCall(const InterpreterAssembler::RegListNodePair& args,
+ Node* context, Callable const& callable);
+ Node* IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, Node* context,
+ Builtins::Name name);
void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
- Node* name(Node* input, Node* arg_count, Node* context);
+ Node* name(const InterpreterAssembler::RegListNodePair& args, Node* context);
INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
#undef DECLARE_INTRINSIC_HELPER
Isolate* isolate() { return isolate_; }
Zone* zone() { return zone_; }
+ Factory* factory() { return isolate()->factory(); }
Isolate* isolate_;
Zone* zone_;
@@ -58,19 +62,18 @@ class IntrinsicsGenerator {
DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator);
};
-Node* GenerateInvokeIntrinsic(InterpreterAssembler* assembler,
- Node* function_id, Node* context,
- Node* first_arg_reg, Node* arg_count) {
+Node* GenerateInvokeIntrinsic(
+ InterpreterAssembler* assembler, Node* function_id, Node* context,
+ const InterpreterAssembler::RegListNodePair& args) {
IntrinsicsGenerator generator(assembler);
- return generator.InvokeIntrinsic(function_id, context, first_arg_reg,
- arg_count);
+ return generator.InvokeIntrinsic(function_id, context, args);
}
#define __ assembler_->
-Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
- Node* first_arg_reg,
- Node* arg_count) {
+Node* IntrinsicsGenerator::InvokeIntrinsic(
+ Node* function_id, Node* context,
+ const InterpreterAssembler::RegListNodePair& args) {
InterpreterAssembler::Label abort(assembler_), end(assembler_);
InterpreterAssembler::Variable result(assembler_,
MachineRepresentation::kTagged);
@@ -90,17 +93,17 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
#undef CASE
__ Switch(function_id, &abort, cases, labels, arraysize(cases));
-#define HANDLE_CASE(name, lower_case, expected_arg_count) \
- __ BIND(&lower_case); \
- { \
- if (FLAG_debug_code && expected_arg_count >= 0) { \
- AbortIfArgCountMismatch(expected_arg_count, arg_count); \
- } \
- Node* value = name(first_arg_reg, arg_count, context); \
- if (value) { \
- result.Bind(value); \
- __ Goto(&end); \
- } \
+#define HANDLE_CASE(name, lower_case, expected_arg_count) \
+ __ BIND(&lower_case); \
+ { \
+ if (FLAG_debug_code && expected_arg_count >= 0) { \
+ AbortIfArgCountMismatch(expected_arg_count, args.reg_count()); \
+ } \
+ Node* value = name(args, context); \
+ if (value) { \
+ result.Bind(value); \
+ __ Goto(&end); \
+ } \
}
INTRINSICS_LIST(HANDLE_CASE)
#undef HANDLE_CASE
@@ -129,238 +132,195 @@ Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
}
Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) {
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- // TODO(ishell): Use Select here.
- InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
- return_false(assembler_), end(assembler_);
- Node* arg = __ LoadRegister(input);
- __ GotoIf(__ TaggedIsSmi(arg), &return_false);
-
- Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
- __ Branch(condition, &return_true, &return_false);
-
- __ BIND(&return_true);
- {
- return_value.Bind(__ TrueConstant());
- __ Goto(&end);
- }
-
- __ BIND(&return_false);
- {
- return_value.Bind(__ FalseConstant());
- __ Goto(&end);
- }
-
- __ BIND(&end);
- return return_value.value();
-}
-
-Node* IntrinsicsGenerator::IsJSReceiver(Node* input, Node* arg_count,
- Node* context) {
- // TODO(ishell): Use Select here.
- // TODO(ishell): Use CSA::IsJSReceiverInstanceType here.
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
- end(assembler_);
-
- Node* arg = __ LoadRegister(input);
- __ GotoIf(__ TaggedIsSmi(arg), &return_false);
-
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
- kInstanceTypeGreaterThanOrEqual);
- __ Branch(condition, &return_true, &return_false);
-
- __ BIND(&return_true);
- {
- return_value.Bind(__ TrueConstant());
- __ Goto(&end);
- }
-
- __ BIND(&return_false);
- {
- return_value.Bind(__ FalseConstant());
- __ Goto(&end);
- }
-
- __ BIND(&end);
- return return_value.value();
-}
-
-Node* IntrinsicsGenerator::IsArray(Node* input, Node* arg_count,
- Node* context) {
+ Node* result =
+ __ Select(__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
+ [=] {
+ return __ SelectBooleanConstant(
+ CompareInstanceType(input, type, kInstanceTypeEqual));
+ },
+ MachineRepresentation::kTagged);
+ return result;
+}
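
The rewrite replaces the hand-rolled label/bind/goto diamond with the assembler's Select combinator, which produces one of two values depending on a condition and merges them into a single result. As a rough eager analogy in plain C++ (the V8 version builds graph nodes rather than executing directly; this sketch is illustrative only):

#include <functional>

// Eager stand-in for the assembler's Select: evaluate exactly one of two
// value thunks depending on the condition and return the merged result.
template <typename T>
T Select(bool condition, const std::function<T()>& if_true,
         const std::function<T()>& if_false) {
  return condition ? if_true() : if_false();
}

// Usage mirroring the smi check above:
//   bool result = Select<bool>(tagged_is_smi, [] { return false; },
//                              [] { return is_instance_type; });
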
+
+Node* IntrinsicsGenerator::IsJSReceiver(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
+ Node* result = __ Select(
+ __ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
+ [=] { return __ SelectBooleanConstant(__ IsJSReceiver(input)); },
+ MachineRepresentation::kTagged);
+ return result;
+}
+
+Node* IntrinsicsGenerator::IsArray(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsJSProxy(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSProxy(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_PROXY_TYPE);
}
-Node* IntrinsicsGenerator::IsTypedArray(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsTypedArray(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsJSMap(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSMap(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_MAP_TYPE);
}
-Node* IntrinsicsGenerator::IsJSSet(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSSet(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_SET_TYPE);
}
-Node* IntrinsicsGenerator::IsJSWeakMap(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSWeakMap(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_WEAK_MAP_TYPE);
}
-Node* IntrinsicsGenerator::IsJSWeakSet(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::IsJSWeakSet(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_WEAK_SET_TYPE);
}
-Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) {
- // TODO(ishell): Use SelectBooleanConstant here.
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
- InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
- end(assembler_);
-
- Node* arg = __ LoadRegister(input);
-
- __ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi);
- __ BIND(&if_smi);
- {
- return_value.Bind(__ TrueConstant());
- __ Goto(&end);
- }
-
- __ BIND(&if_not_smi);
- {
- return_value.Bind(__ FalseConstant());
- __ Goto(&end);
- }
-
- __ BIND(&end);
- return return_value.value();
+Node* IntrinsicsGenerator::IsSmi(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* input = __ LoadRegisterFromRegisterList(args, 0);
+ return __ SelectBooleanConstant(__ TaggedIsSmi(input));
}
-Node* IntrinsicsGenerator::IntrinsicAsStubCall(Node* args_reg, Node* context,
- Callable const& callable) {
+Node* IntrinsicsGenerator::IntrinsicAsStubCall(
+ const InterpreterAssembler::RegListNodePair& args, Node* context,
+ Callable const& callable) {
int param_count = callable.descriptor().GetParameterCount();
int input_count = param_count + 2; // +2 for target and context
- Node** args = zone()->NewArray<Node*>(input_count);
+ Node** stub_args = zone()->NewArray<Node*>(input_count);
int index = 0;
- args[index++] = __ HeapConstant(callable.code());
+ stub_args[index++] = __ HeapConstant(callable.code());
for (int i = 0; i < param_count; i++) {
- args[index++] = __ LoadRegister(args_reg);
- args_reg = __ NextRegister(args_reg);
+ stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
}
- args[index++] = context;
- return __ CallStubN(callable.descriptor(), 1, input_count, args);
+ stub_args[index++] = context;
+ return __ CallStubN(callable.descriptor(), 1, input_count, stub_args);
}
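
IntrinsicAsStubCall lays the CallStubN argument buffer out as [code target, param 0 .. param N-1, context], which is why input_count is param_count + 2. A hedged sketch of the same layout over plain pointers (hypothetical helper, not the V8 API):

// Layout built above: [code target][param 0]...[param N-1][context].
void** BuildStubArgs(void** buffer, void* code_target, void** params,
                     int param_count, void* context) {
  int index = 0;
  buffer[index++] = code_target;
  for (int i = 0; i < param_count; i++) {
    buffer[index++] = params[i];  // one register value per stub parameter
  }
  buffer[index++] = context;
  return buffer;  // buffer must hold param_count + 2 slots
}
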
-Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(Node* input, Node* context,
- Builtins::Name name) {
+Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, Node* context,
+ Builtins::Name name) {
Callable callable = Builtins::CallableFor(isolate_, name);
- return IntrinsicAsStubCall(input, context, callable);
+ return IntrinsicAsStubCall(args, context, callable);
}
-Node* IntrinsicsGenerator::CreateIterResultObject(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::CreateIterResultObject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context,
+ args, context,
Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject));
}
-Node* IntrinsicsGenerator::HasProperty(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::HasProperty(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
+ args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::ToString(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::RejectPromise(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToString));
+ args, context,
+ Builtins::CallableFor(isolate(), Builtins::kRejectPromise));
}
-Node* IntrinsicsGenerator::ToLength(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::ResolvePromise(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
+ args, context,
+ Builtins::CallableFor(isolate(), Builtins::kResolvePromise));
}
-Node* IntrinsicsGenerator::ToInteger(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::ToString(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToInteger));
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToString));
}
-Node* IntrinsicsGenerator::ToNumber(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::ToLength(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToNumber));
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
}
-Node* IntrinsicsGenerator::ToObject(Node* input, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::ToInteger(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
- input, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToInteger));
}
-Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
- Node* context) {
- // First argument register contains the function target.
- Node* function = __ LoadRegister(args_reg);
+Node* IntrinsicsGenerator::ToNumber(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsStubCall(
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToNumber));
+}
- // Receiver is the second runtime call argument.
- Node* receiver_reg = __ NextRegister(args_reg);
- Node* receiver_arg = __ RegisterLocation(receiver_reg);
+Node* IntrinsicsGenerator::ToObject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsStubCall(
+ args, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
+}
- // Subtract function and receiver from arg count.
- Node* function_and_receiver_count = __ Int32Constant(2);
- Node* target_args_count = __ Int32Sub(arg_count, function_and_receiver_count);
+Node* IntrinsicsGenerator::Call(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ // First argument register contains the function target.
+ Node* function = __ LoadRegisterFromRegisterList(args, 0);
+
+ // The arguments for the target function start at the second runtime call
+ // argument.
+ InterpreterAssembler::RegListNodePair target_args(
+ __ RegisterLocationInRegisterList(args, 1),
+ __ Int32Sub(args.reg_count(), __ Int32Constant(1)));
if (FLAG_debug_code) {
InterpreterAssembler::Label arg_count_positive(assembler_);
- Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
+ Node* comparison =
+ __ Int32LessThan(target_args.reg_count(), __ Int32Constant(0));
__ GotoIfNot(comparison, &arg_count_positive);
__ Abort(AbortReason::kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&arg_count_positive);
__ BIND(&arg_count_positive);
}
- __ CallJSAndDispatch(function, context, receiver_arg, target_args_count,
+ __ CallJSAndDispatch(function, context, target_args,
ConvertReceiverMode::kAny);
return nullptr; // We never return from the CallJSAndDispatch above.
}
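
The Call intrinsic now treats its incoming register list as [function, receiver, args...]: it peels off register 0 as the callee and builds a new RegListNodePair starting at register 1 with the count reduced by one, replacing the old NextRegister/Int32Sub bookkeeping; the debug-only check still guards against a negative count. A minimal sketch of the slicing idea over a plain value type (hypothetical struct, not the V8 type):

// Stand-in for RegListNodePair: a base register plus a count.
struct RegList {
  int first_reg;
  int reg_count;
};

// Peel off register 0 (the callee); the remainder are the call arguments.
RegList SliceOffCallee(const RegList& args) {
  return RegList{args.first_reg + 1, args.reg_count - 1};
}
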
-Node* IntrinsicsGenerator::ClassOf(Node* args_reg, Node* arg_count,
- Node* context) {
- Node* value = __ LoadRegister(args_reg);
- return __ ClassOf(value);
-}
-
-Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
- Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
InterpreterAssembler::Label not_receiver(
assembler_, InterpreterAssembler::Label::kDeferred);
InterpreterAssembler::Label done(assembler_);
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
- Node* sync_iterator = __ LoadRegister(args_reg);
+ Node* sync_iterator = __ LoadRegisterFromRegisterList(args, 0);
__ GotoIf(__ TaggedIsSmi(sync_iterator), &not_receiver);
__ GotoIfNot(__ IsJSReceiver(sync_iterator), &not_receiver);
+ Node* const next =
+ __ GetProperty(context, sync_iterator, factory()->next_string());
+
Node* const native_context = __ LoadNativeContext(context);
Node* const map = __ LoadContextElement(
native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX);
@@ -368,6 +328,8 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
__ StoreObjectFieldNoWriteBarrier(
iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
+ __ StoreObjectFieldNoWriteBarrier(iterator,
+ JSAsyncFromSyncIterator::kNextOffset, next);
return_value.Bind(iterator);
__ Goto(&done);
@@ -385,52 +347,41 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
return return_value.value();
}
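
With the added store, the wrapper created here caches both the sync iterator and its "next" method at construction time, so later steps need not re-fetch the property. Purely illustrative layout (not the real V8 object layout):

// Conceptual shape of the wrapper built above.
struct AsyncFromSyncIteratorLayout {
  void* sync_iterator;  // kSyncIteratorOffset
  void* next_method;    // kNextOffset, newly stored by the code above
};
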
-Node* IntrinsicsGenerator::CreateJSGeneratorObject(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsBuiltinCall(input, context,
+Node* IntrinsicsGenerator::CreateJSGeneratorObject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
Builtins::kCreateGeneratorObject);
}
-Node* IntrinsicsGenerator::GeneratorGetContext(Node* args_reg, Node* arg_count,
- Node* context) {
- Node* generator = __ LoadRegister(args_reg);
- Node* const value =
- __ LoadObjectField(generator, JSGeneratorObject::kContextOffset);
-
- return value;
-}
-
-Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos(Node* args_reg,
- Node* arg_count,
- Node* context) {
- Node* generator = __ LoadRegister(args_reg);
+Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* generator = __ LoadRegisterFromRegisterList(args, 0);
Node* const value =
__ LoadObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset);
return value;
}
-Node* IntrinsicsGenerator::GeneratorGetResumeMode(Node* args_reg,
- Node* arg_count,
- Node* context) {
- Node* generator = __ LoadRegister(args_reg);
+Node* IntrinsicsGenerator::GeneratorGetResumeMode(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* generator = __ LoadRegisterFromRegisterList(args, 0);
Node* const value =
__ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
return value;
}
-Node* IntrinsicsGenerator::GeneratorClose(Node* args_reg, Node* arg_count,
- Node* context) {
- Node* generator = __ LoadRegister(args_reg);
+Node* IntrinsicsGenerator::GeneratorClose(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ Node* generator = __ LoadRegisterFromRegisterList(args, 0);
__ StoreObjectFieldNoWriteBarrier(
generator, JSGeneratorObject::kContinuationOffset,
__ SmiConstant(JSGeneratorObject::kGeneratorClosed));
return __ UndefinedConstant();
}
-Node* IntrinsicsGenerator::GetImportMetaObject(Node* args_reg, Node* arg_count,
- Node* context) {
+Node* IntrinsicsGenerator::GetImportMetaObject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
Node* const module_context = __ LoadModuleContext(context);
Node* const module =
__ LoadContextElement(module_context, Context::EXTENSION_INDEX);
@@ -451,21 +402,44 @@ Node* IntrinsicsGenerator::GetImportMetaObject(Node* args_reg, Node* arg_count,
return return_value.value();
}
-Node* IntrinsicsGenerator::AsyncGeneratorReject(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsBuiltinCall(input, context,
- Builtins::kAsyncGeneratorReject);
+Node* IntrinsicsGenerator::AsyncFunctionAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncFunctionAwaitCaught);
+}
+
+Node* IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncFunctionAwaitUncaught);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncGeneratorAwaitCaught);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kAsyncGeneratorAwaitUncaught);
+}
+
+Node* IntrinsicsGenerator::AsyncGeneratorReject(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject);
}
-Node* IntrinsicsGenerator::AsyncGeneratorResolve(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsBuiltinCall(input, context,
+Node* IntrinsicsGenerator::AsyncGeneratorResolve(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context,
Builtins::kAsyncGeneratorResolve);
}
-Node* IntrinsicsGenerator::AsyncGeneratorYield(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsBuiltinCall(input, context, Builtins::kAsyncGeneratorYield);
+Node* IntrinsicsGenerator::AsyncGeneratorYield(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield);
}
void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) {
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
index 11442438d5..fd4e167ed0 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_
#define V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_
+#include "src/interpreter/interpreter-assembler.h"
+
namespace v8 {
namespace internal {
@@ -14,13 +16,9 @@ class Node;
namespace interpreter {
-class InterpreterAssembler;
-
-extern compiler::Node* GenerateInvokeIntrinsic(InterpreterAssembler* assembler,
- compiler::Node* function_id,
- compiler::Node* context,
- compiler::Node* first_arg_reg,
- compiler::Node* arg_count);
+extern compiler::Node* GenerateInvokeIntrinsic(
+ InterpreterAssembler* assembler, compiler::Node* function_id,
+ compiler::Node* context, const InterpreterAssembler::RegListNodePair& args);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index b9137c8559..6cdfec2d04 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -14,17 +14,19 @@ namespace interpreter {
// List of supported intrinsics, with upper case name, lower case name and
// expected number of arguments (-1 denoting a variable argument count).
#define INTRINSICS_LIST(V) \
+ V(AsyncFunctionAwaitCaught, async_function_await_caught, 3) \
+ V(AsyncFunctionAwaitUncaught, async_function_await_uncaught, 3) \
+ V(AsyncGeneratorAwaitCaught, async_generator_await_caught, 2) \
+ V(AsyncGeneratorAwaitUncaught, async_generator_await_uncaught, 2) \
V(AsyncGeneratorReject, async_generator_reject, 2) \
V(AsyncGeneratorResolve, async_generator_resolve, 3) \
V(AsyncGeneratorYield, async_generator_yield, 3) \
V(CreateJSGeneratorObject, create_js_generator_object, 2) \
- V(GeneratorGetContext, generator_get_context, 1) \
V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
V(GeneratorGetInputOrDebugPos, generator_get_input_or_debug_pos, 1) \
V(GeneratorClose, generator_close, 1) \
V(GetImportMetaObject, get_import_meta_object, 0) \
V(Call, call, -1) \
- V(ClassOf, class_of, 1) \
V(CreateIterResultObject, create_iter_result_object, 2) \
V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
V(HasProperty, has_property, 2) \
@@ -37,6 +39,8 @@ namespace interpreter {
V(IsJSWeakSet, is_js_weak_set, 1) \
V(IsSmi, is_smi, 1) \
V(IsTypedArray, is_typed_array, 1) \
+ V(RejectPromise, reject_promise, 3) \
+ V(ResolvePromise, resolve_promise, 2) \
V(ToString, to_string, 1) \
V(ToLength, to_length, 1) \
V(ToInteger, to_integer, 1) \
@@ -65,4 +69,4 @@ class IntrinsicsHelper {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index fb74d37df4..0702536b3d 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -29,7 +29,8 @@ namespace interpreter {
class InterpreterCompilationJob final : public CompilationJob {
public:
InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator);
+ AccountingAllocator* allocator,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals);
protected:
Status PrepareJobImpl(Isolate* isolate) final;
@@ -66,11 +67,6 @@ Code* Interpreter::GetAndMaybeDeserializeBytecodeHandler(
if (!isolate_->heap()->IsDeserializeLazyHandler(code)) return code;
DCHECK(FLAG_lazy_handler_deserialization);
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing handler %s\n",
- Bytecodes::ToString(bytecode, operand_scale).c_str());
- }
-
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
code = Snapshot::DeserializeHandler(isolate_, bytecode, operand_scale);
@@ -123,13 +119,17 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) {
? nullptr
: Code::GetCodeFromTargetAddress(code_entry);
Object* old_code = code;
- v->VisitRootPointer(Root::kDispatchTable, &code);
+ v->VisitRootPointer(Root::kDispatchTable, nullptr, &code);
if (code != old_code) {
dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
}
}
}
+int Interpreter::InterruptBudget() {
+ return FLAG_interrupt_budget;
+}
+
namespace {
void MaybePrintAst(ParseInfo* parse_info, CompilationInfo* compilation_info) {
@@ -163,12 +163,14 @@ bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
InterpreterCompilationJob::InterpreterCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator)
+ AccountingAllocator* allocator,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals)
: CompilationJob(parse_info->stack_limit(), parse_info, &compilation_info_,
"Ignition", State::kReadyToExecute),
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
- generator_(&compilation_info_, parse_info->ast_string_constants()) {}
+ generator_(&compilation_info_, parse_info->ast_string_constants(),
+ eager_inner_literals) {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl(
Isolate* isolate) {
@@ -226,10 +228,12 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
return SUCCEEDED;
}
-CompilationJob* Interpreter::NewCompilationJob(ParseInfo* parse_info,
- FunctionLiteral* literal,
- AccountingAllocator* allocator) {
- return new InterpreterCompilationJob(parse_info, literal, allocator);
+CompilationJob* Interpreter::NewCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals) {
+ return new InterpreterCompilationJob(parse_info, literal, allocator,
+ eager_inner_literals);
}
bool Interpreter::IsDispatchTableInitialized() const {
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 7e6d013a29..83dfea89f9 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -27,6 +27,8 @@ class FunctionLiteral;
class ParseInfo;
class RootVisitor;
class SetupIsolateDelegate;
+template <typename>
+class ZoneVector;
namespace interpreter {
@@ -37,10 +39,16 @@ class Interpreter {
explicit Interpreter(Isolate* isolate);
virtual ~Interpreter() {}
+ // Returns the interrupt budget which should be used for the profiler counter.
+ static int InterruptBudget();
+
// Creates a compilation job which will generate bytecode for |literal|.
- static CompilationJob* NewCompilationJob(ParseInfo* parse_info,
- FunctionLiteral* literal,
- AccountingAllocator* allocator);
+ // Additionally, if |eager_inner_literals| is not null, adds any eagerly
+ // compilable inner FunctionLiterals to this list.
+ static CompilationJob* NewCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator,
+ ZoneVector<FunctionLiteral*>* eager_inner_literals);
// If the bytecode handler for |bytecode| and |operand_scale| has not yet
// been loaded, deserialize it. Then return the handler.
@@ -72,9 +80,6 @@ class Interpreter {
return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
}
- // The interrupt budget which should be used for the profiler counter.
- static const int kInterruptBudget = 144 * KB;
-
private:
friend class SetupInterpreter;
friend class v8::internal::SetupIsolateDelegate;
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index f51c1cd29a..9da1aa4110 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -131,7 +131,7 @@ bool Isolate::IsArrayConstructorIntact() {
return array_constructor_cell->value() == Smi::FromInt(kProtectorValid);
}
-bool Isolate::IsArraySpeciesLookupChainIntact() {
+bool Isolate::IsSpeciesLookupChainIntact() {
// Note: It would be nice to have debug checks to make sure that the
// species protector is accurate, but this would be hard to do for most of
// what the protector stands for:
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 7165d88d34..38506bfc25 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -20,6 +20,8 @@
#include "src/base/utils/random-number-generator.h"
#include "src/basic-block-profiler.h"
#include "src/bootstrapper.h"
+#include "src/builtins/constants-table-builder.h"
+#include "src/callable.h"
#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
@@ -32,6 +34,7 @@
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/ic/stub-cache.h"
+#include "src/instruction-stream.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
@@ -39,6 +42,7 @@
#include "src/log.h"
#include "src/messages.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/promise-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
@@ -110,8 +114,6 @@ void ThreadLocalTop::InitializeInternal() {
rethrowing_message_ = false;
pending_message_obj_ = nullptr;
scheduled_exception_ = nullptr;
- microtask_queue_bailout_index_ = -1;
- microtask_queue_bailout_count_ = 0;
}
@@ -221,16 +223,19 @@ void Isolate::IterateThread(ThreadVisitor* v, char* t) {
void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
- v->VisitRootPointer(Root::kTop, &thread->pending_exception_);
- v->VisitRootPointer(Root::kTop, &thread->wasm_caught_exception_);
- v->VisitRootPointer(Root::kTop, &thread->pending_message_obj_);
- v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(thread->context_)));
- v->VisitRootPointer(Root::kTop, &thread->scheduled_exception_);
+ v->VisitRootPointer(Root::kTop, nullptr, &thread->pending_exception_);
+ v->VisitRootPointer(Root::kTop, nullptr, &thread->wasm_caught_exception_);
+ v->VisitRootPointer(Root::kTop, nullptr, &thread->pending_message_obj_);
+ v->VisitRootPointer(Root::kTop, nullptr,
+ bit_cast<Object**>(&(thread->context_)));
+ v->VisitRootPointer(Root::kTop, nullptr, &thread->scheduled_exception_);
for (v8::TryCatch* block = thread->try_catch_handler(); block != nullptr;
block = block->next_) {
- v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(block->exception_)));
- v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(block->message_obj_)));
+ v->VisitRootPointer(Root::kTop, nullptr,
+ bit_cast<Object**>(&(block->exception_)));
+ v->VisitRootPointer(Root::kTop, nullptr,
+ bit_cast<Object**>(&(block->message_obj_)));
}
// Iterate over pointers on native execution stack.
@@ -312,61 +317,44 @@ Handle<String> Isolate::StackTraceString() {
}
}
-void Isolate::PushStackTraceAndDie(unsigned int magic1, void* ptr1, void* ptr2,
- unsigned int magic2) {
- PushStackTraceAndDie(magic1, ptr1, ptr2, nullptr, nullptr, nullptr, nullptr,
- nullptr, nullptr, magic2);
+void Isolate::PushStackTraceAndDie(void* ptr1, void* ptr2, void* ptr3,
+ void* ptr4) {
+ StackTraceFailureMessage message(this, ptr1, ptr2, ptr3, ptr4);
+ message.Print();
+ base::OS::Abort();
}
-void Isolate::PushStackTraceAndDie(unsigned int magic1, void* ptr1, void* ptr2,
- void* ptr3, void* ptr4, void* ptr5,
- void* ptr6, void* ptr7, void* ptr8,
- unsigned int magic2) {
- const int kMaxStackTraceSize = 32 * KB;
- Handle<String> trace = StackTraceString();
- uint8_t buffer[kMaxStackTraceSize];
- int length = Min(kMaxStackTraceSize - 1, trace->length());
- String::WriteToFlat(*trace, buffer, 0, length);
- buffer[length] = '\0';
- // TODO(dcarney): convert buffer to utf8?
- base::OS::PrintError(
- "Stacktrace:"
- "\n magic1=%x magic2=%x ptr1=%p ptr2=%p ptr3=%p ptr4=%p ptr5=%p "
- "ptr6=%p ptr7=%p ptr8=%p\n\n%s",
- magic1, magic2, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, ptr8,
- reinterpret_cast<char*>(buffer));
- PushCodeObjectsAndDie(0xDEADC0DE, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7,
- ptr8, 0xDEADC0DE);
-}
-
-void Isolate::PushCodeObjectsAndDie(unsigned int magic1, void* ptr1, void* ptr2,
- void* ptr3, void* ptr4, void* ptr5,
- void* ptr6, void* ptr7, void* ptr8,
- unsigned int magic2) {
- const int kMaxCodeObjects = 16;
- // Mark as volatile to lower the probability of optimizing code_objects
- // away. The first and last entries are set to the magic markers, making it
- // easier to spot the array on the stack.
- void* volatile code_objects[kMaxCodeObjects + 2];
- code_objects[0] = reinterpret_cast<void*>(magic1);
- code_objects[kMaxCodeObjects + 1] = reinterpret_cast<void*>(magic2);
- StackFrameIterator it(this);
- int numCodeObjects = 0;
- for (; !it.done() && numCodeObjects < kMaxCodeObjects; it.Advance()) {
- code_objects[1 + numCodeObjects++] = it.frame()->unchecked_code();
- }
-
- // Keep the top raw code object pointers on the stack in the hope that the
- // corresponding pages end up more frequently in the minidump.
+void StackTraceFailureMessage::Print() volatile {
+ // Print the details of this failure message object, including its own
+ // address; taking the address forces the object to be stack-allocated.
base::OS::PrintError(
- "\nCodeObjects (%p length=%i): 1:%p 2:%p 3:%p 4:%p..."
- "\n magic1=%x magic2=%x ptr1=%p ptr2=%p ptr3=%p ptr4=%p ptr5=%p "
- "ptr6=%p ptr7=%p ptr8=%p\n\n",
- static_cast<void*>(code_objects[0]), numCodeObjects,
- static_cast<void*>(code_objects[1]), static_cast<void*>(code_objects[2]),
- static_cast<void*>(code_objects[3]), static_cast<void*>(code_objects[4]),
- magic1, magic2, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, ptr8);
- base::OS::Abort();
+ "Stacktrace:\n ptr1=%p\n ptr2=%p\n ptr3=%p\n ptr4=%p\n "
+ "failure_message_object=%p\n%s",
+ ptr1_, ptr2_, ptr3_, ptr4_, this, &js_stack_trace_[0]);
+}
+
+StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
+ void* ptr2, void* ptr3,
+ void* ptr4) {
+ isolate_ = isolate;
+ ptr1_ = ptr1;
+ ptr2_ = ptr2;
+ ptr3_ = ptr3;
+ ptr4_ = ptr4;
+ // Write a stack trace into the {js_stack_trace_} buffer.
+ const size_t buffer_length = arraysize(js_stack_trace_);
+ memset(&js_stack_trace_, 0, buffer_length);
+ FixedStringAllocator fixed(&js_stack_trace_[0], buffer_length - 1);
+ StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
+ isolate->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+ // Keep references to the most recent code objects to increase the
+ // likelihood that they get included in the minidump.
+ const size_t code_objects_length = arraysize(code_objects_);
+ size_t i = 0;
+ StackFrameIterator it(isolate);
+ for (; !it.done() && i < code_objects_length; it.Advance()) {
+ code_objects_[i++] = it.frame()->unchecked_code();
+ }
}
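
The new StackTraceFailureMessage bundles the debug pointers, a textual JS stack trace, and the most recent code object pointers into one stack-allocated object, so a minidump of the crashing thread captures everything at known offsets. The intended call pattern, mirroring the PushStackTraceAndDie body above (sketch):

void DieWithDebugContext(Isolate* isolate, void* a, void* b) {
  StackTraceFailureMessage message(isolate, a, b, nullptr, nullptr);
  message.Print();    // pointers, the object's own address, and the JS stack
  base::OS::Abort();  // the stack-allocated message ends up in the minidump
}
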
namespace {
@@ -717,16 +705,16 @@ class CaptureStackTraceHelper {
int code_offset;
Handle<ByteArray> source_position_table;
Handle<Object> maybe_cache;
- Handle<NumberDictionary> cache;
+ Handle<SimpleNumberDictionary> cache;
if (!FLAG_optimize_for_size) {
code_offset = summ.code_offset();
source_position_table =
handle(summ.abstract_code()->source_position_table(), isolate_);
maybe_cache = handle(summ.abstract_code()->stack_frame_cache(), isolate_);
- if (maybe_cache->IsNumberDictionary()) {
- cache = Handle<NumberDictionary>::cast(maybe_cache);
+ if (maybe_cache->IsSimpleNumberDictionary()) {
+ cache = Handle<SimpleNumberDictionary>::cast(maybe_cache);
} else {
- cache = NumberDictionary::New(isolate_, 1);
+ cache = SimpleNumberDictionary::New(isolate_, 1);
}
int entry = cache->FindEntry(code_offset);
if (entry != NumberDictionary::kNotFound) {
@@ -759,7 +747,7 @@ class CaptureStackTraceHelper {
frame->set_is_constructor(summ.is_constructor());
frame->set_is_wasm(false);
if (!FLAG_optimize_for_size) {
- auto new_cache = NumberDictionary::Set(cache, code_offset, frame);
+ auto new_cache = SimpleNumberDictionary::Set(cache, code_offset, frame);
if (*new_cache != *cache || !maybe_cache->IsNumberDictionary()) {
AbstractCode::SetStackFrameCache(summ.abstract_code(), new_cache);
}
@@ -1278,10 +1266,11 @@ Object* Isolate::UnwindAndFindHandler() {
// Gather information from the handler.
Code* code = frame->LookupCode();
- return FoundHandler(
- nullptr, code->instruction_start(),
- Smi::ToInt(code->handler_table()->get(0)), code->constant_pool(),
- handler->address() + StackHandlerConstants::kSize, 0);
+ HandlerTable table(code);
+ return FoundHandler(nullptr, code->InstructionStart(),
+ table.LookupReturn(0), code->constant_pool(),
+ handler->address() + StackHandlerConstants::kSize,
+ 0);
}
case StackFrame::WASM_COMPILED: {
@@ -1348,7 +1337,7 @@ Object* Isolate::UnwindAndFindHandler() {
set_deoptimizer_lazy_throw(true);
}
- return FoundHandler(nullptr, code->instruction_start(), offset,
+ return FoundHandler(nullptr, code->InstructionStart(), offset,
code->constant_pool(), return_sp, frame->fp());
}
@@ -1358,7 +1347,7 @@ Object* Isolate::UnwindAndFindHandler() {
StubFrame* stub_frame = static_cast<StubFrame*>(frame);
Code* code = stub_frame->LookupCode();
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table()->length() || !code->is_turbofanned()) {
+ !code->handler_table_offset() || !code->is_turbofanned()) {
break;
}
@@ -1372,7 +1361,7 @@ Object* Isolate::UnwindAndFindHandler() {
StandardFrameConstants::kFixedFrameSizeAboveFp -
stack_slots * kPointerSize;
- return FoundHandler(nullptr, code->instruction_start(), offset,
+ return FoundHandler(nullptr, code->InstructionStart(), offset,
code->constant_pool(), return_sp, frame->fp());
}
@@ -1405,7 +1394,7 @@ Object* Isolate::UnwindAndFindHandler() {
Code* code =
builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- return FoundHandler(context, code->instruction_start(), 0,
+ return FoundHandler(context, code->InstructionStart(), 0,
code->constant_pool(), return_sp, frame->fp());
}
@@ -1425,7 +1414,7 @@ Object* Isolate::UnwindAndFindHandler() {
WasmInterpreterEntryFrame* interpreter_frame =
WasmInterpreterEntryFrame::cast(frame);
// TODO(wasm): Implement try-catch in the interpreter.
- interpreter_frame->wasm_instance()->debug_info()->Unwind(frame->fp());
+ interpreter_frame->debug_info()->Unwind(frame->fp());
} break;
default:
@@ -1468,9 +1457,8 @@ HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
// Must have been constructed from a bytecode array.
CHECK_EQ(AbstractCode::INTERPRETED_FUNCTION, code->kind());
int code_offset = summary.code_offset();
- BytecodeArray* bytecode = code->GetBytecodeArray();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
- int index = table->LookupRange(code_offset, nullptr, &prediction);
+ HandlerTable table(code->GetBytecodeArray());
+ int index = table.LookupRange(code_offset, nullptr, &prediction);
if (index <= 0) continue;
if (prediction == HandlerTable::UNCAUGHT) continue;
return prediction;
@@ -1534,7 +1522,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
case StackFrame::STUB: {
Handle<Code> code(frame->LookupCode());
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table()->length() || !code->is_turbofanned()) {
+ !code->handler_table_offset() || !code->is_turbofanned()) {
break;
}
@@ -2013,6 +2001,7 @@ void Isolate::PopPromise() {
}
namespace {
+
bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
Handle<JSPromise> promise);
@@ -2058,39 +2047,28 @@ bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
return true;
}
- Handle<Object> queue(promise->reject_reactions(), isolate);
- Handle<Object> deferred_promise(promise->deferred_promise(), isolate);
-
- if (queue->IsUndefined(isolate)) {
- return false;
- }
-
- if (queue->IsCallable()) {
- return PromiseHandlerCheck(isolate, Handle<JSReceiver>::cast(queue),
- Handle<JSReceiver>::cast(deferred_promise));
- }
-
- if (queue->IsSymbol()) {
- return InternalPromiseHasUserDefinedRejectHandler(
- isolate, Handle<JSPromise>::cast(deferred_promise));
- }
-
- Handle<FixedArray> queue_arr = Handle<FixedArray>::cast(queue);
- Handle<FixedArray> deferred_promise_arr =
- Handle<FixedArray>::cast(deferred_promise);
- for (int i = 0; i < deferred_promise_arr->length(); i++) {
- Handle<JSReceiver> deferred_promise_item(
- JSReceiver::cast(deferred_promise_arr->get(i)));
- if (queue_arr->get(i)->IsSymbol()) {
- if (InternalPromiseHasUserDefinedRejectHandler(
- isolate, Handle<JSPromise>::cast(deferred_promise_item))) {
- return true;
- }
- } else {
- Handle<JSReceiver> queue_item(JSReceiver::cast(queue_arr->get(i)));
- if (PromiseHandlerCheck(isolate, queue_item, deferred_promise_item)) {
- return true;
+ if (promise->status() == Promise::kPending) {
+ Handle<Object> current(promise->reactions(), isolate);
+ while (!current->IsSmi()) {
+ Handle<PromiseReaction> current_reaction =
+ Handle<PromiseReaction>::cast(current);
+ Handle<HeapObject> payload(current_reaction->payload(), isolate);
+ Handle<JSPromise> current_promise;
+ if (JSPromise::From(payload).ToHandle(&current_promise)) {
+ if (current_reaction->reject_handler()->IsCallable()) {
+ Handle<JSReceiver> current_handler(
+ JSReceiver::cast(current_reaction->reject_handler()), isolate);
+ if (PromiseHandlerCheck(isolate, current_handler, current_promise)) {
+ return true;
+ }
+ } else {
+ if (InternalPromiseHasUserDefinedRejectHandler(isolate,
+ current_promise)) {
+ return true;
+ }
+ }
}
+ current = handle(current_reaction->next(), isolate);
}
}
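
The rewritten check walks the pending promise's reactions, stored as a singly linked list of PromiseReaction objects terminated by a Smi, inspecting each reaction's reject handler or recursing into the dependent promise. The shape of the traversal in isolation (illustrative types; the terminator is modeled as nullptr):

struct ReactionNode {
  ReactionNode* next;  // nullptr stands in for the Smi terminator
  bool reject_handler_is_callable;
};

bool AnyCallableRejectHandler(ReactionNode* head) {
  for (ReactionNode* r = head; r != nullptr; r = r->next) {
    if (r->reject_handler_is_callable) return true;
  }
  return false;
}
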
@@ -2124,7 +2102,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
} else if (frame->type() == StackFrame::STUB) {
Code* code = frame->LookupCode();
if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->handler_table()->length() || !code->is_turbofanned()) {
+ !code->handler_table_offset() || !code->is_turbofanned()) {
continue;
}
catch_prediction = code->GetBuiltinCatchPrediction();
@@ -2617,6 +2595,8 @@ void Isolate::ClearSerializerData() {
void Isolate::Deinit() {
TRACE_ISOLATE(deinit);
+ // Make sure that the GC does not post any new tasks.
+ heap_.stop_using_tasks();
debug()->Unload();
if (concurrent_recompilation_enabled()) {
@@ -2665,7 +2645,7 @@ void Isolate::Deinit() {
delete heap_profiler_;
heap_profiler_ = nullptr;
- compiler_dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kBlock);
+ compiler_dispatcher_->AbortAll(BlockingBehavior::kBlock);
delete compiler_dispatcher_;
compiler_dispatcher_ = nullptr;
@@ -2689,6 +2669,12 @@ void Isolate::Deinit() {
root_index_map_ = nullptr;
ClearSerializerData();
+
+ for (InstructionStream* stream : off_heap_code_) {
+ CHECK(FLAG_stress_off_heap_code);
+ delete stream;
+ }
+ off_heap_code_.clear();
}
@@ -2774,11 +2760,6 @@ Isolate::~Isolate() {
delete allocator_;
allocator_ = nullptr;
-
-#if USE_SIMULATOR
- Simulator::TearDown(simulator_i_cache_);
- simulator_i_cache_ = nullptr;
-#endif
}
@@ -2845,6 +2826,121 @@ void PrintBuiltinSizes(Isolate* isolate) {
code->instruction_size());
}
}
+
+#ifdef V8_EMBEDDED_BUILTINS
+#ifdef DEBUG
+bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate,
+ int builtin_index) {
+ switch (Builtins::KindOf(builtin_index)) {
+ case Builtins::CPP:
+ case Builtins::TFC:
+ case Builtins::TFH:
+ case Builtins::TFJ:
+ case Builtins::TFS:
+ break;
+ case Builtins::API:
+ case Builtins::ASM:
+ // TODO(jgruber): Extend checks to remaining kinds.
+ return false;
+ }
+
+ Callable callable = Builtins::CallableFor(
+ isolate, static_cast<Builtins::Name>(builtin_index));
+ CallInterfaceDescriptor descriptor = callable.descriptor();
+
+ if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
+ return true;
+ }
+
+ for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
+ Register reg = descriptor.GetRegisterParameter(i);
+ if (reg == kOffHeapTrampolineRegister) return true;
+ }
+
+ return false;
+}
+#endif
+
+void ChangeToOffHeapTrampoline(Isolate* isolate, Handle<Code> code,
+ InstructionStream* stream) {
+ DCHECK(Builtins::IsOffHeapSafe(code->builtin_index()));
+ HandleScope scope(isolate);
+
+ constexpr size_t buffer_size = 256; // Enough to fit the single jmp.
+ byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+
+ // Generate replacement code that simply tail-calls the off-heap code.
+ MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+ DCHECK(
+ !BuiltinAliasesOffHeapTrampolineRegister(isolate, code->builtin_index()));
+ DCHECK(!masm.has_frame());
+ {
+ FrameScope scope(&masm, StackFrame::NONE);
+ masm.JumpToInstructionStream(stream);
+ }
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+
+ // Hack in an empty reloc info to satisfy the GC.
+ DCHECK_EQ(0, desc.reloc_size);
+ Handle<ByteArray> reloc_info =
+ isolate->factory()->NewByteArray(desc.reloc_size, TENURED);
+ code->set_relocation_info(*reloc_info);
+
+ // Overwrites the original code.
+ CHECK_LE(desc.instr_size, code->instruction_size());
+ CHECK_IMPLIES(code->has_safepoint_info(),
+ desc.instr_size <= code->safepoint_table_offset());
+ code->CopyFrom(desc);
+
+ // TODO(jgruber): CopyFrom isn't intended to overwrite existing code, and
+ // doesn't update things like instruction_size. The result is a code object in
+ // which the first instructions are overwritten while the rest remain intact
+ // (but are never executed). That's fine for our current purposes, just
+ // manually zero the trailing part.
+
+ DCHECK_LE(desc.instr_size, code->instruction_size());
+ byte* trailing_instruction_start =
+ code->instruction_start() + desc.instr_size;
+ int instruction_size = code->instruction_size();
+ if (code->has_safepoint_info()) {
+ CHECK_LE(code->safepoint_table_offset(), code->instruction_size());
+ instruction_size = code->safepoint_table_offset();
+ CHECK_LE(desc.instr_size, instruction_size);
+ }
+ size_t trailing_instruction_size = instruction_size - desc.instr_size;
+ std::memset(trailing_instruction_start, 0, trailing_instruction_size);
+}
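
ChangeToOffHeapTrampoline overwrites the start of the on-heap builtin with a short jump into the off-heap InstructionStream, then clears the now-dead tail so no stale instructions remain. The tail-zeroing step in isolation (sketch of the logic above; new_size <= old_size as the CHECKs enforce):

#include <cstring>

// The trampoline is smaller than the code it replaces, so the leftover
// bytes after it are zeroed.
void ZeroTrailingInstructions(unsigned char* instr_start, int old_size,
                              int new_size) {
  std::memset(instr_start + new_size, 0, old_size - new_size);
}
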
+
+void LogInstructionStream(Isolate* isolate, Code* code,
+ const InstructionStream* stream) {
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+ isolate->logger()->LogInstructionStream(code, stream);
+ }
+}
+
+void MoveBuiltinsOffHeap(Isolate* isolate) {
+ DCHECK(FLAG_stress_off_heap_code);
+ HandleScope scope(isolate);
+ Builtins* builtins = isolate->builtins();
+
+ // Lazy deserialization would defeat our off-heap stress test (we'd
+ // deserialize later without moving off-heap), so force eager
+ // deserialization.
+ Snapshot::EnsureAllBuiltinsAreDeserialized(isolate);
+
+ CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (!Builtins::IsOffHeapSafe(i)) continue;
+ Handle<Code> code(builtins->builtin(i));
+ InstructionStream* stream = new InstructionStream(*code);
+ LogInstructionStream(isolate, *code, stream);
+ ChangeToOffHeapTrampoline(isolate, code, stream);
+ isolate->PushOffHeapCode(stream);
+ }
+}
+#endif // V8_EMBEDDED_BUILTINS
} // namespace
bool Isolate::Init(StartupDeserializer* des) {
@@ -2954,6 +3050,9 @@ bool Isolate::Init(StartupDeserializer* des) {
if (create_heap_objects) {
// Terminate the partial snapshot cache so we can iterate.
partial_snapshot_cache_.push_back(heap_.undefined_value());
+#ifdef V8_EMBEDDED_BUILTINS
+ builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
+#endif
}
InitializeThreadLocal();
@@ -2986,6 +3085,14 @@ bool Isolate::Init(StartupDeserializer* des) {
store_stub_cache_->Initialize();
setup_delegate_->SetupInterpreter(interpreter_);
+#ifdef V8_EMBEDDED_BUILTINS
+ if (create_heap_objects) {
+ builtins_constants_table_builder_->Finalize();
+ delete builtins_constants_table_builder_;
+ builtins_constants_table_builder_ = nullptr;
+ }
+#endif // V8_EMBEDDED_BUILTINS
+
heap_.NotifyDeserializationComplete();
}
delete setup_delegate_;
@@ -2993,6 +3100,15 @@ bool Isolate::Init(StartupDeserializer* des) {
if (FLAG_print_builtin_size) PrintBuiltinSizes(this);
+#ifdef V8_EMBEDDED_BUILTINS
+ if (FLAG_stress_off_heap_code && !serializer_enabled()) {
+ // Artificially move code off-heap to help find & verify related code
+ // paths. Lazy deserialization should be off to avoid confusion around
+ // replacing just the kDeserializeLazy code object.
+ MoveBuiltinsOffHeap(this);
+ }
+#endif
+
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();
clear_pending_message();
@@ -3162,6 +3278,12 @@ void Isolate::DumpAndResetStats() {
}
}
+void Isolate::AbortConcurrentOptimization(BlockingBehavior behavior) {
+ if (concurrent_recompilation_enabled()) {
+ DisallowHeapAllocation no_recursive_gc;
+ optimizing_compile_dispatcher()->Flush(behavior);
+ }
+}
CompilationStatistics* Isolate::GetTurboStatistics() {
if (turbo_statistics() == nullptr)
@@ -3176,8 +3298,7 @@ CodeTracer* Isolate::GetCodeTracer() {
}
bool Isolate::use_optimizer() {
- return FLAG_opt && !serializer_enabled_ &&
- CpuFeatures::SupportsCrankshaft() &&
+ return FLAG_opt && !serializer_enabled_ && CpuFeatures::SupportsOptimizer() &&
!is_precise_count_code_coverage() && !is_block_count_code_coverage();
}
@@ -3368,6 +3489,32 @@ bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver) {
return !receiver->HasProxyInPrototype(this);
}
+bool Isolate::IsPromiseHookProtectorIntact() {
+ PropertyCell* promise_hook_cell = heap()->promise_hook_protector();
+ bool is_promise_hook_protector_intact =
+ Smi::ToInt(promise_hook_cell->value()) == kProtectorValid;
+ DCHECK_IMPLIES(is_promise_hook_protector_intact,
+ !promise_hook_or_debug_is_active_);
+ return is_promise_hook_protector_intact;
+}
+
+bool Isolate::IsPromiseThenLookupChainIntact() {
+ PropertyCell* promise_then_cell = heap()->promise_then_protector();
+ bool is_promise_then_protector_intact =
+ Smi::ToInt(promise_then_cell->value()) == kProtectorValid;
+ return is_promise_then_protector_intact;
+}
+
+bool Isolate::IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver) {
+ DisallowHeapAllocation no_gc;
+ if (!receiver->IsJSPromise()) return false;
+ if (!IsInAnyContext(receiver->map()->prototype(),
+ Context::PROMISE_PROTOTYPE_INDEX)) {
+ return false;
+ }
+ return IsPromiseThenLookupChainIntact();
+}
+
void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
if (!object->map()->is_prototype_map()) return;
@@ -3394,11 +3541,11 @@ void Isolate::InvalidateArrayConstructorProtector() {
DCHECK(!IsArrayConstructorIntact());
}
-void Isolate::InvalidateArraySpeciesProtector() {
+void Isolate::InvalidateSpeciesProtector() {
DCHECK(factory()->species_protector()->value()->IsSmi());
- DCHECK(IsArraySpeciesLookupChainIntact());
+ DCHECK(IsSpeciesLookupChainIntact());
factory()->species_protector()->set_value(Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsArraySpeciesLookupChainIntact());
+ DCHECK(!IsSpeciesLookupChainIntact());
}
void Isolate::InvalidateStringLengthOverflowProtector() {
@@ -3427,6 +3574,24 @@ void Isolate::InvalidateArrayBufferNeuteringProtector() {
DCHECK(!IsArrayBufferNeuteringIntact());
}
+void Isolate::InvalidatePromiseHookProtector() {
+ DCHECK(factory()->promise_hook_protector()->value()->IsSmi());
+ DCHECK(IsPromiseHookProtectorIntact());
+ PropertyCell::SetValueWithInvalidation(
+ factory()->promise_hook_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsPromiseHookProtectorIntact());
+}
+
+void Isolate::InvalidatePromiseThenProtector() {
+ DCHECK(factory()->promise_then_protector()->value()->IsSmi());
+ DCHECK(IsPromiseThenLookupChainIntact());
+ PropertyCell::SetValueWithInvalidation(
+ factory()->promise_then_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsPromiseThenLookupChainIntact());
+}
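
The new promise protectors follow V8's usual protector-cell pattern: a cell starts out valid, fast paths consult it, and the first event that could break the invariant flips it to invalid for good (with dependent code deoptimized via SetValueWithInvalidation). The pattern in miniature (hypothetical class, omitting the deopt step):

class ProtectorCell {
 public:
  bool IsIntact() const { return value_ == kProtectorValid; }
  void Invalidate() { value_ = kProtectorInvalid; }  // never revalidated

 private:
  static const int kProtectorValid = 1;
  static const int kProtectorInvalid = 0;
  int value_ = kProtectorValid;
};
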
+
bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
DisallowHeapAllocation no_gc;
return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -3588,7 +3753,11 @@ void Isolate::FireCallCompletedCallback() {
}
void Isolate::DebugStateUpdated() {
- promise_hook_or_debug_is_active_ = promise_hook_ || debug()->is_active();
+ bool promise_hook_or_debug_is_active = promise_hook_ || debug()->is_active();
+ if (promise_hook_or_debug_is_active && IsPromiseHookProtectorIntact()) {
+ InvalidatePromiseHookProtector();
+ }
+ promise_hook_or_debug_is_active_ = promise_hook_or_debug_is_active;
}
namespace {
@@ -3698,83 +3867,16 @@ void Isolate::ReportPromiseReject(Handle<JSPromise> promise,
v8::Utils::StackTraceToLocal(stack_trace)));
}
-void Isolate::PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
- MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception) {
- Handle<Object> value(info->value(), this);
- Handle<Object> tasks(info->tasks(), this);
- Handle<JSFunction> promise_handle_fn = promise_handle();
- Handle<Object> undefined = factory()->undefined_value();
- Handle<Object> deferred_promise(info->deferred_promise(), this);
-
- if (deferred_promise->IsFixedArray()) {
- DCHECK(tasks->IsFixedArray());
- Handle<FixedArray> deferred_promise_arr =
- Handle<FixedArray>::cast(deferred_promise);
- Handle<FixedArray> deferred_on_resolve_arr(
- FixedArray::cast(info->deferred_on_resolve()), this);
- Handle<FixedArray> deferred_on_reject_arr(
- FixedArray::cast(info->deferred_on_reject()), this);
- Handle<FixedArray> tasks_arr = Handle<FixedArray>::cast(tasks);
- for (int i = 0; i < deferred_promise_arr->length(); i++) {
- Handle<Object> argv[] = {value, handle(tasks_arr->get(i), this),
- handle(deferred_promise_arr->get(i), this),
- handle(deferred_on_resolve_arr->get(i), this),
- handle(deferred_on_reject_arr->get(i), this)};
- *result = Execution::TryCall(
- this, promise_handle_fn, undefined, arraysize(argv), argv,
- Execution::MessageHandling::kReport, maybe_exception);
- // If execution is terminating, just bail out.
- if (result->is_null() && maybe_exception->is_null()) {
- return;
- }
- }
- } else {
- Handle<Object> argv[] = {value, tasks, deferred_promise,
- handle(info->deferred_on_resolve(), this),
- handle(info->deferred_on_reject(), this)};
- *result = Execution::TryCall(
- this, promise_handle_fn, undefined, arraysize(argv), argv,
- Execution::MessageHandling::kReport, maybe_exception);
- }
-}
-
-void Isolate::PromiseResolveThenableJob(
- Handle<PromiseResolveThenableJobInfo> info, MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception) {
- Handle<JSReceiver> thenable(info->thenable(), this);
- Handle<JSFunction> resolve(info->resolve(), this);
- Handle<JSFunction> reject(info->reject(), this);
- Handle<JSReceiver> then(info->then(), this);
- Handle<Object> argv[] = {resolve, reject};
- *result =
- Execution::TryCall(this, then, thenable, arraysize(argv), argv,
- Execution::MessageHandling::kReport, maybe_exception);
-
- Handle<Object> reason;
- if (maybe_exception->ToHandle(&reason)) {
- DCHECK(result->is_null());
- Handle<Object> reason_arg[] = {reason};
- *result = Execution::TryCall(
- this, reject, factory()->undefined_value(), arraysize(reason_arg),
- reason_arg, Execution::MessageHandling::kReport, maybe_exception);
- }
-}
-
-void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
- DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo() ||
- microtask->IsPromiseResolveThenableJobInfo() ||
- microtask->IsPromiseReactionJobInfo());
+void Isolate::EnqueueMicrotask(Handle<Microtask> microtask) {
Handle<FixedArray> queue(heap()->microtask_queue(), this);
int num_tasks = pending_microtask_count();
- DCHECK(num_tasks <= queue->length());
- if (num_tasks == 0) {
- queue = factory()->NewFixedArray(8);
- heap()->set_microtask_queue(*queue);
- } else if (num_tasks == queue->length()) {
- queue = factory()->CopyFixedArrayAndGrow(queue, num_tasks);
+ DCHECK_LE(num_tasks, queue->length());
+ if (num_tasks == queue->length()) {
+ queue = factory()->CopyFixedArrayAndGrow(queue, std::max(num_tasks, 8));
heap()->set_microtask_queue(*queue);
}
+ DCHECK_LE(8, queue->length());
+ DCHECK_LT(num_tasks, queue->length());
DCHECK(queue->get(num_tasks)->IsUndefined(this));
queue->set(num_tasks, *microtask);
set_pending_microtask_count(num_tasks + 1);
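
EnqueueMicrotask now grows the queue only when it is full, via CopyFixedArrayAndGrow(queue, std::max(num_tasks, 8)); since num_tasks equals the current length at that point, the backing array at least doubles with a floor of 8, giving amortized O(1) appends and satisfying the DCHECKs above. The growth rule in isolation:

#include <algorithm>

// New length when the queue is full: current + max(current, 8),
// i.e. 0 -> 8 -> 16 -> 32 -> ...
int GrownQueueLength(int current_length) {
  return current_length + std::max(current_length, 8);
}
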
@@ -3785,100 +3887,25 @@ void Isolate::RunMicrotasks() {
// Increase call depth to prevent recursive callbacks.
v8::Isolate::SuppressMicrotaskExecutionScope suppress(
reinterpret_cast<v8::Isolate*>(this));
- is_running_microtasks_ = true;
- RunMicrotasksInternal();
- is_running_microtasks_ = false;
- FireMicrotasksCompletedCallback();
-}
-
+ if (pending_microtask_count()) {
+ is_running_microtasks_ = true;
+ TRACE_EVENT0("v8.execute", "RunMicrotasks");
+ TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
-void Isolate::RunMicrotasksInternal() {
- if (!pending_microtask_count()) return;
- TRACE_EVENT0("v8.execute", "RunMicrotasks");
- TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
-
- do {
- HandleScope handle_scope(this);
- set_microtask_queue_bailout_index(-1);
- set_microtask_queue_bailout_count(-1);
+ HandleScope scope(this);
MaybeHandle<Object> maybe_exception;
MaybeHandle<Object> maybe_result = Execution::RunMicrotasks(
this, Execution::MessageHandling::kReport, &maybe_exception);
+ // If execution is terminating, just bail out.
if (maybe_result.is_null() && maybe_exception.is_null()) {
heap()->set_microtask_queue(heap()->empty_fixed_array());
set_pending_microtask_count(0);
- return;
}
-
- Handle<Object> result = maybe_result.ToHandleChecked();
- if (result->IsUndefined(this)) return;
-
- Handle<FixedArray> queue = Handle<FixedArray>::cast(result);
- int num_tasks = microtask_queue_bailout_count();
- DCHECK_GE(microtask_queue_bailout_index(), 0);
-
- Isolate* isolate = this;
- FOR_WITH_HANDLE_SCOPE(
- isolate, int, i = microtask_queue_bailout_index(), i, i < num_tasks,
- i++, {
- Handle<Object> microtask(queue->get(i), this);
-
- if (microtask->IsCallHandlerInfo()) {
- Handle<CallHandlerInfo> callback_info =
- Handle<CallHandlerInfo>::cast(microtask);
- v8::MicrotaskCallback callback =
- v8::ToCData<v8::MicrotaskCallback>(callback_info->callback());
- void* data = v8::ToCData<void*>(callback_info->data());
- callback(data);
- } else {
- SaveContext save(this);
- Context* context;
- if (microtask->IsJSFunction()) {
- context = Handle<JSFunction>::cast(microtask)->context();
- } else if (microtask->IsPromiseResolveThenableJobInfo()) {
- context = Handle<PromiseResolveThenableJobInfo>::cast(microtask)
- ->context();
- } else {
- context =
- Handle<PromiseReactionJobInfo>::cast(microtask)->context();
- }
-
- set_context(context->native_context());
- handle_scope_implementer_->EnterMicrotaskContext(
- Handle<Context>(context, this));
-
- MaybeHandle<Object> result;
- MaybeHandle<Object> maybe_exception;
-
- if (microtask->IsJSFunction()) {
- Handle<JSFunction> microtask_function =
- Handle<JSFunction>::cast(microtask);
- result = Execution::TryCall(
- this, microtask_function, factory()->undefined_value(), 0,
- nullptr, Execution::MessageHandling::kReport,
- &maybe_exception);
- } else if (microtask->IsPromiseResolveThenableJobInfo()) {
- PromiseResolveThenableJob(
- Handle<PromiseResolveThenableJobInfo>::cast(microtask),
- &result, &maybe_exception);
- } else {
- PromiseReactionJob(
- Handle<PromiseReactionJobInfo>::cast(microtask), &result,
- &maybe_exception);
- }
-
- handle_scope_implementer_->LeaveMicrotaskContext();
-
- // If execution is terminating, just bail out.
- if (result.is_null() && maybe_exception.is_null()) {
- // Clear out any remaining callbacks in the queue.
- heap()->set_microtask_queue(heap()->empty_fixed_array());
- set_pending_microtask_count(0);
- return;
- }
- }
- });
- } while (pending_microtask_count() > 0);
+ CHECK_EQ(0, pending_microtask_count());
+ CHECK_EQ(0, heap()->microtask_queue()->length());
+ is_running_microtasks_ = false;
+ }
+ FireMicrotasksCompletedCallback();
}
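
After the rewrite, Execution::RunMicrotasks drains the whole queue itself; the C++ side only handles the termination bailout, signalled by the call returning neither a result nor an exception, and then asserts the queue is empty. A sketch of that contract:

// Termination check used above: both handles empty means execution is
// being terminated and the queue should simply be cleared.
bool ExecutionIsTerminating(const MaybeHandle<Object>& result,
                            const MaybeHandle<Object>& exception) {
  return result.is_null() && exception.is_null();
}
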
void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 8eca55ffd6..5538992af1 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -10,23 +10,23 @@
#include <queue>
#include <vector>
-#include "include/v8-debug.h"
+#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
+#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
#include "src/date.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
-#include "src/global-handles.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/heap.h"
#include "src/messages.h"
#include "src/objects/code.h"
-#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
-#include "src/zone/zone.h"
+#include "src/unicode.h"
namespace v8 {
@@ -49,6 +49,7 @@ class AddressToIndexHashMap;
class AstStringConstants;
class BasicBlockProfiler;
class Bootstrapper;
+class BuiltinsConstantsTableBuilder;
class CallInterfaceDescriptorData;
class CancelableTaskManager;
class CodeEventDispatcher;
@@ -67,6 +68,7 @@ class Debug;
class DeoptimizerData;
class DescriptorLookupCache;
class EmptyStatement;
+class EternalHandles;
class ExternalCallbackScope;
class ExternalReferenceTable;
class Factory;
@@ -75,8 +77,10 @@ class HeapObjectToIndexHashMap;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
+class InstructionStream;
class Logger;
class MaterializedObjectStore;
+class Microtask;
class OptimizingCompileDispatcher;
class PromiseOnStack;
class Redirection;
@@ -372,9 +376,6 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
- int microtask_queue_bailout_index_;
- int microtask_queue_bailout_count_;
-
private:
void InitializeInternal();
@@ -382,17 +383,6 @@ class ThreadLocalTop BASE_EMBEDDED {
};
-#if USE_SIMULATOR
-
-#define ISOLATE_INIT_SIMULATOR_LIST(V) \
- V(base::CustomMatcherHashMap*, simulator_i_cache, nullptr)
-#else
-
-#define ISOLATE_INIT_SIMULATOR_LIST(V)
-
-#endif
-
-
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
@@ -453,8 +443,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort) \
V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone) \
V(int, last_stack_frame_info_id, 0) \
- V(int, last_console_context_id, 0) \
- ISOLATE_INIT_SIMULATOR_LIST(V)
+ V(int, last_console_context_id, 0)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
@@ -675,18 +664,6 @@ class Isolate {
return &thread_local_top_.js_entry_sp_;
}
- THREAD_LOCAL_TOP_ACCESSOR(int, microtask_queue_bailout_index)
- Address microtask_queue_bailout_index_address() {
- return reinterpret_cast<Address>(
- &thread_local_top_.microtask_queue_bailout_index_);
- }
-
- THREAD_LOCAL_TOP_ACCESSOR(int, microtask_queue_bailout_count)
- Address microtask_queue_bailout_count_address() {
- return reinterpret_cast<Address>(
- &thread_local_top_.microtask_queue_bailout_count_);
- }
-
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
inline Handle<JSGlobalObject> global_object();
@@ -743,16 +720,10 @@ class Isolate {
Handle<String> StackTraceString();
// Stores a stack trace in a stack-allocated temporary buffer which will
// end up in the minidump for debugging purposes.
- NO_INLINE(void PushStackTraceAndDie(unsigned int magic1, void* ptr1,
- void* ptr2, unsigned int magic2));
- NO_INLINE(void PushStackTraceAndDie(unsigned int magic1, void* ptr1,
- void* ptr2, void* ptr3, void* ptr4,
- void* ptr5, void* ptr6, void* ptr7,
- void* ptr8, unsigned int magic2));
- NO_INLINE(void PushCodeObjectsAndDie(unsigned int magic, void* ptr1,
- void* ptr2, void* ptr3, void* ptr4,
- void* ptr5, void* ptr6, void* ptr7,
- void* ptr8, unsigned int magic2));
+ NO_INLINE(void PushStackTraceAndDie(void* ptr1 = nullptr,
+ void* ptr2 = nullptr,
+ void* ptr3 = nullptr,
+ void* ptr4 = nullptr));
Handle<FixedArray> CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options);
Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
@@ -1104,7 +1075,7 @@ class Isolate {
bool IsNoElementsProtectorIntact(Context* context);
bool IsNoElementsProtectorIntact();
- inline bool IsArraySpeciesLookupChainIntact();
+ inline bool IsSpeciesLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
inline bool IsStringLengthOverflowIntact();
@@ -1116,6 +1087,15 @@ class Isolate {
// Make sure we do check for neutered array buffers.
inline bool IsArrayBufferNeuteringIntact();
+ // Disable promise optimizations if promise (debug) hooks have ever been
+ // active.
+ bool IsPromiseHookProtectorIntact();
+
+ // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
+ // initial %PromisePrototype% yields the initial method.
+ bool IsPromiseThenLookupChainIntact();
+ bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);
+
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
// object prototype. Also ensure that changes to prototype chain between
@@ -1131,11 +1111,13 @@ class Isolate {
UpdateNoElementsProtectorOnSetElement(object);
}
void InvalidateArrayConstructorProtector();
- void InvalidateArraySpeciesProtector();
+ void InvalidateSpeciesProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
void InvalidateArrayIteratorProtector();
void InvalidateArrayBufferNeuteringProtector();
+ V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
+ void InvalidatePromiseThenProtector();
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1161,6 +1143,9 @@ class Isolate {
OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
return optimizing_compile_dispatcher_;
}
+ // Flushes all pending concurrent optimization jobs from the optimizing
+ // compile dispatcher's queue.
+ void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);
int id() const { return static_cast<int>(id_); }
@@ -1176,8 +1161,8 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
- bool force_slow_path() { return force_slow_path_; }
-
+ void set_force_slow_path(bool v) { force_slow_path_ = v; }
+ bool force_slow_path() const { return force_slow_path_; }
bool* force_slow_path_address() { return &force_slow_path_; }
V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
@@ -1215,14 +1200,7 @@ class Isolate {
void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
v8::PromiseRejectEvent event);
- void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
- MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception);
- void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
- MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception);
-
- void EnqueueMicrotask(Handle<Object> microtask);
+ void EnqueueMicrotask(Handle<Microtask> microtask);
void RunMicrotasks();
bool IsRunningMicrotasks() const { return is_running_microtasks_; }
@@ -1266,6 +1244,16 @@ class Isolate {
return &partial_snapshot_cache_;
}
+ void PushOffHeapCode(InstructionStream* stream) {
+ off_heap_code_.emplace_back(stream);
+ }
+
+#ifdef V8_EMBEDDED_BUILTINS
+ BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
+ return builtins_constants_table_builder_;
+ }
+#endif
+
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
}
@@ -1317,10 +1305,6 @@ class Isolate {
PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);
-#ifdef USE_SIMULATOR
- base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
-#endif
-
void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
bool allow_atomics_wait() { return allow_atomics_wait_; }
@@ -1476,8 +1460,6 @@ class Isolate {
// then return true.
bool PropagatePendingExceptionToExternalTryCatch();
- void RunMicrotasksInternal();
-
const char* RAILModeName(RAILMode rail_mode) const {
switch (rail_mode) {
case PERFORMANCE_RESPONSE:
@@ -1639,6 +1621,18 @@ class Isolate {
std::vector<Object*> partial_snapshot_cache_;
+ // Stores off-heap instruction streams. Only used if --stress-off-heap-code
+ // is enabled.
+ // TODO(jgruber,v8:6666): Remove once isolate-independent builtins are
+ // implemented. Also remove friend class below.
+ std::vector<InstructionStream*> off_heap_code_;
+
+#ifdef V8_EMBEDDED_BUILTINS
+ // Used during builtins compilation to build the builtins constants table,
+ // which is stored on the root list prior to serialization.
+ BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;
+#endif
+
v8::ArrayBuffer::Allocator* array_buffer_allocator_;
FutexWaitListNode futex_wait_list_node_;
@@ -1650,10 +1644,6 @@ class Isolate {
v8::Isolate::AbortOnUncaughtExceptionCallback
abort_on_uncaught_exception_callback_;
-#ifdef USE_SIMULATOR
- base::Mutex simulator_i_cache_mutex_;
-#endif
-
bool allow_atomics_wait_;
ManagedObjectFinalizer managed_object_finalizers_list_;
@@ -1671,17 +1661,18 @@ class Isolate {
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class heap::HeapTester;
+ friend class InstructionStream;
friend class OptimizingCompileDispatcher;
- friend class SweeperThread;
- friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
+ friend class SweeperThread;
friend class TestIsolate;
friend class ThreadId;
+ friend class ThreadManager;
friend class v8::Isolate;
friend class v8::Locker;
- friend class v8::Unlocker;
friend class v8::SnapshotCreator;
+ friend class v8::Unlocker;
friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
const char*);
@@ -1897,6 +1888,29 @@ class CodeTracer final : public Malloced {
int scope_depth_;
};
+class StackTraceFailureMessage {
+ public:
+ explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
+ void* ptr2 = nullptr, void* ptr3 = nullptr,
+ void* ptr4 = nullptr);
+
+ V8_NOINLINE void Print() volatile;
+
+ static const uintptr_t kStartMarker = 0xdecade30;
+ static const uintptr_t kEndMarker = 0xdecade31;
+ static const int kStacktraceBufferSize = 32 * KB;
+
+ uintptr_t start_marker_ = kStartMarker;
+ void* isolate_;
+ void* ptr1_;
+ void* ptr2_;
+ void* ptr3_;
+ void* ptr4_;
+ void* code_objects_[4];
+ char js_stack_trace_[kStacktraceBufferSize];
+ uintptr_t end_marker_ = kEndMarker;
+};
+
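A hedged usage sketch for the new class (an assumed call site, not taken from this diff): the message is built on the stack of the failing thread so the marker-delimited payload survives into a minidump.

// Hypothetical call site for the StackTraceFailureMessage defined above.
void FailWithContext(v8::internal::Isolate* isolate, void* culprit) {
  v8::internal::StackTraceFailureMessage message(isolate, culprit);
  message.Print();  // Emit to stderr/log before the process dies.
  // Crashing here leaves the buffer on the stack, bracketed by
  // kStartMarker (0xdecade30) and kEndMarker (0xdecade31) for tooling.
}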
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/js/OWNERS b/deps/v8/src/js/OWNERS
index 0108c712e3..de2152c056 100644
--- a/deps/v8/src/js/OWNERS
+++ b/deps/v8/src/js/OWNERS
@@ -7,7 +7,6 @@ ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
littledan@chromium.org
-rossberg@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 7605fc1a7d..46096a0ba5 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -1153,81 +1153,6 @@ DEFINE_METHOD_LEN(
1 /* Set function length */
);
-
-// ES6, draft 10-14-14, section 22.1.2.1
-DEFINE_METHOD_LEN(
- GlobalArray,
- 'from'(arrayLike, mapfn, receiver) {
- var items = TO_OBJECT(arrayLike);
- var mapping = !IS_UNDEFINED(mapfn);
-
- if (mapping) {
- if (!IS_CALLABLE(mapfn)) {
- throw %make_type_error(kCalledNonCallable, mapfn);
- }
- }
-
- var iterable = GetMethod(items, iteratorSymbol);
- var k;
- var result;
- var mappedValue;
- var nextValue;
-
- if (!IS_UNDEFINED(iterable)) {
- result = %IsConstructor(this) ? new this() : [];
- k = 0;
-
- for (nextValue of
- { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
- }
- %CreateDataProperty(result, k, mappedValue);
- k++;
- }
- result.length = k;
- return result;
- } else {
- var len = TO_LENGTH(items.length);
- result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
-
- for (k = 0; k < len; ++k) {
- nextValue = items[k];
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
- }
- %CreateDataProperty(result, k, mappedValue);
- }
-
- result.length = k;
- return result;
- }
- },
- 1 /* Set function length. */
-);
-
-// ES6, draft 05-22-14, section 22.1.2.3
-DEFINE_METHOD(
- GlobalArray,
- of(...args) {
- var length = args.length;
- var constructor = this;
- // TODO: Implement IsConstructor (ES6 section 7.2.5)
- var array = %IsConstructor(constructor) ? new constructor(length) : [];
- for (var i = 0; i < length; i++) {
- %CreateDataProperty(array, i, args[i]);
- }
- array.length = length;
- return array;
- }
-);
-
-// -------------------------------------------------------------------
-
// Set up unscopable properties on the Array.prototype object.
var unscopables = {
__proto__: null,
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 32f826691d..0d6b670367 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -177,13 +177,7 @@ extrasUtils.createPrivateSymbol = function createPrivateSymbol(name) {
//
// Technically they could all be derived from combinations of
// Function.prototype.{bind,call,apply} but that introduces lots of layers of
-// indirection and slowness given how un-optimized bind is.
-
-extrasUtils.simpleBind = function simpleBind(func, thisArg) {
- return function(...args) {
- return %reflect_apply(func, thisArg, args);
- };
-};
+// indirection.
extrasUtils.uncurryThis = function uncurryThis(func) {
return function(thisArg, ...args) {
@@ -191,11 +185,6 @@ extrasUtils.uncurryThis = function uncurryThis(func) {
};
};
-// We pass true to trigger the debugger's on exception handler.
-extrasUtils.rejectPromise = function rejectPromise(promise, reason) {
- %promise_internal_reject(promise, reason, true);
-}
-
extrasUtils.markPromiseAsHandled = function markPromiseAsHandled(promise) {
%PromiseMarkAsHandled(promise);
};
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 7fa638fa89..18998cf9be 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -15,19 +15,11 @@
var ArrayToString = utils.ImportNow("ArrayToString");
var GetIterator;
var GetMethod;
-var GlobalArray = global.Array;
-var GlobalArrayBuffer = global.ArrayBuffer;
-var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
-var GlobalObject = global.Object;
var InnerArrayJoin;
var InnerArraySort;
var InnerArrayToLocaleString;
var InternalArray = utils.InternalArray;
-var MathMax = global.Math.max;
-var MathMin = global.Math.min;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro TYPED_ARRAYS(FUNCTION)
FUNCTION(Uint8Array, 1)
@@ -39,6 +31,8 @@ FUNCTION(Int32Array, 4)
FUNCTION(Float32Array, 4)
FUNCTION(Float64Array, 8)
FUNCTION(Uint8ClampedArray, 1)
+FUNCTION(BigUint64Array, 8)
+FUNCTION(BigInt64Array, 8)
endmacro
macro DECLARE_GLOBALS(NAME, SIZE)
@@ -47,14 +41,6 @@ endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
-macro IS_ARRAYBUFFER(arg)
-(%_ClassOf(arg) === 'ArrayBuffer')
-endmacro
-
-macro IS_SHAREDARRAYBUFFER(arg)
-(%_ClassOf(arg) === 'SharedArrayBuffer')
-endmacro
-
macro IS_TYPEDARRAY(arg)
(%_IsTypedArray(arg))
endmacro
@@ -69,25 +55,6 @@ utils.Import(function(from) {
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
});
-// ES2015 7.3.20
-function SpeciesConstructor(object, defaultConstructor) {
- var constructor = object.constructor;
- if (IS_UNDEFINED(constructor)) {
- return defaultConstructor;
- }
- if (!IS_RECEIVER(constructor)) {
- throw %make_type_error(kConstructorNotReceiver);
- }
- var species = constructor[speciesSymbol];
- if (IS_NULL_OR_UNDEFINED(species)) {
- return defaultConstructor;
- }
- if (%IsConstructor(species)) {
- return species;
- }
- throw %make_type_error(kSpeciesNotConstructor);
-}
-
// --------------- Typed Arrays ---------------------
// ES6 section 22.2.3.5.1 ValidateTypedArray ( O )
@@ -98,20 +65,6 @@ function ValidateTypedArray(array, methodName) {
throw %make_type_error(kDetachedOperation, methodName);
}
-function TypedArrayDefaultConstructor(typedArray) {
- switch (%_ClassOf(typedArray)) {
-macro TYPED_ARRAY_CONSTRUCTOR_CASE(NAME, ELEMENT_SIZE)
- case "NAME":
- return GlobalNAME;
-endmacro
-TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR_CASE)
- }
- // The TypeError should not be generated since all callers should
- // have already called ValidateTypedArray.
- throw %make_type_error(kIncompatibleMethodReceiver,
- "TypedArrayDefaultConstructor", this);
-}
-
function TypedArrayCreate(constructor, arg0, arg1, arg2) {
if (IS_UNDEFINED(arg1)) {
var newTypedArray = new constructor(arg0);
@@ -125,174 +78,6 @@ function TypedArrayCreate(constructor, arg0, arg1, arg2) {
return newTypedArray;
}
-function TypedArraySpeciesCreate(exemplar, arg0, arg1, arg2) {
- var defaultConstructor = TypedArrayDefaultConstructor(exemplar);
- var constructor = SpeciesConstructor(exemplar, defaultConstructor);
- return TypedArrayCreate(constructor, arg0, arg1, arg2);
-}
-
-macro TYPED_ARRAY_CONSTRUCTOR(NAME, ELEMENT_SIZE)
-function NAMEConstructByIterable(obj, iterable, iteratorFn) {
- if (%IterableToListCanBeElided(iterable)) {
- // This .length access is unobservable, because it being observable would
- // mean that iteration has side effects, and we wouldn't reach this path.
- %typed_array_construct_by_array_like(
- obj, iterable, iterable.length, ELEMENT_SIZE);
- } else {
- var list = new InternalArray();
- // Reading the Symbol.iterator property of iterable twice would be
- // observable with getters, so instead, we call the function which
- // was already looked up, and wrap it in another iterable. The
- // __proto__ of the new iterable is set to null to avoid any chance
- // of modifications to Object.prototype being observable here.
- var iterator = %_Call(iteratorFn, iterable);
- var newIterable = {
- __proto__: null
- };
- // TODO(littledan): Computed properties don't work yet in nosnap.
- // Rephrase when they do.
- newIterable[iteratorSymbol] = function() { return iterator; }
- for (var value of newIterable) {
- list.push(value);
- }
- %typed_array_construct_by_array_like(obj, list, list.length, ELEMENT_SIZE);
- }
-}
-
-// ES#sec-typedarray-typedarray TypedArray ( typedArray )
-function NAMEConstructByTypedArray(obj, typedArray) {
- // TODO(littledan): Throw on detached typedArray
- var srcData = %TypedArrayGetBuffer(typedArray);
- var length = %_TypedArrayGetLength(typedArray);
- var byteLength = %_ArrayBufferViewGetByteLength(typedArray);
- var newByteLength = length * ELEMENT_SIZE;
- %typed_array_construct_by_array_like(obj, typedArray, length, ELEMENT_SIZE);
- // The spec requires that constructing a typed array using a SAB-backed typed
- // array use the ArrayBuffer constructor, not the species constructor. See
- // https://tc39.github.io/ecma262/#sec-typedarray-typedarray.
- var bufferConstructor = IS_SHAREDARRAYBUFFER(srcData)
- ? GlobalArrayBuffer
- : SpeciesConstructor(srcData, GlobalArrayBuffer);
- var prototype = bufferConstructor.prototype;
- // TODO(littledan): Use the right prototype based on bufferConstructor's realm
- if (IS_RECEIVER(prototype) && prototype !== GlobalArrayBufferPrototype) {
- %InternalSetPrototype(%TypedArrayGetBuffer(obj), prototype);
- }
-}
-
-function NAMEConstructor(arg1, arg2, arg3) {
- if (!IS_UNDEFINED(new.target)) {
- if (IS_ARRAYBUFFER(arg1) || IS_SHAREDARRAYBUFFER(arg1)) {
- %typed_array_construct_by_array_buffer(
- this, arg1, arg2, arg3, ELEMENT_SIZE);
- } else if (IS_TYPEDARRAY(arg1)) {
- NAMEConstructByTypedArray(this, arg1);
- } else if (IS_RECEIVER(arg1)) {
- var iteratorFn = arg1[iteratorSymbol];
- if (IS_UNDEFINED(iteratorFn)) {
- %typed_array_construct_by_array_like(
- this, arg1, arg1.length, ELEMENT_SIZE);
- } else {
- NAMEConstructByIterable(this, arg1, iteratorFn);
- }
- } else {
- %typed_array_construct_by_length(this, arg1, ELEMENT_SIZE);
- }
- } else {
- throw %make_type_error(kConstructorNotFunction, "NAME")
- }
-}
-
-function NAMESubArray(begin, end) {
- var beginInt = TO_INTEGER(begin);
- if (!IS_UNDEFINED(end)) {
- var endInt = TO_INTEGER(end);
- var srcLength = %_TypedArrayGetLength(this);
- } else {
- var srcLength = %_TypedArrayGetLength(this);
- var endInt = srcLength;
- }
-
- if (beginInt < 0) {
- beginInt = MathMax(0, srcLength + beginInt);
- } else {
- beginInt = MathMin(beginInt, srcLength);
- }
-
- if (endInt < 0) {
- endInt = MathMax(0, srcLength + endInt);
- } else {
- endInt = MathMin(endInt, srcLength);
- }
-
- if (endInt < beginInt) {
- endInt = beginInt;
- }
-
- var newLength = endInt - beginInt;
- var beginByteOffset =
- %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
- return TypedArraySpeciesCreate(this, %TypedArrayGetBuffer(this),
- beginByteOffset, newLength);
-}
-endmacro
-
-TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
-
-DEFINE_METHOD(
- GlobalTypedArray.prototype,
- subarray(begin, end) {
- switch (%_ClassOf(this)) {
-macro TYPED_ARRAY_SUBARRAY_CASE(NAME, ELEMENT_SIZE)
- case "NAME":
- return %_Call(NAMESubArray, this, begin, end);
-endmacro
-TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
- }
- throw %make_type_error(kIncompatibleMethodReceiver,
- "get %TypedArray%.prototype.subarray", this);
- }
-);
-
-
-// The following functions cannot be made efficient on sparse arrays while
-// preserving the semantics, since the calls to the receiver function can add
-// or delete elements from the array.
-function InnerTypedArrayFilter(f, receiver, array, length, result) {
- var result_length = 0;
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (%_Call(f, receiver, element, i, array)) {
- %CreateDataProperty(result, result_length, element);
- result_length++;
- }
- }
- }
- return result;
-}
-
-
-// ES6 draft 07-15-13, section 22.2.3.9
-DEFINE_METHOD_LEN(
- GlobalTypedArray.prototype,
- filter(f, thisArg) {
- ValidateTypedArray(this, "%TypeArray%.prototype.filter");
-
- var length = %_TypedArrayGetLength(this);
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- var result = new InternalArray();
- InnerTypedArrayFilter(f, thisArg, this, length, result);
- var captured = result.length;
- var output = TypedArraySpeciesCreate(this, captured);
- for (var i = 0; i < captured; i++) {
- output[i] = result[i];
- }
- return output;
- },
- 1 /* Set function length. */
-);
-
// ES6 draft 05-18-15, section 22.2.3.25
DEFINE_METHOD(
GlobalTypedArray.prototype,
@@ -339,71 +124,6 @@ DEFINE_METHOD(
}
);
-
-// ES6 draft 08-24-14, section 22.2.2.2
-DEFINE_METHOD(
- GlobalTypedArray,
- of() {
- var length = arguments.length;
- var array = TypedArrayCreate(this, length);
- for (var i = 0; i < length; i++) {
- array[i] = arguments[i];
- }
- return array;
- }
-);
-
-
-// ES#sec-iterabletoarraylike Runtime Semantics: IterableToArrayLike( items )
-function IterableToArrayLike(items) {
- var iterable = GetMethod(items, iteratorSymbol);
- if (!IS_UNDEFINED(iterable)) {
- var internal_array = new InternalArray();
- var i = 0;
- for (var value of
- { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
- internal_array[i] = value;
- i++;
- }
- var array = [];
- %MoveArrayContents(internal_array, array);
- return array;
- }
- return TO_OBJECT(items);
-}
-
-
-// ES#sec-%typedarray%.from
-// %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] )
-DEFINE_METHOD_LEN(
- GlobalTypedArray,
- 'from'(source, mapfn, thisArg) {
- if (!%IsConstructor(this)) throw %make_type_error(kNotConstructor, this);
- var mapping;
- if (!IS_UNDEFINED(mapfn)) {
- if (!IS_CALLABLE(mapfn)) throw %make_type_error(kCalledNonCallable, this);
- mapping = true;
- } else {
- mapping = false;
- }
- var arrayLike = IterableToArrayLike(source);
- var length = TO_LENGTH(arrayLike.length);
- var targetObject = TypedArrayCreate(this, length);
- var value, mappedValue;
- for (var i = 0; i < length; i++) {
- value = arrayLike[i];
- if (mapping) {
- mappedValue = %_Call(mapfn, thisArg, value, i);
- } else {
- mappedValue = value;
- }
- targetObject[i] = mappedValue;
- }
- return targetObject;
- },
- 1 /* Set function length. */
-);
-
// TODO(bmeurer): Migrate this to a proper builtin.
function TypedArrayConstructor() {
throw %make_type_error(kConstructAbstractClass, "TypedArray");
@@ -417,11 +137,4 @@ function TypedArrayConstructor() {
%AddNamedProperty(GlobalTypedArray.prototype, "toString", ArrayToString,
DONT_ENUM);
-
-macro SETUP_TYPED_ARRAY(NAME, ELEMENT_SIZE)
- %SetCode(GlobalNAME, NAMEConstructor);
-endmacro
-
-TYPED_ARRAYS(SETUP_TYPED_ARRAY)
-
})
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 57e7fff8c5..2d9593091d 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -416,7 +416,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
if (!follow_expected) {
// If the expected transition failed, parse an internalized string and
// try to find a matching transition.
- key = ParseJsonInternalizedString();
+ key = ParseJsonString();
if (key.is_null()) return ReportUnexpectedCharacter();
target = TransitionsAccessor(map).FindTransitionToField(key);
@@ -491,7 +491,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
Handle<String> key;
Handle<Object> value;
- key = ParseJsonInternalizedString();
+ key = ParseJsonString();
if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
AdvanceSkipWhitespace();
@@ -812,7 +812,6 @@ Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
}
template <bool seq_one_byte>
-template <bool is_internalized>
Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
DCHECK_EQ('"', c0_);
Advance();
@@ -821,7 +820,7 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
return factory()->empty_string();
}
- if (seq_one_byte && is_internalized) {
+ if (seq_one_byte) {
// Fast path for existing internalized strings. If the string being
// parsed is not a known internalized string, contains backslashes or
// unexpectedly reaches the end of string, return with an empty handle.
@@ -829,9 +828,13 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
// We intentionally use local variables instead of fields, compute the hash
// while iterating the string, and manually inline the StringTable lookup
// here.
- uint32_t running_hash = isolate()->heap()->HashSeed();
+
int position = position_;
uc32 c0 = c0_;
+ uint32_t running_hash = isolate()->heap()->HashSeed();
+ uint32_t index = 0;
+ bool is_array_index = true;
+
do {
if (c0 == '\\') {
c0_ = c0;
@@ -845,6 +848,16 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
position_ = position;
return Handle<String>::null();
}
+ if (is_array_index) {
+ // With leading zero, the string has to be "0" to be a valid index.
+ if (!IsDecimalDigit(c0) || (position > position_ && index == 0)) {
+ is_array_index = false;
+ } else {
+ int d = c0 - '0';
+ is_array_index = index <= 429496729U - ((d + 3) >> 3);
+ index = (index * 10) + d;
+ }
+ }
running_hash = StringHasher::AddCharacterCore(running_hash,
static_cast<uint16_t>(c0));
position++;
@@ -856,9 +869,15 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
c0 = seq_source_->SeqOneByteStringGet(position);
} while (c0 != '"');
int length = position - position_;
- uint32_t hash = (length <= String::kMaxHashCalcLength)
- ? StringHasher::GetHashCore(running_hash)
- : static_cast<uint32_t>(length);
+ uint32_t hash;
+ if (is_array_index) {
+ hash =
+ StringHasher::MakeArrayIndexHash(index, length) >> String::kHashShift;
+ } else if (length <= String::kMaxHashCalcLength) {
+ hash = StringHasher::GetHashCore(running_hash);
+ } else {
+ hash = static_cast<uint32_t>(length);
+ }
Vector<const uint8_t> string_vector(seq_source_->GetChars() + position_,
length);
StringTable* string_table = isolate()->heap()->string_table();
@@ -877,12 +896,8 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
if (!element->IsTheHole(isolate()) &&
String::cast(element)->IsOneByteEqualTo(string_vector)) {
result = Handle<String>(String::cast(element), isolate());
-#ifdef DEBUG
- uint32_t hash_field =
- (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
- DCHECK_EQ(static_cast<int>(result->Hash()),
- static_cast<int>(hash_field >> String::kHashShift));
-#endif
+ DCHECK_EQ(result->Hash(),
+ (hash << String::kHashShift) >> String::kHashShift);
break;
}
entry = StringTable::NextProbe(entry, count++, capacity);
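The bound in the array-index fast path above is worth unpacking. A self-contained sketch (names assumed, not part of the patch) of the branch-free digit-append check:

#include <cassert>
#include <cstdint>

// Appending decimal digit d must keep index * 10 + d within the maximal
// valid array index, 2^32 - 2 = 4294967294. (d + 3) >> 3 is 0 for d <= 4
// and 1 for d >= 5, so the bound equals (4294967294 - d) / 10 exactly.
bool CanAppendDigit(uint32_t index, int d) {
  return index <= 429496729U - ((d + 3) >> 3);
}

int main() {
  assert(CanAppendDigit(429496729U, 4));   // 4294967294: still an index.
  assert(!CanAppendDigit(429496729U, 5));  // 4294967295: not an index.
}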
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 6566c92e40..d76f642b38 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -75,19 +75,14 @@ class JsonParser BASE_EMBEDDED {
// literals. The string must only be double-quoted (not single-quoted), and
// the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
// four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
- Handle<String> ParseJsonString() {
- return ScanJsonString<false>();
- }
-
bool ParseJsonString(Handle<String> expected);
- Handle<String> ParseJsonInternalizedString() {
- Handle<String> result = ScanJsonString<true>();
+ Handle<String> ParseJsonString() {
+ Handle<String> result = ScanJsonString();
if (result.is_null()) return result;
return factory()->InternalizeString(result);
}
- template <bool is_internalized>
Handle<String> ScanJsonString();
// Creates a new string and copies prefix[start..end] into the beginning
// of it. Then scans the rest of the string, adding characters after the
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.h b/deps/v8/src/libplatform/tracing/trace-buffer.h
index 16f3b2a12e..3c756b7a69 100644
--- a/deps/v8/src/libplatform/tracing/trace-buffer.h
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
-#define SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
+#ifndef V8_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
+#define V8_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
#include <memory>
#include <vector>
@@ -45,4 +45,4 @@ class TraceBufferRingBuffer : public TraceBuffer {
} // namespace platform
} // namespace v8
-#endif // SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
+#endif // V8_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.h b/deps/v8/src/libplatform/tracing/trace-writer.h
index 43d7cb6a90..7e1bdc24d6 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.h
+++ b/deps/v8/src/libplatform/tracing/trace-writer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
-#define SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
+#ifndef V8_LIBPLATFORM_TRACING_TRACE_WRITER_H_
+#define V8_LIBPLATFORM_TRACING_TRACE_WRITER_H_
#include "include/libplatform/v8-tracing.h"
@@ -30,4 +30,4 @@ class JSONTraceWriter : public TraceWriter {
} // namespace platform
} // namespace v8
-#endif // SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
+#endif // V8_LIBPLATFORM_TRACING_TRACE_WRITER_H_
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 75161fc7d6..492606475e 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -4,7 +4,7 @@
#include "src/libsampler/sampler.h"
-#if V8_OS_POSIX && !V8_OS_CYGWIN
+#if V8_OS_POSIX && !V8_OS_CYGWIN && !V8_OS_FUCHSIA
#define USE_SIGNALS
@@ -13,7 +13,7 @@
#include <signal.h>
#include <sys/time.h>
-#if !V8_OS_QNX && !V8_OS_FUCHSIA && !V8_OS_AIX
+#if !V8_OS_QNX && !V8_OS_AIX
#include <sys/syscall.h> // NOLINT
#endif
@@ -39,6 +39,28 @@
#include "src/base/win32-headers.h"
+#elif V8_OS_FUCHSIA
+
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+#include <zircon/syscalls/debug.h>
+#include <zircon/types.h>
+
+// TODO(wez): Remove this once the Fuchsia SDK has rolled.
+#if defined(ZX_THREAD_STATE_REGSET0)
+#define ZX_THREAD_STATE_GENERAL_REGS ZX_THREAD_STATE_REGSET0
+zx_status_t zx_thread_read_state(zx_handle_t h, uint32_t k, void* b, size_t l) {
+ uint32_t dummy_out_len = 0;
+ return zx_thread_read_state(h, k, b, static_cast<uint32_t>(l),
+ &dummy_out_len);
+}
+#if defined(__x86_64__)
+typedef zx_x86_64_general_regs_t zx_thread_state_general_regs_t;
+#else
+typedef zx_arm64_general_regs_t zx_thread_state_general_regs_t;
+#endif
+#endif // defined(ZX_THREAD_STATE_REGSET0)
+
#endif
#include <algorithm>
@@ -336,6 +358,28 @@ class Sampler::PlatformData {
private:
HANDLE profiled_thread_;
};
+
+#elif V8_OS_FUCHSIA
+
+class Sampler::PlatformData {
+ public:
+ PlatformData() {
+ zx_handle_duplicate(zx_thread_self(), ZX_RIGHT_SAME_RIGHTS,
+ &profiled_thread_);
+ }
+ ~PlatformData() {
+ if (profiled_thread_ != ZX_HANDLE_INVALID) {
+ zx_handle_close(profiled_thread_);
+ profiled_thread_ = ZX_HANDLE_INVALID;
+ }
+ }
+
+ zx_handle_t profiled_thread() { return profiled_thread_; }
+
+ private:
+ zx_handle_t profiled_thread_ = ZX_HANDLE_INVALID;
+};
+
#endif // USE_SIGNALS
@@ -415,7 +459,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
-#if V8_OS_LINUX || V8_OS_FUCHSIA
+#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
@@ -664,6 +708,53 @@ void Sampler::DoSample() {
ResumeThread(profiled_thread);
}
+#elif V8_OS_FUCHSIA
+
+void Sampler::DoSample() {
+ zx_handle_t profiled_thread = platform_data()->profiled_thread();
+ if (profiled_thread == ZX_HANDLE_INVALID) return;
+
+ if (zx_task_suspend(profiled_thread) != ZX_OK) return;
+
+ // Wait for the target thread to become suspended, or to exit.
+ // TODO(wez): There is currently no suspension count for threads, so there
+ // is a risk that some other caller resumes the thread in between our suspend
+ // and wait calls, causing us to miss the SUSPENDED signal. We apply a 100ms
+ // deadline to protect against hanging the sampler thread in this case.
+ zx_signals_t signals = 0;
+ zx_status_t suspended = zx_object_wait_one(
+ profiled_thread, ZX_THREAD_SUSPENDED | ZX_THREAD_TERMINATED,
+ zx_deadline_after(ZX_MSEC(100)), &signals);
+ if (suspended != ZX_OK || (signals & ZX_THREAD_SUSPENDED) == 0) {
+ zx_task_resume(profiled_thread, 0);
+ return;
+ }
+
+ // Fetch a copy of its "general register" states.
+ zx_thread_state_general_regs_t thread_state = {};
+ if (zx_thread_read_state(profiled_thread, ZX_THREAD_STATE_GENERAL_REGS,
+ &thread_state, sizeof(thread_state)) == ZX_OK) {
+ v8::RegisterState state;
+#if V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<void*>(thread_state.rip);
+ state.sp = reinterpret_cast<void*>(thread_state.rsp);
+ state.fp = reinterpret_cast<void*>(thread_state.rbp);
+#elif V8_HOST_ARCH_ARM64
+ state.pc = reinterpret_cast<void*>(thread_state.pc);
+ state.sp = reinterpret_cast<void*>(thread_state.sp);
+ state.fp = reinterpret_cast<void*>(thread_state.r[29]);
+#endif
+ SampleStack(state);
+ }
+
+ zx_task_resume(profiled_thread, 0);
+}
+
+// TODO(wez): Remove this once the Fuchsia SDK has rolled.
+#if defined(ZX_THREAD_STATE_REGSET0)
+#undef ZX_THREAD_STATE_GENERAL_REGS
+#endif
+
#endif // USE_SIGNALS
} // namespace sampler
diff --git a/deps/v8/src/locked-queue-inl.h b/deps/v8/src/locked-queue-inl.h
index 31e8bd2fd8..65c8736d7a 100644
--- a/deps/v8/src/locked-queue-inl.h
+++ b/deps/v8/src/locked-queue-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOCKED_QUEUE_INL_
-#define V8_LOCKED_QUEUE_INL_
+#ifndef V8_LOCKED_QUEUE_INL_H_
+#define V8_LOCKED_QUEUE_INL_H_
#include "src/base/atomic-utils.h"
#include "src/locked-queue.h"
@@ -88,4 +88,4 @@ inline bool LockedQueue<Record>::Peek(Record* record) const {
} // namespace internal
} // namespace v8
-#endif // V8_LOCKED_QUEUE_INL_
+#endif // V8_LOCKED_QUEUE_INL_H_
diff --git a/deps/v8/src/locked-queue.h b/deps/v8/src/locked-queue.h
index 5bb97c8a12..1667917329 100644
--- a/deps/v8/src/locked-queue.h
+++ b/deps/v8/src/locked-queue.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOCKED_QUEUE_
-#define V8_LOCKED_QUEUE_
+#ifndef V8_LOCKED_QUEUE_H_
+#define V8_LOCKED_QUEUE_H_
#include "src/allocation.h"
#include "src/base/platform/platform.h"
@@ -40,4 +40,4 @@ class LockedQueue final BASE_EMBEDDED {
} // namespace internal
} // namespace v8
-#endif // V8_LOCKED_QUEUE_
+#endif // V8_LOCKED_QUEUE_H_
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index f5d5be6848..90023e3731 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -16,6 +16,7 @@
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/instruction-stream.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/libsampler/sampler.h"
@@ -30,6 +31,8 @@
#include "src/tracing/tracing-category-observer.h"
#include "src/unicode-inl.h"
#include "src/vm-state-inl.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-objects.h"
#include "src/utils.h"
#include "src/version.h"
@@ -200,6 +203,24 @@ void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
+void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
+ wasm::WasmCode* code,
+ wasm::WasmName name) {
+ name_buffer_->Init(tag);
+ if (name.is_empty()) {
+ name_buffer_->AppendBytes("<wasm-unknown>");
+ } else {
+ name_buffer_->AppendBytes(name.start(), name.length());
+ }
+ name_buffer_->AppendByte('-');
+ if (code->IsAnonymous()) {
+ name_buffer_->AppendBytes("<anonymous>");
+ } else {
+ name_buffer_->AppendInt(code->index());
+ }
+ LogRecordedBuffer(code, name_buffer_->get(), name_buffer_->size());
+}
+
void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
String* source) {
name_buffer_->Init(CodeEventListener::REG_EXP_TAG);
@@ -207,6 +228,13 @@ void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
}
+void CodeEventLogger::InstructionStreamCreateEvent(
+ LogEventsAndTags tag, const InstructionStream* stream,
+ const char* description) {
+ name_buffer_->Init(tag);
+ name_buffer_->AppendBytes(description);
+ LogRecordedBuffer(stream, name_buffer_->get(), name_buffer_->size());
+}
// Linux perf tool logging support
class PerfBasicLogger : public CodeEventLogger {
@@ -221,6 +249,12 @@ class PerfBasicLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override;
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override;
+ void WriteLogRecordedBuffer(uintptr_t address, int size, const char* name,
+ int name_length);
// Extension added to V8 log file name to get the low-level log name.
static const char kFilenameFormatString[];
@@ -254,6 +288,19 @@ PerfBasicLogger::~PerfBasicLogger() {
perf_output_handle_ = nullptr;
}
+void PerfBasicLogger::WriteLogRecordedBuffer(uintptr_t address, int size,
+ const char* name,
+ int name_length) {
+ // Linux perf expects hex literals without a leading 0x, while some
+ // implementations of printf might prepend one when using the %p format
+ // for pointers, leading to wrongly formatted JIT symbol maps.
+ //
+ // Instead, we use the V8PRIxPTR format string and cast the pointer to
+ // uintptr_t, so that we have control over the exact output format.
+ base::OS::FPrint(perf_output_handle_, "%" V8PRIxPTR " %x %.*s\n", address,
+ size, name_length, name);
+}
+
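For illustration with hypothetical values: a 0x80-byte code object named Foo at address 0x7f9ac4001000 would be emitted as the line `7f9ac4001000 80 Foo`, matching the `<start-hex> <size-hex> <name>` records Linux perf reads from /tmp/perf-<pid>.map.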
void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
@@ -262,6 +309,19 @@ void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
return;
}
+ WriteLogRecordedBuffer(reinterpret_cast<uintptr_t>(code->instruction_start()),
+ code->instruction_size(), name, length);
+}
+
+void PerfBasicLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) {
+ WriteLogRecordedBuffer(
+ reinterpret_cast<uintptr_t>(code->instructions().start()),
+ code->instructions().length(), name, length);
+}
+
+void PerfBasicLogger::LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) {
// Linux perf expects hex literals without a leading 0x, while some
// implementations of printf might prepend one when using the %p format
// for pointers, leading to wrongly formatted JIT symbol maps.
@@ -269,8 +329,8 @@ void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
// Instead, we use the V8PRIxPTR format string and cast the pointer to
// uintptr_t, so that we have control over the exact output format.
base::OS::FPrint(perf_output_handle_, "%" V8PRIxPTR " %x %.*s\n",
- reinterpret_cast<uintptr_t>(code->instruction_start()),
- code->instruction_size(), length, name);
+ reinterpret_cast<uintptr_t>(stream->bytes()),
+ static_cast<int>(stream->byte_length()), length, name);
}
// Low-level logging support.
@@ -290,6 +350,10 @@ class LowLevelLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override;
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override;
// Low-level profiling event structures.
struct CodeCreateStruct {
@@ -386,6 +450,30 @@ void LowLevelLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
code->instruction_size());
}
+void LowLevelLogger::LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) {
+ CodeCreateStruct event;
+ event.name_size = length;
+ event.code_address = stream->bytes();
+ event.code_size = static_cast<int32_t>(stream->byte_length());
+ LogWriteStruct(event);
+ LogWriteBytes(name, length);
+ LogWriteBytes(reinterpret_cast<const char*>(stream->bytes()),
+ static_cast<int>(stream->byte_length()));
+}
+
+void LowLevelLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) {
+ CodeCreateStruct event;
+ event.name_size = length;
+ event.code_address = code->instructions().start();
+ event.code_size = code->instructions().length();
+ LogWriteStruct(event);
+ LogWriteBytes(name, length);
+ LogWriteBytes(reinterpret_cast<const char*>(code->instructions().start()),
+ code->instructions().length());
+}
+
void LowLevelLogger::CodeMoveEvent(AbstractCode* from, Address to) {
CodeMoveStruct event;
event.from_address = from->instruction_start();
@@ -425,6 +513,10 @@ class JitLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override;
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override;
JitCodeEventHandler code_event_handler_;
base::Mutex logger_mutex_;
@@ -453,6 +545,32 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
code_event_handler_(&event);
}
+void JitLogger::LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADDED;
+ event.code_start = stream->bytes();
+ event.code_len = stream->byte_length();
+ Handle<SharedFunctionInfo> shared_function_handle;
+ event.script = ToApiHandle<v8::UnboundScript>(shared_function_handle);
+ event.name.str = name;
+ event.name.len = length;
+ code_event_handler_(&event);
+}
+
+void JitLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADDED;
+ event.code_start = code->instructions().start();
+ event.code_len = code->instructions().length();
+ event.name.str = name;
+ event.name.len = length;
+ code_event_handler_(&event);
+}
+
void JitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
base::LockGuard<base::Mutex> guard(&logger_mutex_);
@@ -979,12 +1097,32 @@ namespace {
void AppendCodeCreateHeader(Log::MessageBuilder& msg,
CodeEventListener::LogEventsAndTags tag,
+ AbstractCode::Kind kind, uint8_t* address, int size,
+ base::ElapsedTimer* timer) {
+ msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
+ << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << kind
+ << Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
+ << reinterpret_cast<void*>(address) << Logger::kNext << size
+ << Logger::kNext;
+}
+
+void AppendCodeCreateHeader(Log::MessageBuilder& msg,
+ CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, base::ElapsedTimer* timer) {
+ AppendCodeCreateHeader(msg, tag, code->kind(), code->instruction_start(),
+ code->instruction_size(), timer);
+}
+
+void AppendCodeCreateHeader(Log::MessageBuilder& msg,
+ CodeEventListener::LogEventsAndTags tag,
+ const InstructionStream* stream,
+ base::ElapsedTimer* timer) {
+ // TODO(jgruber,v8:6666): In time, we'll need to support non-builtin streams.
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
- << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << code->kind()
+ << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << Code::BUILTIN
<< Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
- << reinterpret_cast<void*>(code->instruction_start()) << Logger::kNext
- << code->instruction_size() << Logger::kNext;
+ << reinterpret_cast<void*>(stream->bytes()) << Logger::kNext
+ << stream->byte_length() << Logger::kNext;
}
} // namespace
@@ -1026,6 +1164,21 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
msg.WriteToLogFile();
}
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ wasm::WasmCode* code, wasm::WasmName name) {
+ if (!is_logging_code_events()) return;
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(msg, tag, AbstractCode::Kind::WASM_FUNCTION,
+ code->instructions().start(),
+ code->instructions().length(), &timer_);
+ if (name.is_empty()) {
+ msg << "<unknown wasm>";
+ } else {
+ msg << name.start();
+ }
+ msg.WriteToLogFile();
+}
// Although, it is possible to extract source and line from
// the SharedFunctionInfo object, we left it to caller
@@ -1174,6 +1327,17 @@ void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
msg.WriteToLogFile();
}
+void Logger::InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) {
+ if (!is_logging_code_events()) return;
+ if (!FLAG_log_code || !log_->IsEnabled()) return;
+ Log::MessageBuilder msg(log_);
+ AppendCodeCreateHeader(msg, tag, stream, &timer_);
+ msg << description;
+ msg.WriteToLogFile();
+}
+
void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
if (!is_logging_code_events()) return;
MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(), to);
@@ -1489,6 +1653,24 @@ static int EnumerateCompiledFunctions(Heap* heap,
return compiled_funcs_count;
}
+static int EnumerateWasmModules(Heap* heap,
+ Handle<WasmCompiledModule>* modules) {
+ HeapIterator iterator(heap);
+ DisallowHeapAllocation no_gc;
+ int wasm_modules_count = 0;
+
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
+ obj = iterator.next()) {
+ if (WasmCompiledModule::IsWasmCompiledModule(obj)) {
+ WasmCompiledModule* module = WasmCompiledModule::cast(obj);
+ if (modules != nullptr) {
+ modules[wasm_modules_count] = Handle<WasmCompiledModule>(module);
+ }
+ wasm_modules_count++;
+ }
+ }
+ return wasm_modules_count;
+}
void Logger::LogCodeObject(Object* object) {
AbstractCode* code_object = AbstractCode::cast(object);
@@ -1517,7 +1699,7 @@ void Logger::LogCodeObject(Object* object) {
break;
case AbstractCode::WASM_FUNCTION:
description = "A Wasm function";
- tag = CodeEventListener::STUB_TAG;
+ tag = CodeEventListener::FUNCTION_TAG;
break;
case AbstractCode::JS_TO_WASM_FUNCTION:
description = "A JavaScript to Wasm adapter";
@@ -1545,6 +1727,12 @@ void Logger::LogCodeObject(Object* object) {
PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
}
+void Logger::LogInstructionStream(Code* code, const InstructionStream* stream) {
+ DCHECK(Builtins::IsBuiltin(code));
+ const char* description = isolate_->builtins()->name(code->builtin_index());
+ CodeEventListener::LogEventsAndTags tag = CodeEventListener::BUILTIN_TAG;
+ PROFILE(isolate_, InstructionStreamCreateEvent(tag, stream, description));
+}
void Logger::LogCodeObjects() {
Heap* heap = isolate_->heap();
@@ -1637,13 +1825,12 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
}
}
-
void Logger::LogCompiledFunctions() {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
const int compiled_funcs_count =
EnumerateCompiledFunctions(heap, nullptr, nullptr);
- ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
+ ScopedVector<Handle<SharedFunctionInfo>> sfis(compiled_funcs_count);
ScopedVector<Handle<AbstractCode> > code_objects(compiled_funcs_count);
EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
@@ -1654,8 +1841,14 @@ void Logger::LogCompiledFunctions() {
continue;
LogExistingFunction(sfis[i], code_objects[i]);
}
-}
+ const int compiled_wasm_modules_count = EnumerateWasmModules(heap, nullptr);
+ ScopedVector<Handle<WasmCompiledModule>> modules(compiled_wasm_modules_count);
+ EnumerateWasmModules(heap, modules.start());
+ for (int i = 0; i < compiled_wasm_modules_count; ++i) {
+ modules[i]->LogWasmCodes(isolate_);
+ }
+}
void Logger::LogAccessorCallbacks() {
Heap* heap = isolate_->heap();
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 8305eb1001..b540c86173 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -74,6 +74,11 @@ class Profiler;
class ProfilerListener;
class RuntimeCallTimer;
class Ticker;
+class WasmCompiledModule;
+
+namespace wasm {
+class WasmCode;
+}
#undef LOG
#define LOG(isolate, Call) \
@@ -176,11 +181,16 @@ class Logger : public CodeEventListener {
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, SharedFunctionInfo* shared,
Name* source, int line, int column);
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ wasm::WasmCode* code, wasm::WasmName name);
// Emits a code deoptimization event.
void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
void RegExpCodeCreateEvent(AbstractCode* code, String* source);
+ void InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description);
// Emits a code move event.
void CodeMoveEvent(AbstractCode* from, Address to);
// Emits a code line info record event.
@@ -234,6 +244,7 @@ class Logger : public CodeEventListener {
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code);
+ void LogCompiledModule(Handle<WasmCompiledModule> module);
// Logs all compiled functions found in the heap.
void LogCompiledFunctions();
// Logs all accessor callbacks found in the heap.
@@ -257,6 +268,9 @@ class Logger : public CodeEventListener {
// Used for logging stubs found in the snapshot.
void LogCodeObject(Object* code_object);
+ // Used for logging off-heap instruction streams.
+ void LogInstructionStream(Code* code, const InstructionStream* stream);
+
private:
explicit Logger(Isolate* isolate);
~Logger();
@@ -379,8 +393,13 @@ class CodeEventLogger : public CodeEventListener {
void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source, int line,
int column) override;
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ wasm::WasmName name) override;
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void InstructionStreamCreateEvent(LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) override;
void CallbackEvent(Name* name, Address entry_point) override {}
void GetterCallbackEvent(Name* name, Address entry_point) override {}
void SetterCallbackEvent(Name* name, Address entry_point) override {}
@@ -394,6 +413,10 @@ class CodeEventLogger : public CodeEventListener {
virtual void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) = 0;
+ virtual void LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) = 0;
+ virtual void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) = 0;
NameBuffer* name_buffer_;
};
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 71902dff84..58ad9318dd 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -211,8 +211,7 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
auto root =
handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
if (root->IsNull(isolate)) {
- unsigned int magic = 0xBBBBBBBB;
- isolate->PushStackTraceAndDie(magic, *receiver, nullptr, magic);
+ isolate->PushStackTraceAndDie(*receiver);
}
return Handle<JSReceiver>::cast(root);
}
@@ -238,6 +237,7 @@ void LookupIterator::ReloadPropertyInformation() {
}
namespace {
+
bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
static uint32_t context_slots[] = {
#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype, size) \
@@ -253,43 +253,49 @@ bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
std::begin(context_slots), std::end(context_slots),
[=](uint32_t slot) { return isolate->IsInAnyContext(holder, slot); });
}
+
} // namespace
void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
if (*name_ == heap()->constructor_string()) {
- if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+ if (!isolate_->IsSpeciesLookupChainIntact()) return;
// Setting the constructor property could change an instance's @@species
- if (holder_->IsJSArray() || holder_->IsJSTypedArray()) {
+ if (holder_->IsJSArray() || holder_->IsJSPromise() ||
+ holder_->IsJSTypedArray()) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
- isolate_->InvalidateArraySpeciesProtector();
+ isolate_->InvalidateSpeciesProtector();
} else if (holder_->map()->is_prototype_map()) {
DisallowHeapAllocation no_gc;
- // Setting the constructor of Array.prototype or %TypedArray%.prototype of
- // any realm also needs to invalidate the species protector.
+ // Setting the constructor of Array.prototype, Promise.prototype or
+ // %TypedArray%.prototype of any realm also needs to invalidate the
+ // @@species protector.
// For typed arrays, we check a prototype of this holder since TypedArrays
// have different prototypes for each type, and their parent prototype
// points to the same TYPED_ARRAY_PROTOTYPE.
if (isolate_->IsInAnyContext(*holder_,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
+ isolate_->IsInAnyContext(*holder_,
+ Context::PROMISE_PROTOTYPE_INDEX) ||
isolate_->IsInAnyContext(holder_->map()->prototype(),
Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
isolate_->CountUsage(v8::Isolate::UseCounterFeature::
kArrayPrototypeConstructorModified);
- isolate_->InvalidateArraySpeciesProtector();
+ isolate_->InvalidateSpeciesProtector();
}
}
} else if (*name_ == heap()->species_symbol()) {
- if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
- // Setting the Symbol.species property of any Array or TypedArray
- // constructor invalidates the species protector
+ if (!isolate_->IsSpeciesLookupChainIntact()) return;
+ // Setting the Symbol.species property of any Array, Promise or TypedArray
+ // constructor invalidates the @@species protector
if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX) ||
+ isolate_->IsInAnyContext(*holder_, Context::PROMISE_FUNCTION_INDEX) ||
IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
- isolate_->InvalidateArraySpeciesProtector();
+ isolate_->InvalidateSpeciesProtector();
}
} else if (*name_ == heap()->is_concat_spreadable_symbol()) {
if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
@@ -299,6 +305,14 @@ void LookupIterator::InternalUpdateProtector() {
if (holder_->IsJSArray()) {
isolate_->InvalidateArrayIteratorProtector();
}
+ } else if (*name_ == heap()->then_string()) {
+ if (!isolate_->IsPromiseThenLookupChainIntact()) return;
+ // Setting the "then" property on any JSPromise instance or on the
+ // initial %PromisePrototype% invalidates the Promise#then protector.
+ if (holder_->IsJSPromise() ||
+ isolate_->IsInAnyContext(*holder_, Context::PROMISE_PROTOTYPE_INDEX)) {
+ isolate_->InvalidatePromiseThenProtector();
+ }
}
}
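
For context, a minimal sketch of how a fast path consumes the protector that InternalUpdateProtector() invalidates (the call site below is hypothetical; the real consumers are builtins and CodeStubAssembler code):

// Sketch, assuming an Isolate* in scope inside V8:
if (isolate->IsSpeciesLookupChainIntact()) {
  // Nobody has patched `constructor` or @@species on Array, Promise or
  // %TypedArray%, so the default constructor can be used unobserved.
} else {
  // Spec-compliant slow path: read constructor[Symbol.species] explicitly.
}
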
@@ -306,35 +320,41 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
DCHECK(state_ == DATA || state_ == ACCESSOR);
DCHECK(HolderIsReceiverOrHiddenPrototype());
- Handle<JSObject> holder = GetHolder<JSObject>();
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
+ // JSProxy does not have fast properties so we do an early return.
+ DCHECK_IMPLIES(holder->IsJSProxy(), !holder->HasFastProperties());
+ DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
+ if (holder->IsJSProxy()) return;
+
+ Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- ElementsKind kind = holder->GetElementsKind();
+ ElementsKind kind = holder_obj->GetElementsKind();
ElementsKind to = value->OptimalElementsKind();
if (IsHoleyOrDictionaryElementsKind(kind)) to = GetHoleyElementsKind(to);
to = GetMoreGeneralElementsKind(kind, to);
if (kind != to) {
- JSObject::TransitionElementsKind(holder, to);
+ JSObject::TransitionElementsKind(holder_obj, to);
}
// Copy the backing store if it is copy-on-write.
if (IsSmiOrObjectElementsKind(to)) {
- JSObject::EnsureWritableFastElements(holder);
+ JSObject::EnsureWritableFastElements(holder_obj);
}
return;
}
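
The kind-generalization step above can be traced with concrete values (a sketch assuming the usual ElementsKind lattice; not part of the patch):

// Example: storing a boxed double into holey SMI-only storage.
ElementsKind kind = HOLEY_SMI_ELEMENTS;      // holder_obj->GetElementsKind()
ElementsKind to = PACKED_DOUBLE_ELEMENTS;    // value->OptimalElementsKind()
to = GetHoleyElementsKind(to);               // keep holes: HOLEY_DOUBLE_ELEMENTS
to = GetMoreGeneralElementsKind(kind, to);   // HOLEY_DOUBLE_ELEMENTS
// kind != to, so TransitionElementsKind runs before the store.
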
- if (holder->IsJSGlobalObject()) {
+ if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder)->global_dictionary());
+ JSGlobalObject::cast(*holder_obj)->global_dictionary());
Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()));
property_details_ = cell->property_details();
PropertyCell::PrepareForValue(dictionary, dictionary_entry(), value,
property_details_);
return;
}
- if (!holder->HasFastProperties()) return;
+ if (!holder_obj->HasFastProperties()) return;
PropertyConstness new_constness = kConst;
if (FLAG_track_constant_fields) {
@@ -348,7 +368,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
new_constness = kMutable;
}
- Handle<Map> old_map(holder->map(), isolate_);
+ Handle<Map> old_map(holder_obj->map(), isolate_);
Handle<Map> new_map = Map::PrepareForDataProperty(
old_map, descriptor_number(), new_constness, value);
@@ -361,7 +381,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
return;
}
- JSObject::MigrateToMap(holder, new_map);
+ JSObject::MigrateToMap(holder_obj, new_map);
ReloadPropertyInformation<false>();
}
@@ -370,38 +390,47 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes) {
DCHECK(state_ == DATA || state_ == ACCESSOR);
DCHECK(HolderIsReceiverOrHiddenPrototype());
- Handle<JSObject> holder = GetHolder<JSObject>();
+
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
+
+ // Property details can never change for private fields.
+ if (holder->IsJSProxy()) {
+ DCHECK(name()->IsPrivate());
+ return;
+ }
+
+ Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- DCHECK(!holder->HasFixedTypedArrayElements());
- DCHECK(attributes != NONE || !holder->HasFastElements());
- Handle<FixedArrayBase> elements(holder->elements());
- holder->GetElementsAccessor()->Reconfigure(holder, elements, number_, value,
- attributes);
+ DCHECK(!holder_obj->HasFixedTypedArrayElements());
+ DCHECK(attributes != NONE || !holder_obj->HasFastElements());
+ Handle<FixedArrayBase> elements(holder_obj->elements());
+ holder_obj->GetElementsAccessor()->Reconfigure(holder_obj, elements,
+ number_, value, attributes);
ReloadPropertyInformation<true>();
- } else if (holder->HasFastProperties()) {
- Handle<Map> old_map(holder->map(), isolate_);
+ } else if (holder_obj->HasFastProperties()) {
+ Handle<Map> old_map(holder_obj->map(), isolate_);
Handle<Map> new_map = Map::ReconfigureExistingProperty(
old_map, descriptor_number(), i::kData, attributes);
// Force mutable to avoid changing constant value by reconfiguring
// kData -> kAccessor -> kData.
new_map = Map::PrepareForDataProperty(new_map, descriptor_number(),
kMutable, value);
- JSObject::MigrateToMap(holder, new_map);
+ JSObject::MigrateToMap(holder_obj, new_map);
ReloadPropertyInformation<false>();
}
- if (!IsElement() && !holder->HasFastProperties()) {
+ if (!IsElement() && !holder_obj->HasFastProperties()) {
PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
- if (holder->IsJSGlobalObject()) {
+ if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder)->global_dictionary());
+ JSGlobalObject::cast(*holder_obj)->global_dictionary());
Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
dictionary, dictionary_entry(), value, details);
cell->set_value(*value);
property_details_ = cell->property_details();
} else {
- Handle<NameDictionary> dictionary(holder->property_dictionary());
+ Handle<NameDictionary> dictionary(holder_obj->property_dictionary());
PropertyDetails original_details =
dictionary->DetailsAt(dictionary_entry());
int enumeration_index = original_details.dictionary_index();
@@ -417,7 +446,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- holder->JSObjectVerify();
+ holder->HeapObjectVerify();
}
#endif
}
@@ -427,9 +456,10 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
// Returns true if a new transition has been created, or false if an existing
// transition was followed.
bool LookupIterator::PrepareTransitionToDataProperty(
- Handle<JSObject> receiver, Handle<Object> value,
+ Handle<JSReceiver> receiver, Handle<Object> value,
PropertyAttributes attributes, Object::StoreFromKeyed store_mode) {
- DCHECK(receiver.is_identical_to(GetStoreTarget()));
+ DCHECK_IMPLIES(receiver->IsJSProxy(), name()->IsPrivate());
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
if (state_ == TRANSITION) return false;
if (!IsElement() && name()->IsPrivate()) {
@@ -497,10 +527,11 @@ bool LookupIterator::PrepareTransitionToDataProperty(
return created_new_map;
}
-void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
+void LookupIterator::ApplyTransitionToDataProperty(
+ Handle<JSReceiver> receiver) {
DCHECK_EQ(TRANSITION, state_);
- DCHECK(receiver.is_identical_to(GetStoreTarget()));
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
holder_ = receiver;
if (receiver->IsJSGlobalObject()) {
JSObject::InvalidatePrototypeChains(receiver->map());
@@ -509,7 +540,10 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
}
Handle<Map> transition = transition_map();
bool simple_transition = transition->GetBackPointer() == receiver->map();
- JSObject::MigrateToMap(receiver, transition);
+
+ if (!receiver->IsJSProxy()) {
+ JSObject::MigrateToMap(Handle<JSObject>::cast(receiver), transition);
+ }
if (simple_transition) {
int number = transition->LastAdded();
@@ -520,7 +554,7 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
Handle<NameDictionary> dictionary(receiver->property_dictionary(),
isolate_);
int entry;
- if (receiver->map()->is_prototype_map()) {
+ if (receiver->map()->is_prototype_map() && receiver->IsJSObject()) {
JSObject::InvalidatePrototypeChains(receiver->map());
}
dictionary = NameDictionary::Add(dictionary, name(),
@@ -546,6 +580,7 @@ void LookupIterator::Delete() {
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->Delete(object, number_);
} else {
+ DCHECK(!name()->IsPrivateField());
bool is_prototype_map = holder->map()->is_prototype_map();
RuntimeCallTimerScope stats_scope(
isolate_, is_prototype_map
@@ -575,7 +610,7 @@ void LookupIterator::TransitionToAccessorProperty(
// Can only be called when the receiver is a JSObject. JSProxy has to be
// handled via a trap. Adding properties to primitive values is not
// observable.
- Handle<JSObject> receiver = GetStoreTarget();
+ Handle<JSObject> receiver = GetStoreTarget<JSObject>();
if (!IsElement() && name()->IsPrivate()) {
attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
}
@@ -639,7 +674,7 @@ void LookupIterator::TransitionToAccessorProperty(
void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
PropertyAttributes attributes) {
- Handle<JSObject> receiver = GetStoreTarget();
+ Handle<JSObject> receiver = GetStoreTarget<JSObject>();
holder_ = receiver;
PropertyDetails details(kAccessor, attributes, PropertyCellType::kMutable);
@@ -868,6 +903,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
JSGlobalObject::cast(*holder)->global_dictionary();
dictionary->CellAt(dictionary_entry())->set_value(*value);
} else {
+ DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
NameDictionary* dictionary = holder->property_dictionary();
dictionary->ValueAtPut(dictionary_entry(), *value);
}
@@ -883,7 +919,7 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
switch (interceptor_state_) {
case InterceptorState::kUninitialized:
interceptor_state_ = InterceptorState::kSkipNonMasking;
- // Fall through.
+ V8_FALLTHROUGH;
case InterceptorState::kSkipNonMasking:
return true;
case InterceptorState::kProcessNonMasking:
@@ -934,13 +970,13 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
if (map->is_access_check_needed()) {
if (is_element || !name_->IsPrivate()) return ACCESS_CHECK;
}
- // Fall through.
+ V8_FALLTHROUGH;
case ACCESS_CHECK:
if (check_interceptor() && HasInterceptor<is_element>(map) &&
!SkipInterceptor<is_element>(JSObject::cast(holder))) {
if (is_element || !name_->IsPrivate()) return INTERCEPTOR;
}
- // Fall through.
+ V8_FALLTHROUGH;
case INTERCEPTOR:
if (!is_element && map->IsJSGlobalObjectMap()) {
GlobalDictionary* dict =
@@ -996,6 +1032,7 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
number_ = static_cast<uint32_t>(number);
property_details_ = descriptors->GetDetails(number_);
} else {
+ DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
NameDictionary* dict = holder->property_dictionary();
int number = dict->FindEntry(name_);
if (number == NameDictionary::kNotFound) return NotFound(holder);
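
The V8_FALLTHROUGH annotations above replace bare "Fall through." comments with something the compiler can check. A sketch of the macro, which lives in src/base/compiler-specific.h (the exact guards here are an assumption):

#if defined(__clang__)
#define V8_FALLTHROUGH [[clang::fallthrough]]  // verified by -Wimplicit-fallthrough
#else
#define V8_FALLTHROUGH
#endif
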
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 9ea2d77cf6..e107f534df 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -176,17 +176,17 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
Factory* factory() const { return isolate_->factory(); }
Handle<Object> GetReceiver() const { return receiver_; }
- Handle<JSObject> GetStoreTarget() const {
- DCHECK(receiver_->IsJSObject());
+ template <class T>
+ Handle<T> GetStoreTarget() const {
+ DCHECK(receiver_->IsJSReceiver());
if (receiver_->IsJSGlobalProxy()) {
Map* map = JSGlobalProxy::cast(*receiver_)->map();
if (map->has_hidden_prototype()) {
return handle(JSGlobalObject::cast(map->prototype()), isolate_);
}
}
- return Handle<JSObject>::cast(receiver_);
+ return Handle<T>::cast(receiver_);
}
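
A usage sketch for the now-templated accessor (hypothetical call sites, not from this patch):

// Proxy-aware store paths ask for the common supertype; object-only paths
// keep the tighter type, and Handle<T>::cast still DCHECKs the instance type.
Handle<JSReceiver> target = it.GetStoreTarget<JSReceiver>();  // JSProxy ok
Handle<JSObject> object = it.GetStoreTarget<JSObject>();      // objects only
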
-
bool is_dictionary_holder() const { return !holder_->HasFastProperties(); }
Handle<Map> transition_map() const {
DCHECK_EQ(TRANSITION, state_);
@@ -213,13 +213,13 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
bool HasAccess() const;
/* PROPERTY */
- bool ExtendingNonExtensible(Handle<JSObject> receiver) {
- DCHECK(receiver.is_identical_to(GetStoreTarget()));
+ bool ExtendingNonExtensible(Handle<JSReceiver> receiver) {
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
return !receiver->map()->is_extensible() &&
(IsElement() || !name_->IsPrivate());
}
void PrepareForDataProperty(Handle<Object> value);
- bool PrepareTransitionToDataProperty(Handle<JSObject> receiver,
+ bool PrepareTransitionToDataProperty(Handle<JSReceiver> receiver,
Handle<Object> value,
PropertyAttributes attributes,
Object::StoreFromKeyed store_mode);
@@ -227,10 +227,10 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
DCHECK_EQ(TRANSITION, state_);
return transition_->IsPropertyCell() ||
(transition_map()->is_dictionary_map() &&
- !GetStoreTarget()->HasFastProperties()) ||
+ !GetStoreTarget<JSReceiver>()->HasFastProperties()) ||
transition_map()->GetBackPointer()->IsMap();
}
- void ApplyTransitionToDataProperty(Handle<JSObject> receiver);
+ void ApplyTransitionToDataProperty(Handle<JSReceiver> receiver);
void ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes);
void Delete();
@@ -275,11 +275,12 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
inline void UpdateProtector() {
if (IsElement()) return;
// This list must be kept in sync with
- // CodeStubAssembler::HasAssociatedProtector!
+ // CodeStubAssembler::CheckForAssociatedProtector!
if (*name_ == heap()->is_concat_spreadable_symbol() ||
*name_ == heap()->constructor_string() ||
*name_ == heap()->species_symbol() ||
- *name_ == heap()->iterator_symbol()) {
+ *name_ == heap()->iterator_symbol() ||
+ *name_ == heap()->then_string()) {
InternalUpdateProtector();
}
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 923535517a..1a1a5b29ff 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -259,6 +259,7 @@ class ErrorUtils : public AllStatic {
T(Debugger, "Debugger: %") \
T(DebuggerLoading, "Error loading debugger") \
T(DefaultOptionsMissing, "Internal % error. Default options are missing.") \
+ T(DeletePrivateField, "Private fields can not be deleted") \
T(UncaughtException, "Uncaught %") \
T(Unsupported, "Not supported") \
T(WrongServiceType, "Internal error, wrong service type: %") \
@@ -340,6 +341,7 @@ class ErrorUtils : public AllStatic {
T(InvalidRegExpExecResult, \
"RegExp exec method returned something other than an Object or null") \
T(IteratorResultNotAnObject, "Iterator result % is not an object") \
+ T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
T(LanguageID, "Language ID should be string or object.") \
T(MethodCalledOnWrongObject, \
@@ -598,6 +600,7 @@ class ErrorUtils : public AllStatic {
T(IllegalLanguageModeDirective, \
"Illegal '%' directive in function with non-simple parameter list") \
T(IllegalReturn, "Illegal return statement") \
+ T(IntrinsicWithSpread, "Intrinsic calls do not support spread arguments") \
T(InvalidRestBindingPattern, \
"`...` must be followed by an identifier in declaration contexts") \
T(InvalidRestAssignmentPattern, \
@@ -615,6 +618,7 @@ class ErrorUtils : public AllStatic {
"Invalid left-hand side expression in prefix operation") \
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
+ T(InvalidPrivateFieldAccess, "Invalid private field '%'") \
T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
@@ -685,8 +689,6 @@ class ErrorUtils : public AllStatic {
T(TypedArrayTooShort, \
"Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
- T(UnexpectedFunctionSent, \
- "function.sent expression is not allowed outside a generator") \
T(UnexpectedReserved, "Unexpected reserved word") \
T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
T(UnexpectedSuper, "'super' keyword unexpected here") \
@@ -722,11 +724,10 @@ class ErrorUtils : public AllStatic {
T(WasmTrapDivByZero, "divide by zero") \
T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
T(WasmTrapRemByZero, "remainder by zero") \
- T(WasmTrapFloatUnrepresentable, "integer result unrepresentable") \
- T(WasmTrapFuncInvalid, "invalid function") \
+ T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
+ T(WasmTrapFuncInvalid, "invalid index into function table") \
T(WasmTrapFuncSigMismatch, "function signature mismatch") \
- T(WasmTrapInvalidIndex, "invalid index into function table") \
- T(WasmTrapTypeError, "invalid type") \
+ T(WasmTrapTypeError, "wasm function signature contains illegal type") \
T(WasmExceptionError, "wasm exception") \
/* Asm.js validation related */ \
T(AsmJsInvalid, "Invalid asm.js: %") \
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com \ No newline at end of file
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 803c16b829..a5e2335852 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -46,8 +46,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
@@ -126,16 +125,14 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code* code, Address target) {
if (IsMipsArchVariant(kMips32r6)) {
// On R6 the address location is shifted by one instruction
set_target_address_at(
- isolate,
instruction_payload - (kInstructionsFor32BitConstant - 1) * kInstrSize,
code ? code->constant_pool() : nullptr, target);
} else {
set_target_address_at(
- isolate,
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
code ? code->constant_pool() : nullptr, target);
}
@@ -172,9 +169,8 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
// after complete deserialization, no need to flush on each reference.
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
DCHECK(IsLui(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
@@ -200,7 +196,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -250,15 +246,15 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -267,12 +263,12 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index bd540346c0..a39c06eaa2 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -79,6 +79,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#if defined(_MIPS_ARCH_MIPS32R6)
// FP64 mode is implied on r6.
supported_ |= 1u << FP64FPU;
+#if defined(_MIPS_MSA)
+ supported_ |= 1u << MIPS_SIMD;
+#endif
#endif
#if defined(FPU_MODE_FP64)
supported_ |= 1u << FP64FPU;
@@ -91,8 +94,14 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
supported_ |= 1u << FP64FPU;
+#if defined(_MIPS_ARCH_MIPS32R6)
+#if defined(_MIPS_MSA)
+ supported_ |= 1u << MIPS_SIMD;
+#else
if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
+#endif
+#endif
#if defined(_MIPS_ARCH_MIPS32RX)
if (cpu.architecture() == 6) {
supported_ |= 1u << MIPSr6;
@@ -200,22 +209,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -272,8 +279,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = buffer_ + request.offset();
- set_target_value_at(isolate, pc,
- reinterpret_cast<uint32_t>(object.location()));
+ set_target_value_at(pc, reinterpret_cast<uint32_t>(object.location()));
}
}
@@ -2492,15 +2498,6 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
}
-void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
-
- *lo = i & 0xFFFFFFFF;
- *hi = i >> 32;
-}
-
-
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
@@ -3889,11 +3886,8 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_value_at(Isolate* isolate, Address pc,
- uint32_t target,
+void Assembler::set_target_value_at(Address pc, uint32_t target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
-
Instr instr2 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -3924,7 +3918,7 @@ void Assembler::set_target_value_at(Isolate* isolate, Address pc,
}
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
+ Assembler::FlushICache(pc, 2 * sizeof(int32_t));
}
}
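
For reference, the halfword split being written back into the lui/ori pair (a host-side sketch of the common case; the instr2/rt_code inspection above covers encoding variants):

uint32_t lui_imm = target >> 16;     // upper halfword -> lui rt, lui_imm
uint32_t ori_imm = target & 0xFFFF;  // lower halfword -> ori rt, rt, ori_imm
// After the patch, executing the pair leaves rt == target.
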
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 4c68e730b3..a5d608898f 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -388,7 +388,7 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE32))
+ RelocInfo::Mode rmode = RelocInfo::NONE))
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -400,8 +400,7 @@ class Operand BASE_EMBEDDED {
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value))
- : rm_(no_reg), rmode_(RelocInfo::NONE32) {
+ INLINE(explicit Operand(Smi* value)) : rm_(no_reg), rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -568,9 +567,9 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
INLINE(static void set_target_address_at)
- (Isolate* isolate, Address pc, Address target,
+ (Address pc, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
- set_target_value_at(isolate, pc, reinterpret_cast<uint32_t>(target),
+ set_target_value_at(pc, reinterpret_cast<uint32_t>(target),
icache_flush_mode);
}
// On MIPS there is no Constant Pool so we skip that parameter.
@@ -578,13 +577,13 @@ class Assembler : public AssemblerBase {
return target_address_at(pc);
}
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_address_at(isolate, pc, target, icache_flush_mode);
+ set_target_address_at(pc, target, icache_flush_mode);
}
static void set_target_value_at(
- Isolate* isolate, Address pc, uint32_t target,
+ Address pc, uint32_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
@@ -597,12 +596,11 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -1893,8 +1891,6 @@ class Assembler : public AssemblerBase {
return internal_trampoline_exception_;
}
- void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
-
bool is_trampoline_emitted() const {
return trampoline_emitted_;
}
@@ -2235,4 +2231,4 @@ class UseScratchRegisterScope {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_ASSEMBLER_MIPS_H_
+#endif // V8_MIPS_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index c07422ff5f..b2e52745ed 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -541,9 +541,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -568,9 +568,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 565fcd9a68..496c715e81 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CONSTANTS_H_
-#define V8_MIPS_CONSTANTS_H_
+#ifndef V8_MIPS_CONSTANTS_MIPS_H_
+#define V8_MIPS_CONSTANTS_MIPS_H_
#include "src/globals.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
@@ -1931,4 +1931,4 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
} // namespace internal
} // namespace v8
-#endif // #ifndef V8_MIPS_CONSTANTS_H_
+#endif // V8_MIPS_CONSTANTS_MIPS_H_
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index 1199365b7d..2e71817bd8 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -38,9 +38,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
- if (res) {
- V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
- }
+ if (res) FATAL("Failed to flush the instruction cache");
#endif // ANDROID
#endif // !USE_SIMULATOR.
}
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index f27bdc9b68..d2f8ebb0ee 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -917,6 +917,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintMsaImmElm(instr);
return 4;
}
+ UNREACHABLE();
}
case 'r': { // 'r: registers.
return FormatRegister(instr, format);
diff --git a/deps/v8/src/mips/frame-constants-mips.h b/deps/v8/src/mips/frame-constants-mips.h
index 344453794a..6d7e471b09 100644
--- a/deps/v8/src/mips/frame-constants-mips.h
+++ b/deps/v8/src/mips/frame-constants-mips.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_FRAMES_MIPS_H_
-#define V8_MIPS_FRAMES_MIPS_H_
+#ifndef V8_MIPS_FRAME_CONSTANTS_MIPS_H_
+#define V8_MIPS_FRAME_CONSTANTS_MIPS_H_
namespace v8 {
namespace internal {
@@ -49,4 +49,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_MIPS_FRAME_CONSTANTS_MIPS_H_
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 795fdc4af8..a23f8f0fd4 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -68,12 +68,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a2, a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 5c89467cd8..84cf23c832 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -14,6 +14,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/mips/assembler-mips-inl.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/register-configuration.h"
@@ -1162,6 +1163,7 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
@@ -1185,6 +1187,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
MemOperand tmp = src;
AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
@@ -2170,23 +2173,22 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
}
}
-void TurboAssembler::Move(FPURegister dst, float imm) {
+void TurboAssembler::Move(FPURegister dst, uint32_t src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, Operand(bit_cast<int32_t>(imm)));
+ li(scratch, Operand(static_cast<int32_t>(src)));
mtc1(scratch, dst);
}
-void TurboAssembler::Move(FPURegister dst, double imm) {
- int64_t imm_bits = bit_cast<int64_t>(imm);
+void TurboAssembler::Move(FPURegister dst, uint64_t src) {
// Handle special values first.
- if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
+ if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
- } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
+ } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
Neg_d(dst, kDoubleRegZero);
} else {
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
+ uint32_t lo = src & 0xFFFFFFFF;
+ uint32_t hi = src >> 32;
// Move the low part of the double into the lower of the corresponding FPU
// register of FPU register pair.
if (lo != 0) {
@@ -2308,6 +2310,79 @@ void TurboAssembler::Clz(Register rd, Register rs) {
}
}
+void TurboAssembler::Ctz(Register rd, Register rs) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ Ror(rd, rs, 16);
+ wsbh(rd, rd);
+ bitswap(rd, rd);
+ Clz(rd, rd);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Addu(scratch, rs, -1);
+ Xor(rd, scratch, rs);
+ And(rd, rd, scratch);
+ // Count number of leading zeroes.
+ Clz(rd, rd);
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ li(scratch, 32);
+ Subu(rd, scratch, rd);
+ }
+}
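
A host-side model of the non-r6 sequence above, handy for checking the bit trick (a sketch; it assumes Ctz(0) == 32, which is what the 32 - Clz path yields):

#include <cstdint>

uint32_t CtzModel(uint32_t x) {
  // (x - 1) ^ x sets the trailing-zero bits plus the lowest set bit of x;
  // AND with (x - 1) drops that bit, leaving ones exactly where x had
  // trailing zeroes (all 32 ones for x == 0).
  uint32_t ones = ((x - 1) ^ x) & (x - 1);
  uint32_t clz = 0;  // software Clz, standing in for the clz instruction
  while (clz < 32 && !(ones & (0x80000000u >> clz))) ++clz;
  return 32 - clz;  // trailing-zero count of x
}
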
+
+void TurboAssembler::Popcnt(Register rd, Register rs) {
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // For comparison, for 32-bit quantities, this algorithm can be executed
+ // using 20 MIPS instructions (the calls to LoadConst32() generate two
+ // machine instructions each for the values being used in this algorithm).
+ // An (unrolled) loop-based algorithm requires 25 instructions.
+ //
+ // For 64-bit quantities, this algorithm gets executed twice, (once
+ // for in_lo, and again for in_hi), but saves a few instructions
+ // because the mask values only have to be loaded once. Using this
+ // algorithm the count for a 64-bit operand can be performed in 29
+ // instructions compared to a loop-based algorithm which requires 47
+ // instructions.
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ uint32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srl(scratch, rs, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Subu(scratch, rs, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srl(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Addu(scratch, rd, scratch);
+ srl(rd, scratch, 4);
+ Addu(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul(rd, rd, scratch);
+ srl(rd, rd, shift);
+}
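
The same parallel count in portable C++, matching the constants B0, B1, B2 and the final multiply/shift above (a sketch for verification):

#include <cstdint>

uint32_t PopcntModel(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555);                 // per-pair counts    (B0)
  v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  // per-nibble counts  (B1)
  v = (v + (v >> 4)) & 0x0F0F0F0F;                 // per-byte counts    (B2)
  return (v * 0x01010101) >> 24;                   // sum bytes into the top one
}
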
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
@@ -3991,12 +4066,27 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ li(t0, Operand(debug_is_active));
+ lb(t0, MemOperand(t0));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
lb(t0, MemOperand(t0));
+ Branch(&call_hook, ne, t0, Operand(zero_reg));
+
+ lw(t0, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(t0, &skip_hook);
+ lw(t0, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
+ And(t0, t0, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
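
The hunk above emits, in essence, the following control flow (a host-side sketch with simplified accessors; the real code reads the two flags through ExternalReference cells, and CallDebugHook is a hypothetical stand-in for the runtime call made under the FrameScope):

if (!debug_is_active) return;                       // -> skip_hook
if (!debug_hook_on_function_call) {
  Object* debug_info = fun->shared()->debug_info();
  if (debug_info->IsSmi()) return;                  // no DebugInfo -> skip_hook
  int flags = DebugInfo::cast(debug_info)->flags(); // stored as a Smi
  if (!(flags & DebugInfo::kBreakAtEntry)) return;  // -> skip_hook
}
CallDebugHook(fun, new_target, expected, actual);   // -> call_hook
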
@@ -4054,13 +4144,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = t0;
+ Register code = kJavaScriptCallCodeStartRegister;
lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
- Call(code, Code::kHeaderSize - kHeapObjectTag);
+ Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
+ Call(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
- Jump(code, Code::kHeaderSize - kHeapObjectTag);
+ Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
+ Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
// mismatched parameter counts.
@@ -4105,14 +4197,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- li(a1, function);
- InvokeFunction(a1, expected, actual, flag);
-}
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -4467,6 +4551,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ int32_t bytes_address = reinterpret_cast<int32_t>(stream->bytes());
+ li(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+ Jump(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -5274,6 +5364,26 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // The bal below clobbers ra while computing the code start address, so
+ // push ra here and restore it with the matching pop at the end.
+ push(ra);
+
+ // The bal instruction puts the address of the current instruction into
+ // the return address (ra) register, which we can use later on.
+ Label current;
+ bal(&current);
+ nop();
+ int pc = pc_offset();
+ bind(&current);
+ li(dst, pc);
+ subu(dst, ra, dst);
+
+ pop(ra); // Restore ra
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 8c70eb54a3..37d2c59270 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -19,12 +19,15 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
constexpr Register kInterpreterDispatchTableRegister = t6;
constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
@@ -559,6 +562,8 @@ class TurboAssembler : public Assembler {
void Movf(Register rd, Register rs, uint16_t cc = 0);
void Clz(Register rd, Register rs);
+ void Ctz(Register rd, Register rs);
+ void Popcnt(Register rd, Register rs);
// Int64Lowering instructions
void AddPair(Register dst_low, Register dst_high, Register left_low,
@@ -731,8 +736,10 @@ class TurboAssembler : public Assembler {
Mthc1(src_high, dst);
}
- void Move(FPURegister dst, float imm);
- void Move(FPURegister dst, double imm);
+ void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
+ void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
+ void Move(FPURegister dst, uint32_t src);
+ void Move(FPURegister dst, uint64_t src);
// -------------------------------------------------------------------------
// Overflow handling functions.
@@ -844,6 +851,12 @@ class TurboAssembler : public Assembler {
BranchF64(bd, target, nan, cc, cmp1, cmp2);
}
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
void BranchLong(Label* L, BranchDelaySlot bdslot);
@@ -1023,10 +1036,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Frame restart support.
void MaybeDropFrames();
@@ -1089,6 +1098,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
BranchDelaySlot bd = PROTECT,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// -------------------------------------------------------------------------
// StatsCounter support.
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 4994418ef5..b55273eba5 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -783,8 +783,7 @@ void MipsDebugger::Debug() {
#undef XSTR
}
-
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -883,11 +882,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
stack_ = reinterpret_cast<char*>(malloc(stack_size_));
@@ -2539,8 +2533,7 @@ void Simulator::PrintStopInfo(uint32_t code) {
void Simulator::SignalException(Exception e) {
- V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.",
- static_cast<int>(e));
+ FATAL("Error: Exception %i raised.", static_cast<int>(e));
}
// Min/Max template functions for Double and Single arguments.
@@ -5690,7 +5683,8 @@ void Simulator::DecodeTypeMsa3RF() {
case MSUB_Q:
case MADDR_Q:
case MSUBR_Q:
- get_msa_register(wd_reg(), &wd); // fall-through
+ get_msa_register(wd_reg(), &wd);
+ V8_FALLTHROUGH;
case MUL_Q:
case MULR_Q:
switch (DecodeMsaDataFormat()) {
@@ -6912,7 +6906,7 @@ void Simulator::DecodeTypeJump() {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
v8::internal::EmbeddedVector<char, 256> buffer;
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 0c417becd5..ffd2c46740 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -258,6 +258,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -450,10 +451,10 @@ class Simulator : public SimulatorBase {
Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
- V8_Fatal(__FILE__, __LINE__,
- "Error: Unexpected instruction 0x%08x immediately after a "
- "compact branch instruction.",
- *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
+ FATAL(
+ "Error: Unexpected instruction 0x%08x immediately after a "
+ "compact branch instruction.",
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
}
}
@@ -480,9 +481,8 @@ class Simulator : public SimulatorBase {
}
if (instr->IsForbiddenInBranchDelay()) {
- V8_Fatal(__FILE__, __LINE__,
- "Error: Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeValue());
+ FATAL("Error: Unexpected %i opcode in a branch delay slot.",
+ instr->OpcodeValue());
}
InstructionDecode(instr);
SNPrintF(trace_buf_, " ");
@@ -538,9 +538,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation.
- base::CustomMatcherHashMap* i_cache_;
-
v8::internal::Isolate* isolate_;
// Registered breakpoints.
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
index 3fce7dd688..4ce9d7f91d 100644
--- a/deps/v8/src/mips64/OWNERS
+++ b/deps/v8/src/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com \ No newline at end of file
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com \ No newline at end of file
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index ded3da224c..e05082ee40 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -33,9 +33,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
-#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#ifndef V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
+#define V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
#include "src/mips64/assembler-mips64.h"
@@ -46,8 +45,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
@@ -121,9 +119,9 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ Address instruction_payload, Code* code, Address target) {
set_target_address_at(
- isolate, instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
+ instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
code ? code->constant_pool() : nullptr, target);
}
@@ -144,9 +142,8 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
// after complete deserialization, no need to flush on each reference.
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
DCHECK(IsJ(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
@@ -172,7 +169,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
@@ -217,15 +214,15 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -234,12 +231,12 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@@ -343,4 +340,4 @@ EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#endif // V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index a056f66849..6c0bebebce 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -76,12 +76,21 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS64R6) && defined(_MIPS_MSA)
+ supported_ |= 1u << MIPS_SIMD;
+#endif
#else
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS64R6)
+#if defined(_MIPS_MSA)
+ supported_ |= 1u << MIPS_SIMD;
+#else
if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD;
#endif
+#endif
+#endif
}
@@ -178,22 +187,20 @@ uint32_t RelocInfo::embedded_size() const {
(Assembler::target_address_at(pc_, constant_pool_))));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -251,8 +258,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = buffer_ + request.offset();
- set_target_value_at(isolate, pc,
- reinterpret_cast<uint64_t>(object.location()));
+ set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
}
}
@@ -2893,15 +2899,6 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
}
-void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
-
- *lo = i & 0xFFFFFFFF;
- *hi = i >> 32;
-}
-
-
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
@@ -4217,8 +4214,7 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_value_at(Isolate* isolate, Address pc,
- uint64_t target,
+void Assembler::set_target_value_at(Address pc, uint64_t target,
ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load an
// address in code on MIPS64 because only 48 bits of the address are
// effectively used.
@@ -4249,7 +4245,7 @@ void Assembler::set_target_value_at(Isolate* isolate, Address pc,
(target & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(pc, 4 * Assembler::kInstrSize);
}
}
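
The 64-bit variant rebuilds the constant from three 16-bit immediates, since only 48 address bits are significant (a host-side sketch of the split; assumption: the canonical lui/ori/dsll/ori load sequence):

uint32_t imm47_32 = (target >> 32) & 0xFFFF;  // lui immediate
uint32_t imm31_16 = (target >> 16) & 0xFFFF;  // first ori, before the dsll
uint32_t imm15_0 = target & 0xFFFF;           // final ori, as shown above
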
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 3530c7e7b2..9f1fe59de8 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -32,9 +32,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
-#define V8_MIPS_ASSEMBLER_MIPS_H_
+#ifndef V8_MIPS64_ASSEMBLER_MIPS64_H_
+#define V8_MIPS64_ASSEMBLER_MIPS64_H_
#include <stdio.h>
@@ -396,7 +395,7 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE64))
+ RelocInfo::Mode rmode = RelocInfo::NONE))
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -408,8 +407,7 @@ class Operand BASE_EMBEDDED {
INLINE(explicit Operand(Object** opp));
INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value))
- : rm_(no_reg), rmode_(RelocInfo::NONE32) {
+ INLINE(explicit Operand(Smi* value)) : rm_(no_reg), rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -577,9 +575,9 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_value_at(isolate, pc, reinterpret_cast<uint64_t>(target),
+ set_target_value_at(pc, reinterpret_cast<uint64_t>(target),
icache_flush_mode);
}
// On MIPS there is no Constant Pool so we skip that parameter.
@@ -587,13 +585,13 @@ class Assembler : public AssemblerBase {
return target_address_at(pc);
}
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_address_at(isolate, pc, target, icache_flush_mode);
+ set_target_address_at(pc, target, icache_flush_mode);
}
static void set_target_value_at(
- Isolate* isolate, Address pc, uint64_t target,
+ Address pc, uint64_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
@@ -608,12 +606,11 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -1963,8 +1960,6 @@ class Assembler : public AssemblerBase {
return internal_trampoline_exception_;
}
- void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
-
bool is_trampoline_emitted() const {
return trampoline_emitted_;
}
@@ -2300,4 +2295,4 @@ class UseScratchRegisterScope {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_ASSEMBLER_MIPS_H_
+#endif // V8_MIPS64_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index 0513611664..f5d20d8c2b 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CODE_STUBS_MIPS64_H_
-#define V8_MIPS_CODE_STUBS_MIPS64_H_
+#ifndef V8_MIPS64_CODE_STUBS_MIPS64_H_
+#define V8_MIPS64_CODE_STUBS_MIPS64_H_
namespace v8 {
namespace internal {
@@ -28,4 +28,4 @@ class DirectCEntryStub : public PlatformCodeStub {
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_CODE_STUBS_MIPS64_H_
+#endif // V8_MIPS64_CODE_STUBS_MIPS64_H_
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 3be5e504bb..7b2fbd78a5 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -542,9 +542,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -569,9 +569,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
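
The GetCode / FlushICache / SetPermissions sequence above is V8's variant of
the usual W^X discipline for runtime-generated code. A generic sketch of the
same idea with plain POSIX calls (assumptions: a Linux-style mmap and
GCC/Clang's __builtin___clear_cache as the portable icache flush):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstring>

    // Copy raw machine code into a fresh mapping, flush the icache, then
    // drop write permission before handing out an executable pointer.
    void* EmitAndSeal(const void* code, size_t size) {
      void* buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (buf == MAP_FAILED) return nullptr;
      memcpy(buf, code, size);
      __builtin___clear_cache(static_cast<char*>(buf),
                              static_cast<char*>(buf) + size);
      if (mprotect(buf, size, PROT_READ | PROT_EXEC) != 0) return nullptr;
      return buf;
    }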
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 0c107d1e1b..e89c4a5df3 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CONSTANTS_H_
-#define V8_MIPS_CONSTANTS_H_
+#ifndef V8_MIPS64_CONSTANTS_MIPS64_H_
+#define V8_MIPS64_CONSTANTS_MIPS64_H_
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -2013,4 +2013,4 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
} // namespace internal
} // namespace v8
-#endif // #ifndef V8_MIPS_CONSTANTS_H_
+#endif // V8_MIPS64_CONSTANTS_MIPS64_H_
diff --git a/deps/v8/src/mips64/cpu-mips64.cc b/deps/v8/src/mips64/cpu-mips64.cc
index ab9cf69620..cc8ecdbd5a 100644
--- a/deps/v8/src/mips64/cpu-mips64.cc
+++ b/deps/v8/src/mips64/cpu-mips64.cc
@@ -38,9 +38,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
long res; // NOLINT(runtime/int)
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
- if (res) {
- V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
- }
+ if (res) FATAL("Failed to flush the instruction cache");
#endif // ANDROID
#endif // !USE_SIMULATOR.
}
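
The V8_Fatal(__FILE__, __LINE__, ...) to FATAL(...) rewrites that recur
throughout this patch are possible because the macro captures the location
itself. A plausible definition (an assumption; the real one lives in
src/base/logging.h) is simply:

    #define FATAL(...) V8_Fatal(__FILE__, __LINE__, __VA_ARGS__)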
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index d53b47d0c6..8f77a68b21 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -958,6 +958,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintMsaImmElm(instr);
return 4;
}
+ UNREACHABLE();
}
case 'r': { // 'r: registers.
return FormatRegister(instr, format);
diff --git a/deps/v8/src/mips64/frame-constants-mips64.h b/deps/v8/src/mips64/frame-constants-mips64.h
index 344453794a..9c7455bcc5 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/mips64/frame-constants-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_FRAMES_MIPS_H_
-#define V8_MIPS_FRAMES_MIPS_H_
+#ifndef V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
+#define V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
namespace v8 {
namespace internal {
@@ -49,4 +49,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 8bc04a0401..6aba359b78 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -68,12 +68,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a2, a3};
- data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 841f4665cf..79f486b4bb 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -14,6 +14,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/register-configuration.h"
@@ -2703,39 +2704,38 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
mthc1(scratch, dst);
}
-void TurboAssembler::Move(FPURegister dst, float imm) {
+void TurboAssembler::Move(FPURegister dst, uint32_t src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, Operand(bit_cast<int32_t>(imm)));
+ li(scratch, Operand(static_cast<int32_t>(src)));
mtc1(scratch, dst);
}
-void TurboAssembler::Move(FPURegister dst, double imm) {
- int64_t imm_bits = bit_cast<int64_t>(imm);
+void TurboAssembler::Move(FPURegister dst, uint64_t src) {
// Handle special values first.
- if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
+ if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
- } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
+ } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
Neg_d(dst, kDoubleRegZero);
} else {
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
- // Move the low part of the double into the lower bits of the corresponding
- // FPU register.
+ uint32_t lo = src & 0xFFFFFFFF;
+ uint32_t hi = src >> 32;
+    // Move the low part of the double into the lower half of the
+    // corresponding FPU register pair.
if (lo != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, lo);
+ li(scratch, Operand(lo));
mtc1(scratch, dst);
} else {
mtc1(zero_reg, dst);
}
- // Move the high part of the double into the high bits of the corresponding
- // FPU register.
+    // Move the high part of the double into the upper half of the
+    // corresponding FPU register pair.
if (hi != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, hi);
+ li(scratch, Operand(hi));
mthc1(scratch, dst);
} else {
mthc1(zero_reg, dst);
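
This inlines what the deleted DoubleAsTwoUInt32() helper used to compute. A
minimal host-side sketch of the same split (not V8 code; memcpy stands in
for bit_cast):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double d = 1.5;
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));      // bit_cast<uint64_t>(d)
      uint32_t lo = bits & 0xFFFFFFFF;      // written via mtc1 (low half)
      uint32_t hi = bits >> 32;             // written via mthc1 (high half)
      assert(hi == 0x3FF80000 && lo == 0);  // IEEE-754 pattern of 1.5
      return 0;
    }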
@@ -2776,6 +2776,136 @@ void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
void TurboAssembler::Clz(Register rd, Register rs) { clz(rd, rs); }
+void TurboAssembler::Ctz(Register rd, Register rs) {
+ if (kArchVariant == kMips64r6) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ rotr(rd, rs, 16);
+ wsbh(rd, rd);
+ bitswap(rd, rd);
+ Clz(rd, rd);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Daddu(scratch, rs, -1);
+ Xor(rd, scratch, rs);
+ And(rd, rd, scratch);
+ // Count number of leading zeroes.
+ Clz(rd, rd);
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ li(scratch, 32);
+ Subu(rd, scratch, rd);
+ }
+}
+
+void TurboAssembler::Dctz(Register rd, Register rs) {
+ if (kArchVariant == kMips64r6) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ dsbh(rd, rs);
+ dshd(rd, rd);
+ dbitswap(rd, rd);
+ dclz(rd, rd);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Daddu(scratch, rs, -1);
+ Xor(rd, scratch, rs);
+ And(rd, rd, scratch);
+ // Count number of leading zeroes.
+ dclz(rd, rd);
+ // Subtract number of leading zeroes from 64 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ li(scratch, 64);
+ Dsubu(rd, scratch, rd);
+ }
+}
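
A stand-alone rendering of the pre-r6 trick used by Ctz and Dctz above,
assuming a host clz for which clz(0) == 32 (__builtin_clz alone is undefined
for a zero argument):

    #include <cassert>
    #include <cstdint>

    uint32_t Clz32(uint32_t v) { return v == 0 ? 32 : __builtin_clz(v); }

    uint32_t Ctz32(uint32_t v) {
      uint32_t scratch = v - 1;              // Daddu(scratch, rs, -1)
      uint32_t m = (scratch ^ v) & scratch;  // Xor + And: trailing-zero mask
      return 32 - Clz32(m);                  // li(scratch, 32) + Subu
    }

    int main() {
      assert(Ctz32(8) == 3 && Ctz32(1) == 0 && Ctz32(0) == 32);
      return 0;
    }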
+
+void TurboAssembler::Popcnt(Register rd, Register rs) {
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // For comparison, for 32-bit quantities, this algorithm can be executed
+ // using 20 MIPS instructions (the calls to LoadConst32() generate two
+ // machine instructions each for the values being used in this algorithm).
+ // A(n unrolled) loop-based algorithm requires 25 instructions.
+ //
+ // For a 64-bit operand this can be performed in 24 instructions compared
+ // to a(n unrolled) loop based algorithm which requires 38 instructions.
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ uint32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srl(scratch, rs, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Subu(scratch, rs, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srl(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Addu(scratch, rd, scratch);
+ srl(rd, scratch, 4);
+ Addu(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul(rd, rd, scratch);
+ srl(rd, rd, shift);
+}
+
+void TurboAssembler::Dpopcnt(Register rd, Register rs) {
+ uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
+ uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
+ uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ dsrl(scratch, rs, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Dsubu(scratch, rs, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ dsrl(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Daddu(scratch, rd, scratch);
+ dsrl(rd, scratch, 4);
+ Daddu(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Dmul(rd, rd, scratch);
+ dsrl32(rd, rd, shift);
+}
+
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
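
The Popcnt sequence above is the classic SWAR bit count from the linked
bithacks page. In plain C++, the 32-bit case reads:

    #include <cassert>
    #include <cstdint>

    uint32_t Popcnt32(uint32_t v) {
      v = v - ((v >> 1) & 0x55555555);                 // 2-bit sums
      v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  // 4-bit sums
      v = (v + (v >> 4)) & 0x0F0F0F0F;                 // byte sums
      return (v * 0x01010101) >> 24;                   // add up the bytes
    }

    int main() {
      assert(Popcnt32(0) == 0);
      assert(Popcnt32(0xFFFFFFFF) == 32);
      assert(Popcnt32(0x10010) == 2);
      return 0;
    }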
@@ -4267,12 +4397,28 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ li(t0, Operand(debug_is_active));
+ Lb(t0, MemOperand(t0));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
Lb(t0, MemOperand(t0));
+ Branch(&call_hook, ne, t0, Operand(zero_reg));
+
+ Ld(t0, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(t0, &skip_hook);
+ Ld(t0, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
+ And(t0, t0, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
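
Rendered as pseudocode (hypothetical helper names, purely to make the new
control flow explicit), CheckDebugHook now reads:

    // if (!debug_is_active) goto skip_hook;
    // if (debug_hook_on_function_call) goto call_hook;
    // debug_info = fun->shared()->debug_info();
    // if (debug_info is a Smi) goto skip_hook;   // no DebugInfo attached
    // if (!(debug_info->flags() & kBreakAtEntry)) goto skip_hook;
    // call_hook: run the debug hook under a frame scope
    // skip_hook: continue with the normal call path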
@@ -4330,7 +4476,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = t0;
+ Register code = kJavaScriptCallCodeStartRegister;
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4383,14 +4529,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- li(a1, function);
- InvokeFunction(a1, expected, actual, flag);
-}
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -4723,6 +4861,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
+ li(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+ Jump(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -5543,6 +5687,26 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
return n_of_valid_regs != n_of_non_aliasing_regs;
}
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // This push on ra and the pop below together ensure that we restore the
+ // register ra, which is needed while computing the code start address.
+ push(ra);
+
+ // The bal instruction puts the address of the current instruction into
+ // the return address (ra) register, which we can use later on.
+ Label current;
+ bal(&current);
+ nop();
+ int pc = pc_offset();
+ bind(&current);
+ li(dst, Operand(pc));
+ Dsubu(dst, ra, dst);
+
+ pop(ra); // Restore ra
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+
} // namespace internal
} // namespace v8
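
A worked example for ComputeCodeStartAddress with hypothetical addresses,
since the bal/pc_offset() interplay is easy to misread. Suppose the code
object's instructions start at 0x4000 and the bal is emitted at offset 0x48:

    // 0x4048  bal current        ; ra := address after the delay slot = 0x4050
    // 0x404C  nop                ; branch delay slot
    // current:                   ; pc_offset() == 0x50 at this point
    // 0x4050  li    dst, 0x50
    //         dsubu dst, ra, dst ; 0x4050 - 0x50 = 0x4000, the code start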
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index f89682d34c..f623f7f3cb 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#ifndef V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
+#define V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
#include "src/assembler.h"
#include "src/globals.h"
@@ -19,12 +19,15 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kSpeculationPoisonRegister = a7;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
constexpr Register kInterpreterDispatchTableRegister = t2;
constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
@@ -601,6 +604,10 @@ class TurboAssembler : public Assembler {
void Movf(Register rd, Register rs, uint16_t cc = 0);
void Clz(Register rd, Register rs);
+ void Ctz(Register rd, Register rs);
+ void Dctz(Register rd, Register rs);
+ void Popcnt(Register rd, Register rs);
+ void Dpopcnt(Register rd, Register rs);
// MIPS64 R2 instruction macro.
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -744,8 +751,10 @@ class TurboAssembler : public Assembler {
}
}
- void Move(FPURegister dst, float imm);
- void Move(FPURegister dst, double imm);
+ void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
+ void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
+ void Move(FPURegister dst, uint32_t src);
+ void Move(FPURegister dst, uint64_t src);
inline void MulBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Register scratch = at) {
@@ -875,6 +884,12 @@ class TurboAssembler : public Assembler {
void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
Register scratch = at);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
@@ -1091,10 +1106,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Frame restart support.
void MaybeDropFrames();
@@ -1156,6 +1167,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
BranchDelaySlot bd = PROTECT,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// -------------------------------------------------------------------------
// StatsCounter support.
@@ -1301,4 +1315,4 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#endif // V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index ebb8a76ad7..9177f8e6aa 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -715,8 +715,7 @@ void MipsDebugger::Debug() {
#undef XSTR
}
-
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -814,11 +813,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
stack_size_ = FLAG_sim_stack_size * KB;
@@ -2484,8 +2478,7 @@ void Simulator::PrintStopInfo(uint64_t code) {
void Simulator::SignalException(Exception e) {
- V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.",
- static_cast<int>(e));
+ FATAL("Error: Exception %i raised.", static_cast<int>(e));
}
// Min/Max template functions for Double and Single arguments.
@@ -5914,7 +5907,8 @@ void Simulator::DecodeTypeMsa3RF() {
case MSUB_Q:
case MADDR_Q:
case MSUBR_Q:
- get_msa_register(wd_reg(), &wd); // fall-through
+ get_msa_register(wd_reg(), &wd);
+ V8_FALLTHROUGH;
case MUL_Q:
case MULR_Q:
switch (DecodeMsaDataFormat()) {
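
V8_FALLTHROUGH replaces the old "// fall-through" comment with an annotation
the compiler can check against -Wimplicit-fallthrough. A plausible definition
(an assumption; the real one lives in include/v8config.h):

    #if defined(__clang__)
    #define V8_FALLTHROUGH [[clang::fallthrough]]
    #elif defined(__GNUC__) && __GNUC__ >= 7
    #define V8_FALLTHROUGH __attribute__((fallthrough))
    #else
    #define V8_FALLTHROUGH
    #endif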
@@ -7260,7 +7254,7 @@ void Simulator::DecodeTypeJump() {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index c4292236b0..115dde2103 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -9,8 +9,8 @@
// which will start execution in the Simulator or forwards to the real entry
// on a MIPS HW platform.
-#ifndef V8_MIPS_SIMULATOR_MIPS_H_
-#define V8_MIPS_SIMULATOR_MIPS_H_
+#ifndef V8_MIPS64_SIMULATOR_MIPS64_H_
+#define V8_MIPS64_SIMULATOR_MIPS64_H_
#include "src/allocation.h"
#include "src/mips64/constants-mips64.h"
@@ -260,6 +260,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -472,10 +473,10 @@ class Simulator : public SimulatorBase {
Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
- V8_Fatal(__FILE__, __LINE__,
- "Error: Unexpected instruction 0x%08x immediately after a "
- "compact branch instruction.",
- *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
+ FATAL(
+ "Error: Unexpected instruction 0x%08x immediately after a "
+ "compact branch instruction.",
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
}
}
@@ -502,9 +503,8 @@ class Simulator : public SimulatorBase {
}
if (instr->IsForbiddenAfterBranch()) {
- V8_Fatal(__FILE__, __LINE__,
- "Eror:Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeValue());
+ FATAL("Eror:Unexpected %i opcode in a branch delay slot.",
+ instr->OpcodeValue());
}
InstructionDecode(instr);
SNPrintF(trace_buf_, " ");
@@ -559,9 +559,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation.
- base::CustomMatcherHashMap* i_cache_;
-
v8::internal::Isolate* isolate_;
// Registered breakpoints.
@@ -586,4 +583,4 @@ class Simulator : public SimulatorBase {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_MIPS_SIMULATOR_MIPS_H_
+#endif // V8_MIPS64_SIMULATOR_MIPS64_H_
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index bd391d272b..a3e51e15e7 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -349,8 +349,7 @@ class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
class Code::BodyDescriptor final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kRelocationInfoOffset + kPointerSize == kHandlerTableOffset);
- STATIC_ASSERT(kHandlerTableOffset + kPointerSize ==
+ STATIC_ASSERT(kRelocationInfoOffset + kPointerSize ==
kDeoptimizationDataOffset);
STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
kSourcePositionTableOffset);
@@ -378,9 +377,8 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
IteratePointers(obj, kRelocationInfoOffset, kDataStart, v);
RelocIterator it(Code::cast(obj), mode_mask);
- Isolate* isolate = obj->GetIsolate();
for (; !it.done(); it.next()) {
- it.rinfo()->Visit(isolate, v);
+ it.rinfo()->Visit(v);
}
}
@@ -451,6 +449,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
switch (type) {
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
+ case SCOPE_INFO_TYPE:
return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3);
case FIXED_DOUBLE_ARRAY_TYPE:
return ReturnType();
@@ -460,6 +459,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
return Op::template apply<DescriptorArray::BodyDescriptor>(p1, p2, p3);
case TRANSITION_ARRAY_TYPE:
return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
+ case FEEDBACK_CELL_TYPE:
+ return Op::template apply<FeedbackCell::BodyDescriptor>(p1, p2, p3);
case FEEDBACK_VECTOR_TYPE:
return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3);
case JS_OBJECT_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 142dbf6611..458b807f05 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -17,7 +17,9 @@
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/literal-objects.h"
+#include "src/objects/microtask-inl.h"
#include "src/objects/module.h"
+#include "src/objects/promise-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions.h"
@@ -25,6 +27,28 @@
namespace v8 {
namespace internal {
+// Heap Verification Overview
+// --------------------------
+// - Each InstanceType has a separate XXXVerify method which checks an object's
+// integrity in isolation.
+// - --verify-heap will iterate over all gc spaces and call ObjectVerify() on
+// every encountered tagged pointer.
+// - Verification should be pushed down to the specific instance type if its
+// integrity is independent of an outer object.
+// - In cases where the InstanceType is too generic (e.g. FixedArray) the
+// XXXVerify of the outer method has to do recursive verification.
+// - If the corresponding objects have inheritance, the parent's Verify method
+// is called as well.
+// - For any field containing pointers, VerifyPointer(...) should be called.
+//
+// Caveats
+// -------
+// - Assume that any of the verify methods may be incomplete!
+// - Some integrity checks are only partially done due to objects being in
+// partially initialized states when a gc happens, for instance when outer
+//   objects are allocated before inner ones.
+//
+
#ifdef VERIFY_HEAP
void Object::ObjectVerify() {
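
As a concrete instance of the convention the overview describes (hypothetical
type and fields, not part of this patch):

    void FooBar::FooBarVerify() {
      CHECK(IsFooBar());            // check this object in isolation
      ParentTypeVerify();           // inheritance: the parent's Verify runs too
      VerifyPointer(some_field());  // every pointer field gets VerifyPointer
      CHECK(some_field()->IsUndefined(GetIsolate()) ||
            some_field()->IsString());
    }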
@@ -65,12 +89,13 @@ void HeapObject::HeapObjectVerify() {
CHECK(map()->IsMap());
InstanceType instance_type = map()->instance_type();
- if (instance_type < FIRST_NONSTRING_TYPE) {
- String::cast(this)->StringVerify();
- return;
- }
switch (instance_type) {
+#define STRING_TYPE_CASE(TYPE, size, name, camel_name) case TYPE:
+ STRING_TYPE_LIST(STRING_TYPE_CASE)
+#undef STRING_TYPE_CASE
+ String::cast(this)->StringVerify();
+ break;
case SYMBOL_TYPE:
Symbol::cast(this)->SymbolVerify();
break;
@@ -86,6 +111,7 @@ void HeapObject::HeapObjectVerify() {
break;
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
+ case SCOPE_INFO_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -109,6 +135,9 @@ void HeapObject::HeapObjectVerify() {
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpaceVerify();
break;
+ case FEEDBACK_CELL_TYPE:
+ FeedbackCell::cast(this)->FeedbackCellVerify();
+ break;
case FEEDBACK_VECTOR_TYPE:
FeedbackVector::cast(this)->FeedbackVectorVerify();
break;
@@ -267,10 +296,6 @@ void HeapObject::HeapObjectVerify() {
case STORE_HANDLER_TYPE:
StoreHandler::cast(this)->StoreHandlerVerify();
break;
-
- default:
- UNREACHABLE();
- break;
}
}
@@ -287,6 +312,7 @@ void Symbol::SymbolVerify() {
CHECK(HasHashCode());
CHECK_GT(Hash(), 0);
CHECK(name()->IsUndefined(GetIsolate()) || name()->IsString());
+ CHECK_IMPLIES(IsPrivateField(), IsPrivate());
}
@@ -315,6 +341,13 @@ void FreeSpace::FreeSpaceVerify() {
CHECK(IsFreeSpace());
}
+void FeedbackCell::FeedbackCellVerify() {
+ CHECK(IsFeedbackCell());
+ Isolate* const isolate = GetIsolate();
+ VerifyHeapPointer(value());
+ CHECK(value()->IsUndefined(isolate) || value()->IsFeedbackVector());
+}
+
void FeedbackVector::FeedbackVectorVerify() { CHECK(IsFeedbackVector()); }
template <class Traits>
@@ -742,6 +775,9 @@ void JSBoundFunction::JSBoundFunctionVerify() {
void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
+ JSObjectVerify();
+ VerifyHeapPointer(feedback_cell());
+ CHECK(feedback_cell()->IsFeedbackCell());
CHECK(code()->IsCode());
CHECK(map()->is_callable());
if (has_prototype_slot()) {
@@ -758,7 +794,6 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kFeedbackMetadataOffset);
VerifyObjectField(kFunctionDataOffset);
VerifyObjectField(kFunctionIdentifierOffset);
- VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kNameOffset);
VerifyObjectField(kOuterScopeInfoOffset);
VerifyObjectField(kScopeInfoOffset);
@@ -1041,33 +1076,102 @@ void JSWeakSet::JSWeakSetVerify() {
CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
}
+void Microtask::MicrotaskVerify() { CHECK(IsMicrotask()); }
+
+void CallableTask::CallableTaskVerify() {
+ CHECK(IsCallableTask());
+ MicrotaskVerify();
+ VerifyHeapPointer(callable());
+ CHECK(callable()->IsCallable());
+ VerifyHeapPointer(context());
+ CHECK(context()->IsContext());
+}
+
+void CallbackTask::CallbackTaskVerify() {
+ CHECK(IsCallbackTask());
+ MicrotaskVerify();
+ VerifyHeapPointer(callback());
+ VerifyHeapPointer(data());
+}
+
+void PromiseReactionJobTask::PromiseReactionJobTaskVerify() {
+ CHECK(IsPromiseReactionJobTask());
+ MicrotaskVerify();
+ Isolate* isolate = GetIsolate();
+ VerifyPointer(argument());
+ VerifyHeapPointer(context());
+ CHECK(context()->IsContext());
+ VerifyHeapPointer(handler());
+ VerifyHeapPointer(payload());
+ if (handler()->IsCode()) {
+ CHECK(payload()->IsJSReceiver());
+ } else {
+ CHECK(handler()->IsUndefined(isolate) || handler()->IsCallable());
+ CHECK(payload()->IsJSPromise() || payload()->IsPromiseCapability());
+ }
+}
+
+void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskVerify() {
+ CHECK(IsPromiseFulfillReactionJobTask());
+ PromiseReactionJobTaskVerify();
+}
+
+void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskVerify() {
+ CHECK(IsPromiseRejectReactionJobTask());
+ PromiseReactionJobTaskVerify();
+}
+
+void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskVerify() {
+ CHECK(IsPromiseResolveThenableJobTask());
+ MicrotaskVerify();
+ VerifyHeapPointer(context());
+ CHECK(context()->IsContext());
+ VerifyHeapPointer(promise_to_resolve());
+ CHECK(promise_to_resolve()->IsJSPromise());
+ VerifyHeapPointer(then());
+ CHECK(then()->IsCallable());
+ CHECK(then()->IsJSReceiver());
+ VerifyHeapPointer(thenable());
+ CHECK(thenable()->IsJSReceiver());
+}
+
void PromiseCapability::PromiseCapabilityVerify() {
CHECK(IsPromiseCapability());
- VerifyPointer(promise());
+ Isolate* isolate = GetIsolate();
+ VerifyHeapPointer(promise());
+ CHECK(promise()->IsJSReceiver() || promise()->IsUndefined(isolate));
VerifyPointer(resolve());
VerifyPointer(reject());
}
+void PromiseReaction::PromiseReactionVerify() {
+ CHECK(IsPromiseReaction());
+ Isolate* isolate = GetIsolate();
+ VerifyPointer(next());
+ CHECK(next()->IsSmi() || next()->IsPromiseReaction());
+ VerifyHeapPointer(reject_handler());
+ VerifyHeapPointer(fulfill_handler());
+ VerifyHeapPointer(payload());
+ if (reject_handler()->IsCode()) {
+ CHECK(fulfill_handler()->IsCode());
+ CHECK(payload()->IsJSReceiver());
+ } else {
+ CHECK(reject_handler()->IsUndefined(isolate) ||
+ reject_handler()->IsCallable());
+ CHECK(fulfill_handler()->IsUndefined(isolate) ||
+ fulfill_handler()->IsCallable());
+ CHECK(payload()->IsJSPromise() || payload()->IsPromiseCapability());
+ }
+}
+
void JSPromise::JSPromiseVerify() {
CHECK(IsJSPromise());
JSObjectVerify();
- Isolate* isolate = GetIsolate();
- CHECK(result()->IsUndefined(isolate) || result()->IsObject());
- CHECK(deferred_promise()->IsUndefined(isolate) ||
- deferred_promise()->IsJSReceiver() ||
- deferred_promise()->IsFixedArray());
- CHECK(deferred_on_resolve()->IsUndefined(isolate) ||
- deferred_on_resolve()->IsCallable() ||
- deferred_on_resolve()->IsFixedArray());
- CHECK(deferred_on_reject()->IsUndefined(isolate) ||
- deferred_on_reject()->IsCallable() ||
- deferred_on_reject()->IsFixedArray());
- CHECK(fulfill_reactions()->IsUndefined(isolate) ||
- fulfill_reactions()->IsCallable() || fulfill_reactions()->IsSymbol() ||
- fulfill_reactions()->IsFixedArray());
- CHECK(reject_reactions()->IsUndefined(isolate) ||
- reject_reactions()->IsSymbol() || reject_reactions()->IsCallable() ||
- reject_reactions()->IsFixedArray());
+ VerifyPointer(reactions_or_result());
+ VerifySmiField(kFlagsOffset);
+ if (status() == Promise::kPending) {
+ CHECK(reactions()->IsSmi() || reactions()->IsPromiseReaction());
+ }
}
template <typename Derived>
@@ -1214,33 +1318,6 @@ void Foreign::ForeignVerify() {
}
-void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoVerify() {
- CHECK(IsPromiseResolveThenableJobInfo());
- CHECK(thenable()->IsJSReceiver());
- CHECK(then()->IsJSReceiver());
- CHECK(resolve()->IsJSFunction());
- CHECK(reject()->IsJSFunction());
- CHECK(context()->IsContext());
-}
-
-void PromiseReactionJobInfo::PromiseReactionJobInfoVerify() {
- Isolate* isolate = GetIsolate();
- CHECK(IsPromiseReactionJobInfo());
- CHECK(value()->IsObject());
- CHECK(tasks()->IsFixedArray() || tasks()->IsCallable() ||
- tasks()->IsSymbol());
- CHECK(deferred_promise()->IsUndefined(isolate) ||
- deferred_promise()->IsJSReceiver() ||
- deferred_promise()->IsFixedArray());
- CHECK(deferred_on_resolve()->IsUndefined(isolate) ||
- deferred_on_resolve()->IsCallable() ||
- deferred_on_resolve()->IsFixedArray());
- CHECK(deferred_on_reject()->IsUndefined(isolate) ||
- deferred_on_reject()->IsCallable() ||
- deferred_on_reject()->IsFixedArray());
- CHECK(context()->IsContext());
-}
-
void AsyncGeneratorRequest::AsyncGeneratorRequestVerify() {
CHECK(IsAsyncGeneratorRequest());
VerifySmiField(kResumeModeOffset);
@@ -1256,8 +1333,6 @@ void BigInt::BigIntVerify() {
CHECK(IsBigInt());
CHECK_GE(length(), 0);
CHECK_IMPLIES(is_zero(), !sign()); // There is no -0n.
- // TODO(neis): Somewhere check that MSD is non-zero. Doesn't hold during some
- // operations that allocate which is why we can't test it here.
}
void JSModuleNamespace::JSModuleNamespaceVerify() {
@@ -1298,7 +1373,7 @@ void Module::ModuleVerify() {
CHECK((status() >= kEvaluating && code()->IsModuleInfo()) ||
(status() == kInstantiated && code()->IsJSGeneratorObject()) ||
- (status() >= kInstantiating && code()->IsJSFunction()) ||
+ (status() == kInstantiating && code()->IsJSFunction()) ||
(code()->IsSharedFunctionInfo()));
CHECK_EQ(status() == kErrored, !exception()->IsTheHole(GetIsolate()));
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 1cbc2ca418..9c3ac94ab5 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -40,6 +40,7 @@
#include "src/objects/hash-table.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-inl.h"
@@ -78,13 +79,14 @@ int PropertyDetails::field_width_in_words() const {
}
TYPE_CHECKER(BigInt, BIGINT_TYPE)
-TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
+TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
TYPE_CHECKER(CallHandlerInfo, TUPLE3_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
TYPE_CHECKER(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)
+TYPE_CHECKER(FeedbackCell, FEEDBACK_CELL_TYPE)
TYPE_CHECKER(FeedbackVector, FEEDBACK_VECTOR_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
@@ -100,7 +102,6 @@ TYPE_CHECKER(JSError, JS_ERROR_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
-TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
@@ -109,11 +110,11 @@ TYPE_CHECKER(PreParsedScopeData, TUPLE2_TYPE)
TYPE_CHECKER(PropertyArray, PROPERTY_ARRAY_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(PropertyDescriptorObject, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(ScopeInfo, SCOPE_INFO_TYPE)
TYPE_CHECKER(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE)
TYPE_CHECKER(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE)
TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
-TYPE_CHECKER(TemplateMap, HASH_TABLE_TYPE)
-TYPE_CHECKER(TemplateObjectDescription, TUPLE3_TYPE)
+TYPE_CHECKER(TemplateObjectDescription, TUPLE2_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
TYPE_CHECKER(WasmMemoryObject, WASM_MEMORY_TYPE)
@@ -322,7 +323,15 @@ bool HeapObject::IsJSWeakCollection() const {
bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
-bool HeapObject::IsPromiseCapability() const { return IsTuple3(); }
+bool HeapObject::IsMicrotask() const {
+ InstanceType instance_type = map()->instance_type();
+ return (instance_type >= FIRST_MICROTASK_TYPE &&
+ instance_type <= LAST_MICROTASK_TYPE);
+}
+
+bool HeapObject::IsPromiseReactionJobTask() const {
+ return IsPromiseFulfillReactionJobTask() || IsPromiseRejectReactionJobTask();
+}
bool HeapObject::IsEnumCache() const { return IsTuple2(); }
@@ -395,10 +404,6 @@ bool HeapObject::IsScriptContextTable() const {
return map() == GetHeap()->script_context_table_map();
}
-bool HeapObject::IsScopeInfo() const {
- return map() == GetHeap()->scope_info_map();
-}
-
template <>
inline bool Is<JSFunction>(Object* obj) {
return obj->IsJSFunction();
@@ -412,6 +417,22 @@ bool HeapObject::IsStringWrapper() const {
return IsJSValue() && JSValue::cast(this)->value()->IsString();
}
+bool HeapObject::IsBooleanWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsBoolean();
+}
+
+bool HeapObject::IsScriptWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsScript();
+}
+
+bool HeapObject::IsNumberWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsNumber();
+}
+
+bool HeapObject::IsSymbolWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsSymbol();
+}
+
bool HeapObject::IsBoolean() const {
return IsOddball() &&
((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
@@ -441,6 +462,10 @@ bool HeapObject::IsNumberDictionary() const {
return map() == GetHeap()->number_dictionary_map();
}
+bool HeapObject::IsSimpleNumberDictionary() const {
+ return map() == GetHeap()->simple_number_dictionary_map();
+}
+
bool HeapObject::IsStringTable() const {
return map() == GetHeap()->string_table_map();
}
@@ -556,6 +581,7 @@ CAST_ACCESSOR(ConstantElementsPair)
CAST_ACCESSOR(ContextExtension)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(EnumCache)
+CAST_ACCESSOR(FeedbackCell)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FunctionTemplateInfo)
CAST_ACCESSOR(GlobalDictionary)
@@ -572,7 +598,6 @@ CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSStringIterator)
@@ -580,6 +605,7 @@ CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(NumberDictionary)
CAST_ACCESSOR(Object)
CAST_ACCESSOR(ObjectHashSet)
CAST_ACCESSOR(ObjectHashTable)
@@ -587,15 +613,12 @@ CAST_ACCESSOR(ObjectTemplateInfo)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
-CAST_ACCESSOR(PromiseCapability)
-CAST_ACCESSOR(PromiseReactionJobInfo)
-CAST_ACCESSOR(PromiseResolveThenableJobInfo)
CAST_ACCESSOR(PropertyArray)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(PrototypeInfo)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(NumberDictionary)
+CAST_ACCESSOR(SimpleNumberDictionary)
CAST_ACCESSOR(SmallOrderedHashMap)
CAST_ACCESSOR(SmallOrderedHashSet)
CAST_ACCESSOR(Smi)
@@ -605,7 +628,6 @@ CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(TemplateMap)
CAST_ACCESSOR(TemplateObjectDescription)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
@@ -805,10 +827,6 @@ MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
return ConvertToIndex(isolate, input, error_index);
}
-bool Object::HasSpecificClassOf(String* name) {
- return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
-}
-
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
Handle<Name> name) {
LookupIterator it(object, name);
@@ -1371,6 +1389,7 @@ Handle<Object> Oddball::ToNumber(Handle<Oddball> input) {
ACCESSORS(Cell, value, Object, kValueOffset)
+ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(PropertyCell, name, Name, kNameOffset)
ACCESSORS(PropertyCell, value, Object, kValueOffset)
@@ -1716,13 +1735,14 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
return UPDATE_WRITE_BARRIER;
}
-AllocationAlignment HeapObject::RequiredAlignment() const {
+AllocationAlignment HeapObject::RequiredAlignment(Map* map) {
#ifdef V8_HOST_ARCH_32_BIT
- if ((IsFixedFloat64Array() || IsFixedDoubleArray()) &&
- FixedArrayBase::cast(this)->length() != 0) {
+ int instance_type = map->instance_type();
+ if (instance_type == FIXED_FLOAT64_ARRAY_TYPE ||
+ instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
return kDoubleAligned;
}
- if (IsHeapNumber()) return kDoubleUnaligned;
+ if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
#endif // V8_HOST_ARCH_32_BIT
return kWordAligned;
}
@@ -2246,7 +2266,7 @@ ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, feedback_vector_cell, Cell, kFeedbackVectorOffset)
+ACCESSORS(JSFunction, feedback_cell, FeedbackCell, kFeedbackCellOffset)
ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
@@ -2272,22 +2292,6 @@ bool AccessorInfo::has_getter() {
return result;
}
-ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, reject, JSFunction, kRejectOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, context, Context, kContextOffset);
-
-ACCESSORS(PromiseReactionJobInfo, value, Object, kValueOffset);
-ACCESSORS(PromiseReactionJobInfo, tasks, Object, kTasksOffset);
-ACCESSORS(PromiseReactionJobInfo, deferred_promise, Object,
- kDeferredPromiseOffset);
-ACCESSORS(PromiseReactionJobInfo, deferred_on_resolve, Object,
- kDeferredOnResolveOffset);
-ACCESSORS(PromiseReactionJobInfo, deferred_on_reject, Object,
- kDeferredOnRejectOffset);
-ACCESSORS(PromiseReactionJobInfo, context, Context, kContextOffset);
-
ACCESSORS(AsyncGeneratorRequest, next, Object, kNextOffset)
SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode, kResumeModeOffset)
ACCESSORS(AsyncGeneratorRequest, value, Object, kValueOffset)
@@ -2356,7 +2360,6 @@ bool ConstantElementsPair::is_empty() const {
return constant_values()->length() == 0;
}
-SMI_ACCESSORS(TemplateObjectDescription, hash, kHashOffset)
ACCESSORS(TemplateObjectDescription, raw_strings, FixedArray, kRawStringsOffset)
ACCESSORS(TemplateObjectDescription, cooked_strings, FixedArray,
kCookedStringsOffset)
@@ -2494,7 +2497,7 @@ SMI_ACCESSORS(StackFrameInfo, id, kIdIndex)
ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
kSourcePositionTableIndex)
ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
- NumberDictionary, kStackFrameCacheIndex)
+ SimpleNumberDictionary, kStackFrameCacheIndex)
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -2512,8 +2515,8 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
kAcceptAnyReceiver)
FeedbackVector* JSFunction::feedback_vector() const {
- DCHECK(feedback_vector_cell()->value()->IsFeedbackVector());
- return FeedbackVector::cast(feedback_vector_cell()->value());
+ DCHECK(has_feedback_vector());
+ return FeedbackVector::cast(feedback_cell()->value());
}
// Code objects that are marked for deoptimization are not considered to be
@@ -2620,21 +2623,7 @@ void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
}
bool JSFunction::has_feedback_vector() const {
- return !feedback_vector_cell()->value()->IsUndefined(GetIsolate());
-}
-
-JSFunction::FeedbackVectorState JSFunction::GetFeedbackVectorState(
- Isolate* isolate) const {
- Cell* cell = feedback_vector_cell();
- if (shared()->HasAsmWasmData()) {
- return NO_VECTOR_NEEDED;
- } else if (cell == isolate->heap()->undefined_cell()) {
- return TOP_LEVEL_SCRIPT_NEEDS_VECTOR;
- } else if (cell->value() == isolate->heap()->undefined_value() ||
- !has_feedback_vector()) {
- return NEEDS_VECTOR;
- }
- return HAS_VECTOR;
+ return !feedback_cell()->value()->IsUndefined(GetIsolate());
}
Context* JSFunction::context() {
@@ -2769,8 +2758,7 @@ bool JSGeneratorObject::is_executing() const {
}
ACCESSORS(JSAsyncGeneratorObject, queue, HeapObject, kQueueOffset)
-ACCESSORS(JSAsyncGeneratorObject, awaited_promise, HeapObject,
- kAwaitedPromiseOffset)
+SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting, kIsAwaitingOffset)
ACCESSORS(JSValue, value, Object, kValueOffset)
@@ -2806,23 +2794,6 @@ SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
-
-
-ACCESSORS(PromiseCapability, promise, Object, kPromiseOffset)
-ACCESSORS(PromiseCapability, resolve, Object, kResolveOffset)
-ACCESSORS(PromiseCapability, reject, Object, kRejectOffset)
-
-ACCESSORS(JSPromise, result, Object, kResultOffset)
-ACCESSORS(JSPromise, deferred_promise, Object, kDeferredPromiseOffset)
-ACCESSORS(JSPromise, deferred_on_resolve, Object, kDeferredOnResolveOffset)
-ACCESSORS(JSPromise, deferred_on_reject, Object, kDeferredOnRejectOffset)
-ACCESSORS(JSPromise, fulfill_reactions, Object, kFulfillReactionsOffset)
-ACCESSORS(JSPromise, reject_reactions, Object, kRejectReactionsOffset)
-SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
-BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
-BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
-
-
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
#if VERIFY_HEAP && DEBUG
@@ -3304,30 +3275,35 @@ void GlobalDictionary::ValueAtPut(int entry, Object* value) {
set(EntryToIndex(entry), value);
}
-bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
+bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object* other) {
DCHECK(other->IsNumber());
return key == static_cast<uint32_t>(other->Number());
}
-uint32_t NumberDictionaryShape::Hash(Isolate* isolate, uint32_t key) {
+uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
return ComputeIntegerHash(key, isolate->heap()->HashSeed());
}
-uint32_t NumberDictionaryShape::HashForObject(Isolate* isolate, Object* other) {
+uint32_t NumberDictionaryBaseShape::HashForObject(Isolate* isolate,
+ Object* other) {
DCHECK(other->IsNumber());
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()),
isolate->heap()->HashSeed());
}
+Handle<Object> NumberDictionaryBaseShape::AsHandle(Isolate* isolate,
+ uint32_t key) {
+ return isolate->factory()->NewNumberFromUint(key);
+}
+
int NumberDictionaryShape::GetMapRootIndex() {
return Heap::kNumberDictionaryMapRootIndex;
}
-Handle<Object> NumberDictionaryShape::AsHandle(Isolate* isolate, uint32_t key) {
- return isolate->factory()->NewNumberFromUint(key);
+int SimpleNumberDictionaryShape::GetMapRootIndex() {
+ return Heap::kSimpleNumberDictionaryMapRootIndex;
}
-
bool NameDictionaryShape::IsMatch(Handle<Name> key, Object* other) {
DCHECK(other->IsTheHole(key->GetIsolate()) ||
Name::cast(other)->IsUniqueName());
@@ -3488,6 +3464,7 @@ ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
kSyncIteratorOffset)
+ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index f13c222632..68f147f7d4 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -13,6 +13,8 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/promise-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions-inl.h"
@@ -41,7 +43,6 @@ void Object::Print(std::ostream& os) { // NOLINT
}
}
-
void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
os << reinterpret_cast<void*>(this) << ": [";
if (id != nullptr) {
@@ -51,6 +52,7 @@ void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
}
os << "]";
if (GetHeap()->InOldSpace(this)) os << " in OldSpace";
+ if (!IsMap()) os << "\n - map: " << Brief(map());
}
@@ -106,6 +108,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case TRANSITION_ARRAY_TYPE:
TransitionArray::cast(this)->TransitionArrayPrint(os);
break;
+ case FEEDBACK_CELL_TYPE:
+ FeedbackCell::cast(this)->FeedbackCellPrint(os);
+ break;
case FEEDBACK_VECTOR_TYPE:
FeedbackVector::cast(this)->FeedbackVectorPrint(os);
break;
@@ -244,10 +249,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case LOAD_HANDLER_TYPE:
LoadHandler::cast(this)->LoadHandlerPrint(os);
break;
-
case STORE_HANDLER_TYPE:
StoreHandler::cast(this)->StoreHandlerPrint(os);
break;
+ case SCOPE_INFO_TYPE:
+ ScopeInfo::cast(this)->ScopeInfoPrint(os);
+ break;
default:
os << "UNKNOWN TYPE " << map()->instance_type();
@@ -412,14 +419,14 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
SloppyArgumentsElements* elements) {
Isolate* isolate = elements->GetIsolate();
FixedArray* arguments_store = elements->arguments();
- os << "\n 0: context = " << Brief(elements->context())
- << "\n 1: arguments_store = " << Brief(arguments_store)
+ os << "\n 0: context: " << Brief(elements->context())
+ << "\n 1: arguments_store: " << Brief(arguments_store)
<< "\n parameter to context slot map:";
for (uint32_t i = 0; i < elements->parameter_map_length(); i++) {
uint32_t raw_index = i + SloppyArgumentsElements::kParameterMapStart;
Object* mapped_entry = elements->get_mapped_entry(i);
os << "\n " << raw_index << ": param(" << i
- << ") = " << Brief(mapped_entry);
+ << "): " << Brief(mapped_entry);
if (mapped_entry->IsTheHole(isolate)) {
os << " in the arguments_store[" << i << "]";
} else {
@@ -428,7 +435,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
}
if (arguments_store->length() == 0) return;
os << "\n }"
- << "\n - arguments_store = " << Brief(arguments_store) << " "
+ << "\n - arguments_store: " << Brief(arguments_store) << " "
<< ElementsKindToString(arguments_store->map()->elements_kind()) << " {";
if (kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
PrintFixedArrayElements(os, arguments_store);
@@ -443,7 +450,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
void JSObject::PrintElements(std::ostream& os) { // NOLINT
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
- os << " - elements = " << Brief(elements()) << " {";
+ os << " - elements: " << Brief(elements()) << " {";
if (elements()->length() == 0) {
os << " }\n";
return;
@@ -492,21 +499,21 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
obj->PrintHeader(os, id);
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
- os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " [";
+ os << " [";
if (obj->HasFastProperties()) {
os << "FastProperties";
} else {
os << "DictionaryProperties";
}
PrototypeIterator iter(obj->GetIsolate(), obj);
- os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
- os << "\n - elements = " << Brief(obj->elements()) << " ["
+ os << "]\n - prototype: " << Brief(iter.GetCurrent());
+ os << "\n - elements: " << Brief(obj->elements()) << " ["
<< ElementsKindToString(obj->map()->elements_kind());
if (obj->elements()->IsCowArray()) os << " (COW)";
os << "]";
Object* hash = obj->GetHash();
if (hash->IsSmi()) {
- os << "\n - hash = " << Brief(hash);
+ os << "\n - hash: " << Brief(hash);
}
if (obj->GetEmbedderFieldCount() > 0) {
os << "\n - embedder fields: " << obj->GetEmbedderFieldCount();
@@ -516,7 +523,7 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
bool print_elements = true) {
- os << "\n - properties = ";
+ os << "\n - properties: ";
Object* properties_or_hash = obj->raw_properties_or_hash();
if (!properties_or_hash->IsSmi()) {
os << Brief(properties_or_hash);
@@ -545,27 +552,26 @@ void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
void JSArray::JSArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArray");
- os << "\n - length = " << Brief(this->length());
+ os << "\n - length: " << Brief(this->length());
JSObjectPrintBody(os, this);
}
void JSPromise::JSPromisePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSPromise");
- os << "\n - status = " << JSPromise::Status(status());
- os << "\n - result = " << Brief(result());
- os << "\n - deferred_promise: " << Brief(deferred_promise());
- os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
- os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
- os << "\n - fulfill_reactions = " << Brief(fulfill_reactions());
- os << "\n - reject_reactions = " << Brief(reject_reactions());
- os << "\n - has_handler = " << has_handler();
+ os << "\n - status: " << JSPromise::Status(status());
+ if (status() == Promise::kPending) {
+ os << "\n - reactions: " << Brief(reactions());
+ } else {
+ os << "\n - result: " << Brief(result());
+ }
+ os << "\n - has_handler: " << has_handler();
os << "\n ";
}
void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSRegExp");
- os << "\n - data = " << Brief(data());
- os << "\n - source = " << Brief(source());
+ os << "\n - data: " << Brief(data());
+ os << "\n - source: " << Brief(source());
JSObjectPrintBody(os, this);
}
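
The reworked JSPromise printer above tracks the promise-reactions redesign: while pending, a promise holds its reaction list, and once settled it holds only the result, so the printer must branch on status before deciding which field to show. A minimal standalone sketch of that shape (illustrative types only, not V8's):

#include <iostream>
#include <string>

// Illustrative stand-in for a promise whose "reactions or result" slot is
// interpreted according to its status, as in the hunk above.
enum class Status { kPending, kFulfilled, kRejected };

struct Promise {
  Status status;
  std::string reactions_or_result;  // reactions list if pending, else result
};

void Print(std::ostream& os, const Promise& p) {
  os << "JSPromise";
  os << "\n - status: "
     << (p.status == Status::kPending
             ? "pending"
             : p.status == Status::kFulfilled ? "fulfilled" : "rejected");
  if (p.status == Status::kPending) {
    os << "\n - reactions: " << p.reactions_or_result;
  } else {
    os << "\n - result: " << p.reactions_or_result;
  }
  os << "\n";
}

int main() {
  Print(std::cout, {Status::kPending, "<2 reactions>"});
  Print(std::cout, {Status::kFulfilled, "42"});
}
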
@@ -654,7 +660,6 @@ void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, IsHashTable() ? "HashTable" : "FixedArray");
- os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
PrintFixedArrayElements(os, this);
os << "\n";
@@ -662,7 +667,6 @@ void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyArray");
- os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
os << "\n - hash: " << Hash();
PrintFixedArrayElements(os, this);
@@ -671,7 +675,6 @@ void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FixedDoubleArray");
- os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
DoPrintElements<FixedDoubleArray>(os, this);
os << "\n";
@@ -689,20 +692,31 @@ void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-template void FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::Print();
-template void FeedbackVectorSpecBase<FeedbackVectorSpec>::Print();
+void FeedbackCell::FeedbackCellPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "FeedbackCell");
+ if (map() == GetHeap()->no_closures_cell_map()) {
+ os << "\n - no closures";
+ } else if (map() == GetHeap()->one_closure_cell_map()) {
+ os << "\n - one closure";
+ } else if (map() == GetHeap()->many_closures_cell_map()) {
+ os << "\n - many closures";
+ } else {
+ os << "\n - Invalid FeedbackCell map";
+ }
+ os << " - value: " << Brief(value());
+ os << "\n";
+}
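
The new FeedbackCellPrint classifies a cell purely by which of three dedicated maps it carries, so the map pointer doubles as a closure-count tag. A standalone sketch of that identity-comparison pattern, with plain sentinel objects standing in for the cell maps:

#include <iostream>

// Sentinel objects standing in for the no/one/many-closures cell maps;
// identity comparison against them classifies a cell, as in the hunk above.
struct Map {};
const Map kNoClosures{}, kOneClosure{}, kManyClosures{};

struct Cell {
  const Map* map;
};

const char* Classify(const Cell& cell) {
  if (cell.map == &kNoClosures) return "no closures";
  if (cell.map == &kOneClosure) return "one closure";
  if (cell.map == &kManyClosures) return "many closures";
  return "invalid map";
}

int main() {
  Cell c{&kOneClosure};
  std::cout << "FeedbackCell - " << Classify(c) << "\n";
}
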
-template <typename Derived>
-void FeedbackVectorSpecBase<Derived>::Print() {
+void FeedbackVectorSpec::Print() {
OFStream os(stdout);
+
FeedbackVectorSpecPrint(os);
+
os << std::flush;
}
-template <typename Derived>
-void FeedbackVectorSpecBase<Derived>::FeedbackVectorSpecPrint(
- std::ostream& os) { // NOLINT
- int slot_count = This()->slots();
+void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
+ int slot_count = slots();
os << " - slot_count: " << slot_count;
if (slot_count == 0) {
os << " (empty)\n";
@@ -710,7 +724,7 @@ void FeedbackVectorSpecBase<Derived>::FeedbackVectorSpecPrint(
}
for (int slot = 0; slot < slot_count;) {
- FeedbackSlotKind kind = This()->GetKind(FeedbackSlot(slot));
+ FeedbackSlotKind kind = GetKind(FeedbackSlot(slot));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
DCHECK_LT(0, entry_size);
os << "\n Slot #" << slot << " " << kind;
@@ -773,7 +787,7 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
FeedbackSlotKind kind = iter.kind();
os << "\n - slot " << slot << " " << kind << " ";
- FeedbackSlotPrint(os, slot, kind);
+ FeedbackSlotPrint(os, slot);
int entry_size = iter.entry_size();
if (entry_size > 0) os << " {";
@@ -788,71 +802,62 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
void FeedbackVector::FeedbackSlotPrint(std::ostream& os,
FeedbackSlot slot) { // NOLINT
- FeedbackSlotPrint(os, slot, GetKind(slot));
+ FeedbackNexus nexus(this, slot);
+ nexus.Print(os);
}
-void FeedbackVector::FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot,
- FeedbackSlotKind kind) { // NOLINT
- switch (kind) {
- case FeedbackSlotKind::kLoadProperty: {
- LoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
+namespace {
+
+const char* ICState2String(InlineCacheState state) {
+ switch (state) {
+ case UNINITIALIZED:
+ return "UNINITIALIZED";
+ case PREMONOMORPHIC:
+ return "PREMONOMORPHIC";
+ case MONOMORPHIC:
+ return "MONOMORPHIC";
+ case RECOMPUTE_HANDLER:
+ return "RECOMPUTE_HANDLER";
+ case POLYMORPHIC:
+ return "POLYMORPHIC";
+ case MEGAMORPHIC:
+ return "MEGAMORPHIC";
+ case GENERIC:
+ return "GENERIC";
+ }
+ UNREACHABLE();
+}
+} // anonymous namespace
+
+void FeedbackNexus::Print(std::ostream& os) { // NOLINT
+ switch (kind()) {
+ case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
- case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
- LoadGlobalICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kLoadKeyed: {
- KeyedLoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kCall: {
- CallICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
+ case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kStoreGlobalSloppy:
- case FeedbackSlotKind::kStoreGlobalStrict: {
- StoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
+ case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kInstanceOf:
+ case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kStoreKeyedStrict: {
- KeyedStoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
+ os << ICState2String(StateFromFeedback());
break;
}
case FeedbackSlotKind::kBinaryOp: {
- BinaryOpICNexus nexus(this, slot);
- os << "BinaryOp:" << nexus.GetBinaryOperationFeedback();
+ os << "BinaryOp:" << GetBinaryOperationFeedback();
break;
}
case FeedbackSlotKind::kCompareOp: {
- CompareICNexus nexus(this, slot);
- os << "CompareOp:" << nexus.GetCompareOperationFeedback();
+ os << "CompareOp:" << GetCompareOperationFeedback();
break;
}
case FeedbackSlotKind::kForIn: {
- ForInICNexus nexus(this, slot);
- os << "ForIn:" << nexus.GetForInFeedback();
- break;
- }
- case FeedbackSlotKind::kInstanceOf: {
- InstanceOfICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- StoreDataPropertyInLiteralICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
+ os << "ForIn:" << GetForInFeedback();
break;
}
case FeedbackSlotKind::kCreateClosure:
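
The hunk above folds the per-kind *ICNexus classes into a single FeedbackNexus::Print that groups every IC-state kind under shared case labels. A minimal standalone sketch of that grouped-dispatch shape (the enum here is illustrative, not V8's FeedbackSlotKind):

#include <iostream>

enum class SlotKind { kCall, kLoadProperty, kStoreNamed, kBinaryOp, kCompareOp };

// Grouped case labels let every IC-state kind share one body, the shape the
// refactored FeedbackNexus::Print above uses.
void PrintSlot(std::ostream& os, SlotKind kind) {
  switch (kind) {
    case SlotKind::kCall:
    case SlotKind::kLoadProperty:
    case SlotKind::kStoreNamed:
      os << "ic-state\n";  // one shared path instead of three near-copies
      break;
    case SlotKind::kBinaryOp:
      os << "BinaryOp feedback\n";
      break;
    case SlotKind::kCompareOp:
      os << "CompareOp feedback\n";
      break;
  }
}

int main() { PrintSlot(std::cout, SlotKind::kLoadProperty); }
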
@@ -868,7 +873,7 @@ void FeedbackVector::FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot,
void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSValue");
- os << "\n - value = " << Brief(value());
+ os << "\n - value: " << Brief(value());
JSObjectPrintBody(os, this);
}
@@ -933,7 +938,7 @@ static const char* const weekdays[] = {
void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSDate");
- os << "\n - value = " << Brief(value());
+ os << "\n - value: " << Brief(value());
if (!year()->IsSmi()) {
os << "\n - time = NaN\n";
} else {
@@ -955,10 +960,9 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSProxy");
- os << "\n - map = " << reinterpret_cast<void*>(map());
- os << "\n - target = ";
+ os << "\n - target: ";
target()->ShortPrint(os);
- os << "\n - handler = ";
+ os << "\n - handler: ";
handler()->ShortPrint(os);
os << "\n";
}
@@ -966,21 +970,21 @@ void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
void JSSet::JSSetPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSSet");
- os << " - table = " << Brief(table());
+ os << " - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
void JSMap::JSMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSMap");
- os << " - table = " << Brief(table());
+ os << " - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
void JSCollectionIterator::JSCollectionIteratorPrint(
std::ostream& os) { // NOLINT
- os << "\n - table = " << Brief(table());
- os << "\n - index = " << Brief(index());
+ os << "\n - table: " << Brief(table());
+ os << "\n - index: " << Brief(index());
os << "\n";
}
@@ -999,22 +1003,22 @@ void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSWeakMap");
- os << "\n - table = " << Brief(table());
+ os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
void JSWeakSet::JSWeakSetPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSWeakSet");
- os << "\n - table = " << Brief(table());
+ os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, this);
}
void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArrayBuffer");
- os << "\n - backing_store = " << backing_store();
- os << "\n - byte_length = " << Brief(byte_length());
+ os << "\n - backing_store: " << backing_store();
+ os << "\n - byte_length: " << Brief(byte_length());
if (is_external()) os << "\n - external";
if (is_neuterable()) os << "\n - neuterable";
if (was_neutered()) os << "\n - neutered";
@@ -1027,10 +1031,10 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSTypedArray");
- os << "\n - buffer = " << Brief(buffer());
- os << "\n - byte_offset = " << Brief(byte_offset());
- os << "\n - byte_length = " << Brief(byte_length());
- os << "\n - length = " << Brief(length());
+ os << "\n - buffer: " << Brief(buffer());
+ os << "\n - byte_offset: " << Brief(byte_offset());
+ os << "\n - byte_length: " << Brief(byte_length());
+ os << "\n - length: " << Brief(length());
if (WasNeutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !WasNeutered());
}
@@ -1039,18 +1043,17 @@ void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) {  // NOLINT
JSObjectPrintHeader(os, this, "JSArrayIterator");
InstanceType instance_type = map()->instance_type();
- std::string type;
+ os << "\n - type: ";
if (instance_type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
- type = "keys";
+ os << "keys";
} else if (instance_type <= LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) {
- type = "entries";
+ os << "entries";
} else {
- type = "values";
+ os << "values";
}
- os << "\n - type = " << type;
- os << "\n - object = " << Brief(object());
- os << "\n - index = " << Brief(index());
+ os << "\n - object: " << Brief(object());
+ os << "\n - index: " << Brief(index());
JSObjectPrintBody(os, this);
}
@@ -1058,8 +1061,8 @@ void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) {  // NOLINT
void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSDataView");
os << "\n - buffer =" << Brief(buffer());
- os << "\n - byte_offset = " << Brief(byte_offset());
- os << "\n - byte_length = " << Brief(byte_length());
+ os << "\n - byte_offset: " << Brief(byte_offset());
+ os << "\n - byte_length: " << Brief(byte_length());
if (WasNeutered()) os << "\n - neutered";
JSObjectPrintBody(os, this, !WasNeutered());
}
@@ -1067,45 +1070,15 @@ void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSBoundFunction");
- os << "\n - bound_target_function = " << Brief(bound_target_function());
- os << "\n - bound_this = " << Brief(bound_this());
- os << "\n - bound_arguments = " << Brief(bound_arguments());
+ os << "\n - bound_target_function: " << Brief(bound_target_function());
+ os << "\n - bound_this: " << Brief(bound_this());
+ os << "\n - bound_arguments: " << Brief(bound_arguments());
JSObjectPrintBody(os, this);
}
-
-namespace {
-
-std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
- os << "[";
- if (kind == FunctionKind::kNormalFunction) {
- os << " NormalFunction";
- } else {
-#define PRINT_FLAG(name) \
- if (static_cast<int>(kind) & static_cast<int>(FunctionKind::k##name)) { \
- os << " " << #name; \
- }
-
- PRINT_FLAG(ArrowFunction)
- PRINT_FLAG(GeneratorFunction)
- PRINT_FLAG(ConciseMethod)
- PRINT_FLAG(DefaultConstructor)
- PRINT_FLAG(DerivedConstructor)
- PRINT_FLAG(BaseConstructor)
- PRINT_FLAG(GetterFunction)
- PRINT_FLAG(SetterFunction)
- PRINT_FLAG(AsyncFunction)
- PRINT_FLAG(Module)
-#undef PRINT_FLAG
- }
- return os << " ]";
-}
-
-} // namespace
-
void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "Function");
- os << "\n - function prototype = ";
+ os << "\n - function prototype: ";
if (has_prototype_slot()) {
if (has_prototype()) {
os << Brief(prototype());
@@ -1113,22 +1086,37 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << " (non-instance prototype)";
}
}
- os << "\n - initial_map = ";
+ os << "\n - initial_map: ";
if (has_initial_map()) os << Brief(initial_map());
} else {
os << "<no-prototype-slot>";
}
- os << "\n - shared_info = " << Brief(shared());
- os << "\n - name = " << Brief(shared()->name());
- os << "\n - formal_parameter_count = "
+ os << "\n - shared_info: " << Brief(shared());
+ os << "\n - name: " << Brief(shared()->name());
+
+  // Print the builtin name for builtin functions.
+ int builtin_index = code()->builtin_index();
+ if (builtin_index != -1 && !IsInterpreted()) {
+ if (builtin_index == Builtins::kDeserializeLazy) {
+ if (shared()->HasLazyDeserializationBuiltinId()) {
+ builtin_index = shared()->lazy_deserialization_builtin_id();
+ os << "\n - builtin: " << GetIsolate()->builtins()->name(builtin_index)
+ << "(lazy)";
+ }
+ } else {
+ os << "\n - builtin: " << GetIsolate()->builtins()->name(builtin_index);
+ }
+ }
+
+ os << "\n - formal_parameter_count: "
<< shared()->internal_formal_parameter_count();
- os << "\n - kind = " << shared()->kind();
- os << "\n - context = " << Brief(context());
- os << "\n - code = " << Brief(code());
+ os << "\n - kind: " << shared()->kind();
+ os << "\n - context: " << Brief(context());
+ os << "\n - code: " << Brief(code());
if (IsInterpreted()) {
os << "\n - interpreted";
if (shared()->HasBytecodeArray()) {
- os << "\n - bytecode = " << shared()->bytecode_array();
+ os << "\n - bytecode: " << shared()->bytecode_array();
}
}
if (WasmExportedFunction::IsWasmExportedFunction(this)) {
@@ -1142,7 +1130,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
shared()->PrintSourceCode(os);
JSObjectPrintBody(os, this);
os << "\n - feedback vector: ";
- if (feedback_vector_cell()->value()->IsFeedbackVector()) {
+ if (has_feedback_vector()) {
feedback_vector()->FeedbackVectorPrint(os);
} else {
os << "not available\n";
@@ -1151,7 +1139,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
if (HasSourceCode()) {
- os << "\n - source code = ";
+ os << "\n - source code: ";
String* source = String::cast(Script::cast(script())->source());
int start = start_position();
int length = end_position() - start;
@@ -1163,28 +1151,26 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "SharedFunctionInfo");
- os << "\n - name = ";
+ os << "\n - name: ";
if (has_shared_name()) {
os << Brief(raw_name());
} else {
os << "<no-shared-name>";
}
- os << "\n - kind = " << kind();
+ os << "\n - kind: " << kind();
if (needs_home_object()) {
os << "\n - needs_home_object";
}
- os << "\n - function_map_index = " << function_map_index();
- os << "\n - formal_parameter_count = " << internal_formal_parameter_count();
- os << "\n - expected_nof_properties = " << expected_nof_properties();
- os << "\n - language_mode = " << language_mode();
- os << "\n - instance class name = ";
- instance_class_name()->Print(os);
- os << " - code = " << Brief(code());
+ os << "\n - function_map_index: " << function_map_index();
+ os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
+ os << "\n - expected_nof_properties: " << expected_nof_properties();
+ os << "\n - language_mode: " << language_mode();
+ os << " - code: " << Brief(code());
if (HasBytecodeArray()) {
- os << "\n - bytecode_array = " << bytecode_array();
+ os << "\n - bytecode_array: " << bytecode_array();
}
if (HasAsmWasmData()) {
- os << "\n - asm_wasm_data = " << Brief(asm_wasm_data());
+ os << "\n - asm_wasm_data: " << Brief(asm_wasm_data());
}
PrintSourceCode(os);
// Script files are often large, hard to read.
@@ -1197,19 +1183,20 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
} else if (is_declaration()) {
os << "\n - declaration";
}
- os << "\n - function token position = " << function_token_position();
- os << "\n - start position = " << start_position();
- os << "\n - end position = " << end_position();
+ os << "\n - function token position: " << function_token_position();
+ os << "\n - start position: " << start_position();
+ os << "\n - end position: " << end_position();
if (HasDebugInfo()) {
- os << "\n - debug info = " << Brief(debug_info());
+ os << "\n - debug info: " << Brief(debug_info());
} else {
os << "\n - no debug info";
}
- os << "\n - length = " << length();
- os << "\n - feedback_metadata = ";
+ os << "\n - scope info: " << Brief(scope_info());
+ os << "\n - length: " << length();
+ os << "\n - feedback_metadata: ";
feedback_metadata()->FeedbackMetadataPrint(os);
if (HasPreParsedScopeData()) {
- os << "\n - preparsed scope data = " << preparsed_scope_data();
+ os << "\n - preparsed scope data: " << preparsed_scope_data();
} else {
os << "\n - no preparsed scope data";
}
@@ -1220,7 +1207,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSGlobalProxy");
if (!GetIsolate()->bootstrapper()->IsActive()) {
- os << "\n - native context = " << Brief(native_context());
+ os << "\n - native context: " << Brief(native_context());
}
JSObjectPrintBody(os, this);
}
@@ -1229,9 +1216,9 @@ void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSGlobalObject");
if (!GetIsolate()->bootstrapper()->IsActive()) {
- os << "\n - native context = " << Brief(native_context());
+ os << "\n - native context: " << Brief(native_context());
}
- os << "\n - global proxy = " << Brief(global_proxy());
+ os << "\n - global proxy: " << Brief(global_proxy());
JSObjectPrintBody(os, this);
}
@@ -1338,34 +1325,64 @@ void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseCapability");
- os << "\n - promise: " << Brief(promise());
- os << "\n - resolve: " << Brief(resolve());
- os << "\n - reject: " << Brief(reject());
+void CallbackTask::CallbackTaskPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "CallbackTask");
+ os << "\n - callback: " << Brief(callback());
+ os << "\n - data: " << Brief(data());
+ os << "\n";
+}
+
+void CallableTask::CallableTaskPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "CallableTask");
+ os << "\n - context: " << Brief(context());
+ os << "\n - callable: " << Brief(callable());
os << "\n";
}
-void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
+void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskPrint(
std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseResolveThenableJobInfo");
- os << "\n - thenable: " << Brief(thenable());
+ HeapObject::PrintHeader(os, "PromiseFulfillReactionJobTask");
+ os << "\n - argument: " << Brief(argument());
+ os << "\n - context: " << Brief(context());
+ os << "\n - handler: " << Brief(handler());
+ os << "\n - payload: " << Brief(payload());
+ os << "\n";
+}
+
+void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseRejectReactionJobTask");
+ os << "\n - argument: " << Brief(argument());
+ os << "\n - context: " << Brief(context());
+ os << "\n - handler: " << Brief(handler());
+ os << "\n - payload: " << Brief(payload());
+ os << "\n";
+}
+
+void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseResolveThenableJobTask");
+ os << "\n - context: " << Brief(context());
+ os << "\n - promise_to_resolve: " << Brief(promise_to_resolve());
os << "\n - then: " << Brief(then());
+ os << "\n - thenable: " << Brief(thenable());
+ os << "\n";
+}
+
+void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseCapability");
+ os << "\n - promise: " << Brief(promise());
os << "\n - resolve: " << Brief(resolve());
os << "\n - reject: " << Brief(reject());
- os << "\n - context: " << Brief(context());
os << "\n";
}
-void PromiseReactionJobInfo::PromiseReactionJobInfoPrint(
- std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseReactionJobInfo");
- os << "\n - value: " << Brief(value());
- os << "\n - tasks: " << Brief(tasks());
- os << "\n - deferred_promise: " << Brief(deferred_promise());
- os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
- os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
- os << "\n - reaction context: " << Brief(context());
+void PromiseReaction::PromiseReactionPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseReaction");
+ os << "\n - next: " << Brief(next());
+ os << "\n - reject_handler: " << Brief(reject_handler());
+ os << "\n - fulfill_handler: " << Brief(fulfill_handler());
+ os << "\n - payload: " << Brief(payload());
os << "\n";
}
@@ -1616,6 +1633,58 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+namespace {
+void PrintScopeInfoList(ScopeInfo* scope_info, std::ostream& os,
+ const char* list_name, int nof_internal_slots,
+ int start, int length) {
+ if (length <= 0) return;
+ int end = start + length;
+ os << "\n - " << list_name;
+ if (nof_internal_slots > 0) {
+ os << " " << start << "-" << end << " [internal slots]";
+ }
+ os << " {\n";
+ for (int i = nof_internal_slots; start < end; ++i, ++start) {
+ os << " - " << i << ": ";
+ String::cast(scope_info->get(start))->ShortPrint(os);
+ os << "\n";
+ }
+ os << " }";
+}
+} // namespace
+
+void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "ScopeInfo");
+ if (length() == 0) {
+ os << "\n - length = 0";
+ return;
+ }
+
+ os << "\n - scope type: " << scope_type();
+ os << "\n - language mode: " << language_mode();
+ os << "\n - local count: " << LocalCount();
+ os << "\n - stack slot count: " << StackSlotCount();
+ if (HasReceiver()) os << "\n - has receiver";
+ if (HasNewTarget()) os << "\n - needs new target";
+ if (HasOuterScopeInfo()) {
+ os << "\n - outer scope info: " << Brief(OuterScopeInfo());
+ }
+ if (HasFunctionName()) {
+ os << "\n - function name: ";
+ FunctionName()->ShortPrint(os);
+ }
+ os << "\n - length: " << length();
+ if (length() > 0) {
+ PrintScopeInfoList(this, os, "parameters", 0, ParameterNamesIndex(),
+ ParameterCount());
+ PrintScopeInfoList(this, os, "stack slots", 0, StackLocalNamesIndex(),
+ StackLocalCount());
+ PrintScopeInfoList(this, os, "context slots", Context::MIN_CONTEXT_SLOTS,
+ ContextLocalNamesIndex(), ContextLocalCount());
+ // TODO(neis): Print module stuff if present.
+ }
+ os << "\n";
+}
void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "DebugInfo");
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index af2e3eccb3..9e80224d93 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -60,6 +60,8 @@
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/map.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/promise-inl.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
@@ -72,6 +74,7 @@
#include "src/string-stream.h"
#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache-inl.h"
+#include "src/unicode-decoder.h"
#include "src/utils-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
@@ -699,39 +702,6 @@ Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
// static
-MaybeHandle<Object> Object::Multiply(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumber(lhs->Number() * rhs->Number());
-}
-
-
-// static
-MaybeHandle<Object> Object::Divide(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumber(lhs->Number() / rhs->Number());
-}
-
-
-// static
-MaybeHandle<Object> Object::Modulus(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumber(Modulo(lhs->Number(), rhs->Number()));
-}
-
-
-// static
MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
Handle<Object> rhs) {
if (lhs->IsNumber() && rhs->IsNumber()) {
@@ -757,89 +727,6 @@ MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
// static
-MaybeHandle<Object> Object::Subtract(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumber(lhs->Number() - rhs->Number());
-}
-
-
-// static
-MaybeHandle<Object> Object::ShiftLeft(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs)
- << (NumberToUint32(*rhs) & 0x1F));
-}
-
-
-// static
-MaybeHandle<Object> Object::ShiftRight(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) >>
- (NumberToUint32(*rhs) & 0x1F));
-}
-
-
-// static
-MaybeHandle<Object> Object::ShiftRightLogical(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromUint(NumberToUint32(*lhs) >>
- (NumberToUint32(*rhs) & 0x1F));
-}
-
-
-// static
-MaybeHandle<Object> Object::BitwiseAnd(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) &
- NumberToInt32(*rhs));
-}
-
-
-// static
-MaybeHandle<Object> Object::BitwiseOr(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) |
- NumberToInt32(*rhs));
-}
-
-
-// static
-MaybeHandle<Object> Object::BitwiseXor(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs) {
- if (!lhs->IsNumber() || !rhs->IsNumber()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
- }
- return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) ^
- NumberToInt32(*rhs));
-}
-
-// static
MaybeHandle<Object> Object::OrdinaryHasInstance(Isolate* isolate,
Handle<Object> callable,
Handle<Object> object) {
@@ -1272,7 +1159,7 @@ Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
// Support calling this method without an active context, but refuse
// access to access-checked objects in that case.
if (it->isolate()->context() != nullptr && it->HasAccess()) continue;
- // Fall through.
+ V8_FALLTHROUGH;
case LookupIterator::JSPROXY:
it->NotFound();
return it->isolate()->factory()->undefined_value();
@@ -1313,24 +1200,24 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
if (current_info->IsSharedFunctionInfo()) {
return handle(SharedFunctionInfo::cast(current_info), isolate);
}
- Handle<Object> class_name(info->class_name(), isolate);
Handle<Name> name;
Handle<String> name_string;
if (maybe_name.ToHandle(&name) && name->IsString()) {
name_string = Handle<String>::cast(name);
+ } else if (info->class_name()->IsString()) {
+ name_string = handle(String::cast(info->class_name()));
} else {
- name_string = class_name->IsString() ? Handle<String>::cast(class_name)
- : isolate->factory()->empty_string();
+ name_string = isolate->factory()->empty_string();
}
Handle<Code> code = BUILTIN_CODE(isolate, HandleApiCall);
bool is_constructor;
FunctionKind function_kind;
- if (!info->remove_prototype()) {
- is_constructor = true;
- function_kind = kNormalFunction;
- } else {
+ if (info->remove_prototype()) {
is_constructor = false;
function_kind = kConciseMethod;
+ } else {
+ is_constructor = true;
+ function_kind = kNormalFunction;
}
Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
name_string, code, is_constructor, function_kind);
@@ -1339,9 +1226,6 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
}
result->set_length(info->length());
- if (class_name->IsString()) {
- result->set_instance_class_name(String::cast(*class_name));
- }
result->set_api_func_data(*info);
result->DontAdaptArguments();
DCHECK(result->IsApiFunction());
@@ -2482,7 +2366,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
Handle<Object> default_species = isolate->array_function();
if (original_array->IsJSArray() &&
Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
- isolate->IsArraySpeciesLookupChainIntact()) {
+ isolate->IsSpeciesLookupChainIntact()) {
return default_species;
}
Handle<Object> constructor = isolate->factory()->undefined_value();
@@ -3106,6 +2990,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
+ case SCOPE_INFO_TYPE:
return kVisitFixedArray;
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -3114,6 +2999,9 @@ VisitorId Map::GetVisitorId(Map* map) {
case PROPERTY_ARRAY_TYPE:
return kVisitPropertyArray;
+ case FEEDBACK_CELL_TYPE:
+ return kVisitFeedbackCell;
+
case FEEDBACK_VECTOR_TYPE:
return kVisitFeedbackVector;
@@ -3226,6 +3114,8 @@ VisitorId Map::GetVisitorId(Map* map) {
case FIXED_INT32_ARRAY_TYPE:
case FIXED_FLOAT32_ARRAY_TYPE:
case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+ case FIXED_BIGUINT64_ARRAY_TYPE:
+ case FIXED_BIGINT64_ARRAY_TYPE:
return kVisitFixedTypedArrayBase;
case FIXED_FLOAT64_ARRAY_TYPE:
@@ -3401,6 +3291,20 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case PROPERTY_ARRAY_TYPE:
os << "<PropertyArray[" << PropertyArray::cast(this)->length() << "]>";
break;
+ case FEEDBACK_CELL_TYPE: {
+ os << "<FeedbackCell[";
+ if (map() == heap->no_closures_cell_map()) {
+ os << "no closures";
+ } else if (map() == heap->one_closure_cell_map()) {
+ os << "one closure";
+ } else if (map() == heap->many_closures_cell_map()) {
+ os << "many closures";
+ } else {
+ os << "!!!INVALID MAP!!!";
+ }
+ os << "]>";
+ break;
+ }
case FEEDBACK_VECTOR_TYPE:
os << "<FeedbackVector[" << FeedbackVector::cast(this)->length() << "]>";
break;
@@ -3437,6 +3341,9 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
+ case SCOPE_INFO_TYPE:
+ os << "<ScopeInfo[" << ScopeInfo::cast(this)->length() << "]>";
+ break;
case CODE_TYPE: {
Code* code = Code::cast(this);
os << "<Code " << Code::Kind2String(code->kind());
@@ -3540,6 +3447,10 @@ void Tuple3::BriefPrintDetails(std::ostream& os) {
<< Brief(value3());
}
+void CallableTask::BriefPrintDetails(std::ostream& os) {
+ os << " callable=" << Brief(callable());
+}
+
void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
@@ -3589,20 +3500,63 @@ void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
(*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
String* JSReceiver::class_name() {
- if (IsFunction()) {
- return GetHeap()->Function_string();
+ if (IsFunction()) return GetHeap()->Function_string();
+ if (IsJSArgumentsObject()) return GetHeap()->Arguments_string();
+ if (IsJSArray()) return GetHeap()->Array_string();
+ if (IsJSArrayBuffer()) {
+ if (JSArrayBuffer::cast(this)->is_shared()) {
+ return GetHeap()->SharedArrayBuffer_string();
+ }
+ return GetHeap()->ArrayBuffer_string();
+ }
+ if (IsJSArrayIterator()) return GetHeap()->ArrayIterator_string();
+ if (IsJSDate()) return GetHeap()->Date_string();
+ if (IsJSError()) return GetHeap()->Error_string();
+ if (IsJSGeneratorObject()) return GetHeap()->Generator_string();
+ if (IsJSMap()) return GetHeap()->Map_string();
+ if (IsJSMapIterator()) return GetHeap()->MapIterator_string();
+ if (IsJSProxy()) {
+ return map()->is_callable() ? GetHeap()->Function_string()
+ : GetHeap()->Object_string();
+ }
+ if (IsJSRegExp()) return GetHeap()->RegExp_string();
+ if (IsJSSet()) return GetHeap()->Set_string();
+ if (IsJSSetIterator()) return GetHeap()->SetIterator_string();
+ if (IsJSTypedArray()) {
+#define SWITCH_KIND(Type, type, TYPE, ctype, size) \
+ if (map()->elements_kind() == TYPE##_ELEMENTS) { \
+ return GetHeap()->Type##Array_string(); \
+ }
+ TYPED_ARRAYS(SWITCH_KIND)
+#undef SWITCH_KIND
+ }
+ if (IsJSValue()) {
+ Object* value = JSValue::cast(this)->value();
+ if (value->IsBoolean()) return GetHeap()->Boolean_string();
+ if (value->IsString()) return GetHeap()->String_string();
+ if (value->IsNumber()) return GetHeap()->Number_string();
+ if (value->IsBigInt()) return GetHeap()->BigInt_string();
+ if (value->IsSymbol()) return GetHeap()->Symbol_string();
+ if (value->IsScript()) return GetHeap()->Script_string();
+ UNREACHABLE();
}
+ if (IsJSWeakMap()) return GetHeap()->WeakMap_string();
+ if (IsJSWeakSet()) return GetHeap()->WeakSet_string();
+ if (IsJSGlobalProxy()) return GetHeap()->global_string();
+
Object* maybe_constructor = map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(maybe_constructor);
- return String::cast(constructor->shared()->instance_class_name());
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
+ if (constructor->shared()->IsApiFunction()) {
+ maybe_constructor = constructor->shared()->get_api_func_data();
+ }
+ }
+
+ if (maybe_constructor->IsFunctionTemplateInfo()) {
FunctionTemplateInfo* info = FunctionTemplateInfo::cast(maybe_constructor);
- return info->class_name()->IsString() ? String::cast(info->class_name())
- : GetHeap()->empty_string();
+ if (info->class_name()->IsString()) return String::cast(info->class_name());
}
- // If the constructor is not present, return "Object".
return GetHeap()->Object_string();
}
@@ -3612,7 +3566,8 @@ bool HeapObject::CanBeRehashed() const {
case HASH_TABLE_TYPE:
// TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
return IsNameDictionary() || IsGlobalDictionary() ||
- IsNumberDictionary() || IsStringTable() || IsWeakHashTable();
+ IsNumberDictionary() || IsSimpleNumberDictionary() ||
+ IsStringTable() || IsWeakHashTable();
case DESCRIPTOR_ARRAY_TYPE:
return true;
case TRANSITION_ARRAY_TYPE:
@@ -3634,6 +3589,8 @@ void HeapObject::RehashBasedOnMap() {
NameDictionary::cast(this)->Rehash();
} else if (IsNumberDictionary()) {
NumberDictionary::cast(this)->Rehash();
+ } else if (IsSimpleNumberDictionary()) {
+ SimpleNumberDictionary::cast(this)->Rehash();
} else if (IsGlobalDictionary()) {
GlobalDictionary::cast(this)->Rehash();
} else if (IsStringTable()) {
@@ -4885,7 +4842,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
if (it->HolderIsReceiverOrHiddenPrototype()) {
return SetDataProperty(it, value);
}
- // Fall through.
+ V8_FALLTHROUGH;
case LookupIterator::TRANSITION:
*found = false;
return Nothing<bool>();
@@ -4970,7 +4927,7 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
return JSObject::SetPropertyWithAccessor(&own_lookup, value,
should_throw);
}
- // Fall through.
+ V8_FALLTHROUGH;
case LookupIterator::INTEGER_INDEXED_EXOTIC:
return RedefineIncompatibleProperty(isolate, it->GetName(), value,
should_throw);
@@ -5054,17 +5011,32 @@ Maybe<bool> Object::RedefineIncompatibleProperty(Isolate* isolate,
Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
- // Proxies are handled elsewhere. Other non-JSObjects cannot have own
- // properties.
- Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
+ DCHECK_IMPLIES(it->GetReceiver()->IsJSProxy(),
+ it->GetName()->IsPrivateField());
+ DCHECK_IMPLIES(!it->IsElement() && it->GetName()->IsPrivateField(),
+ it->state() == LookupIterator::DATA);
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
// Store on the holder which may be hidden behind the receiver.
DCHECK(it->HolderIsReceiverOrHiddenPrototype());
Handle<Object> to_assign = value;
// Convert the incoming value to a number for storing into typed arrays.
- if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined(it->isolate())) {
+ if (it->IsElement() && receiver->IsJSObject() &&
+ JSObject::cast(*receiver)->HasFixedTypedArrayElements()) {
+ ElementsKind elements_kind = JSObject::cast(*receiver)->GetElementsKind();
+ if (elements_kind == BIGINT64_ELEMENTS ||
+ elements_kind == BIGUINT64_ELEMENTS) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(it->isolate(), to_assign,
+ BigInt::FromObject(it->isolate(), value),
+ Nothing<bool>());
+ // We have to recheck the length. However, it can only change if the
+ // underlying buffer was neutered, so just check that.
+ if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
+ return Just(true);
+ // TODO(neis): According to the spec, this should throw a TypeError.
+ }
+ } else if (!value->IsNumber() && !value->IsUndefined(it->isolate())) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
// We have to recheck the length. However, it can only change if the
@@ -5085,7 +5057,7 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- receiver->JSObjectVerify();
+ receiver->HeapObjectVerify();
}
#endif
return Just(true);
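
The SetDataProperty change above picks the coercion by elements kind (BigInt::FromObject for BIGINT64/BIGUINT64 elements, ToNumber otherwise) and rechecks for a neutered buffer afterwards, since either conversion can run user JS. A standalone sketch of that select-then-recheck pattern; the types here are toy stand-ins and no user code actually runs:

#include <iostream>
#include <string>

enum class ElementsKind { kFloat64, kBigInt64 };

// Stand-ins for the two coercions; in V8 these are Object::ToNumber and
// BigInt::FromObject, either of which may call back into JS.
std::string ToNumber(const std::string& v) { return "number(" + v + ")"; }
std::string ToBigInt(const std::string& v) { return "bigint(" + v + ")"; }

std::string CoerceForStore(ElementsKind kind, const std::string& value,
                           bool* buffer_neutered) {
  std::string result = kind == ElementsKind::kBigInt64 ? ToBigInt(value)
                                                       : ToNumber(value);
  // In V8 the coercion may have run user code that neutered the backing
  // buffer, so the caller rechecks before writing; this toy runs no user
  // code, so it always reports not-neutered.
  *buffer_neutered = false;
  return result;
}

int main() {
  bool neutered;
  std::cout << CoerceForStore(ElementsKind::kBigInt64, "42n", &neutered)
            << "\n";
}
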
@@ -5096,18 +5068,25 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
PropertyAttributes attributes,
ShouldThrow should_throw,
StoreFromKeyed store_mode) {
- if (!it->GetReceiver()->IsJSObject()) {
- if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
- RETURN_FAILURE(it->isolate(), should_throw,
- NewTypeError(MessageTemplate::kProxyPrivate));
- }
+ if (!it->GetReceiver()->IsJSReceiver()) {
return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
value, should_throw);
}
+ // Private symbols should be installed on JSProxy using
+ // JSProxy::SetPrivateSymbol.
+ if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate() &&
+ !it->GetName()->IsPrivateField()) {
+ RETURN_FAILURE(it->isolate(), should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
+
DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, it->state());
- Handle<JSObject> receiver = it->GetStoreTarget();
+ Handle<JSReceiver> receiver = it->GetStoreTarget<JSReceiver>();
+ DCHECK_IMPLIES(receiver->IsJSProxy(), it->GetName()->IsPrivateField());
+ DCHECK_IMPLIES(receiver->IsJSProxy(),
+ it->state() == LookupIterator::NOT_FOUND);
// If the receiver is a JSGlobalProxy, store on the prototype (JSGlobalObject)
// instead. If the prototype is Null, the proxy is detached.
@@ -5141,9 +5120,10 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
}
}
- Maybe<bool> result = JSObject::AddDataElement(receiver, it->index(), value,
- attributes, should_throw);
- JSObject::ValidateElements(*receiver);
+ Handle<JSObject> receiver_obj = Handle<JSObject>::cast(receiver);
+ Maybe<bool> result = JSObject::AddDataElement(
+ receiver_obj, it->index(), value, attributes, should_throw);
+ JSObject::ValidateElements(*receiver_obj);
return result;
} else {
it->UpdateProtector();
@@ -5159,7 +5139,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- receiver->JSObjectVerify();
+ receiver->HeapObjectVerify();
}
#endif
}
@@ -7260,7 +7240,6 @@ Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
return Just(true);
}
-
// TODO(jkummerow): Consider unification with FastAsArrayLength() in
// accessors.cc.
bool PropertyKeyToArrayLength(Handle<Object> value, uint32_t* length) {
@@ -7472,8 +7451,9 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
ShouldThrow should_throw) {
STACK_CHECK(isolate, Nothing<bool>());
if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
- return SetPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
- should_throw);
+ DCHECK(!Handle<Symbol>::cast(key)->IsPrivateField());
+ return JSProxy::SetPrivateSymbol(isolate, proxy, Handle<Symbol>::cast(key),
+ desc, should_throw);
}
Handle<String> trap_name = isolate->factory()->defineProperty_string();
// 1. Assert: IsPropertyKey(P) is true.
@@ -7576,12 +7556,12 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
-
// static
-Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
- Handle<Symbol> private_name,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ DCHECK(!private_name->IsPrivateField());
// Despite the generic name, this can only add private data properties.
if (!PropertyDescriptor::IsDataDescriptor(desc) ||
desc->ToAttributes() != DONT_ENUM) {
@@ -7611,7 +7591,6 @@ Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
-
// static
Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
Handle<JSReceiver> object,
@@ -8791,9 +8770,10 @@ MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
Handle<JSReceiver> object,
PropertyFilter filter,
+ bool try_fast_path,
bool get_entries) {
Handle<FixedArray> values_or_entries;
- if (filter == ENUMERABLE_STRINGS) {
+ if (try_fast_path && filter == ENUMERABLE_STRINGS) {
Maybe<bool> fast_values_or_entries = FastGetOwnValuesOrEntries(
isolate, object, get_entries, &values_or_entries);
if (fast_values_or_entries.IsNothing()) return MaybeHandle<FixedArray>();
@@ -8846,13 +8826,17 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
}
MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
- PropertyFilter filter) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, false);
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, false);
}
MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
- PropertyFilter filter) {
- return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, true);
+ PropertyFilter filter,
+ bool try_fast_path) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter,
+ try_fast_path, true);
}
bool Map::DictionaryElementsInPrototypeChainOnly() {
@@ -10027,7 +10011,8 @@ Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
}
Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
- Handle<Object> value) {
+ Handle<Object> value,
+ PretenureFlag pretenure) {
if (index < array->length()) {
array->set(index, *value);
return array;
@@ -10037,7 +10022,8 @@ Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
capacity = JSObject::NewElementsCapacity(capacity);
} while (capacity <= index);
Handle<FixedArray> new_array =
- array->GetIsolate()->factory()->NewUninitializedFixedArray(capacity);
+ array->GetIsolate()->factory()->NewUninitializedFixedArray(capacity,
+ pretenure);
array->CopyTo(0, *new_array, 0, array->length());
new_array->FillWithHoles(array->length(), new_array->length());
new_array->set(index, *value);
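
SetAndGrow above returns the array unchanged when the index already fits; otherwise it grows capacity until the index fits, copies, fills the fresh tail with holes, and stores. A standalone sketch using std::vector, with plain doubling standing in for JSObject::NewElementsCapacity and -1 standing in for the hole (the new PretenureFlag is an allocation hint with no standalone analogue, so it is omitted):

#include <iostream>
#include <vector>

// Grows `array` until `index` fits, filling new slots with `hole`, then
// stores `value` -- the shape of FixedArray::SetAndGrow in the hunk above.
std::vector<int> SetAndGrow(std::vector<int> array, size_t index, int value,
                            int hole = -1) {
  if (index < array.size()) {
    array[index] = value;
    return array;
  }
  size_t capacity = array.empty() ? 1 : array.size();
  do {
    capacity *= 2;  // stands in for JSObject::NewElementsCapacity
  } while (capacity <= index);
  array.resize(capacity, hole);
  array[index] = value;
  return array;
}

int main() {
  std::vector<int> a = {1, 2, 3};
  a = SetAndGrow(a, 6, 99);
  // Prints 1 2 3 -1 -1 -1 99 followed by five trailing holes.
  for (int x : a) std::cout << x << " ";
  std::cout << "\n";
}
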
@@ -10493,54 +10479,6 @@ SharedFunctionInfo* DeoptimizationData::GetInlinedFunction(int index) {
}
}
-int HandlerTable::LookupRange(int pc_offset, int* data_out,
- CatchPrediction* prediction_out) {
- int innermost_handler = -1;
-#ifdef DEBUG
- // Assuming that ranges are well nested, we don't need to track the innermost
- // offsets. This is just to verify that the table is actually well nested.
- int innermost_start = std::numeric_limits<int>::min();
- int innermost_end = std::numeric_limits<int>::max();
-#endif
- for (int i = 0; i < length(); i += kRangeEntrySize) {
- int start_offset = Smi::ToInt(get(i + kRangeStartIndex));
- int end_offset = Smi::ToInt(get(i + kRangeEndIndex));
- int handler_field = Smi::ToInt(get(i + kRangeHandlerIndex));
- int handler_offset = HandlerOffsetField::decode(handler_field);
- CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- int handler_data = Smi::ToInt(get(i + kRangeDataIndex));
- if (pc_offset >= start_offset && pc_offset < end_offset) {
- DCHECK_GE(start_offset, innermost_start);
- DCHECK_LT(end_offset, innermost_end);
- innermost_handler = handler_offset;
-#ifdef DEBUG
- innermost_start = start_offset;
- innermost_end = end_offset;
-#endif
- if (data_out) *data_out = handler_data;
- if (prediction_out) *prediction_out = prediction;
- }
- }
- return innermost_handler;
-}
-
-
-// TODO(turbofan): Make sure table is sorted and use binary search.
-int HandlerTable::LookupReturn(int pc_offset) {
- for (int i = 0; i < length(); i += kReturnEntrySize) {
- int return_offset = Smi::ToInt(get(i + kReturnOffsetIndex));
- int handler_field = Smi::ToInt(get(i + kReturnHandlerIndex));
- if (pc_offset == return_offset) {
- return HandlerOffsetField::decode(handler_field);
- }
- }
- return -1;
-}
-
-Handle<HandlerTable> HandlerTable::Empty(Isolate* isolate) {
- return Handle<HandlerTable>::cast(isolate->factory()->empty_fixed_array());
-}
-
#ifdef DEBUG
bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
if (length() != other->length()) return false;
@@ -10560,7 +10498,7 @@ Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
// Perform left trimming if requested.
int left = 0;
UnicodeCache* unicode_cache = isolate->unicode_cache();
- if (mode == kTrim || mode == kTrimLeft) {
+ if (mode == kTrim || mode == kTrimStart) {
while (left < length &&
unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
left++;
@@ -10569,7 +10507,7 @@ Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
// Perform right trimming if requested.
int right = length;
- if (mode == kTrim || mode == kTrimRight) {
+ if (mode == kTrim || mode == kTrimEnd) {
while (
right > left &&
unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
@@ -11908,24 +11846,15 @@ bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize))) {
return false;
}
- int i;
- size_t remaining_in_str = static_cast<size_t>(str_len);
- const uint8_t* utf8_data = reinterpret_cast<const uint8_t*>(str.start());
- for (i = 0; i < slen && remaining_in_str > 0; i++) {
- size_t cursor = 0;
- uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor);
- DCHECK(cursor > 0 && cursor <= remaining_in_str);
- if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- if (i > slen - 1) return false;
- if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
- if (Get(i) != unibrow::Utf16::TrailSurrogate(r)) return false;
- } else {
- if (Get(i) != r) return false;
- }
- utf8_data += cursor;
- remaining_in_str -= cursor;
+
+ int i = 0;
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(str);
+ while (i < slen && !it.Done()) {
+ if (Get(i++) != *it) return false;
+ ++it;
}
- return (allow_prefix_match || i == slen) && remaining_in_str == 0;
+
+ return (allow_prefix_match || i == slen) && it.Done();
}
template <>
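
The IsUtf8EqualTo rewrite above swaps manual Utf8::ValueOf decoding, with its explicit surrogate-pair handling, for a unibrow::Utf8Iterator that yields one UTF-16 code unit per increment. A standalone sketch of that loop shape behind a toy ASCII-only iterator (the real iterator also expands supplementary-plane characters into surrogate pairs):

#include <cstdint>
#include <iostream>
#include <string>

// Toy iterator yielding one UTF-16 code unit per step; unibrow::Utf8Iterator
// plays this role in the hunk above.
class CodeUnitIterator {
 public:
  explicit CodeUnitIterator(const std::string& utf8) : s_(utf8), pos_(0) {}
  bool Done() const { return pos_ >= s_.size(); }
  uint16_t operator*() const { return static_cast<uint8_t>(s_[pos_]); }
  CodeUnitIterator& operator++() { ++pos_; return *this; }
 private:
  const std::string& s_;
  size_t pos_;
};

// Same loop shape as the rewritten String::IsUtf8EqualTo.
bool IsEqualTo(const std::u16string& str, const std::string& utf8,
               bool allow_prefix_match) {
  size_t i = 0;
  CodeUnitIterator it(utf8);
  while (i < str.size() && !it.Done()) {
    if (str[i++] != *it) return false;
    ++it;
  }
  return (allow_prefix_match || i == str.size()) && it.Done();
}

int main() {
  std::cout << IsEqualTo(u"abc", "abc", false) << "\n";  // 1: exact match
  std::cout << IsEqualTo(u"abc", "ab", true) << "\n";    // 1: utf8 is a prefix
}
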
@@ -12094,37 +12023,31 @@ uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
*utf16_length_out = vector_length;
return HashSequentialString(chars.start(), vector_length, seed);
}
+
// Start with a fake length which won't affect computation.
// It will be updated later.
StringHasher hasher(String::kMaxArrayIndexSize, seed);
- size_t remaining = static_cast<size_t>(vector_length);
- const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start());
+ DCHECK(hasher.is_array_index_);
+
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(chars);
int utf16_length = 0;
bool is_index = true;
- DCHECK(hasher.is_array_index_);
- while (remaining > 0) {
- size_t consumed = 0;
- uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed);
- DCHECK(consumed > 0 && consumed <= remaining);
- stream += consumed;
- remaining -= consumed;
- bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode;
- utf16_length += is_two_characters ? 2 : 1;
- // No need to keep hashing. But we do need to calculate utf16_length.
- if (utf16_length > String::kMaxHashCalcLength) continue;
- if (is_two_characters) {
- uint16_t c1 = unibrow::Utf16::LeadSurrogate(c);
- uint16_t c2 = unibrow::Utf16::TrailSurrogate(c);
- hasher.AddCharacter(c1);
- hasher.AddCharacter(c2);
- if (is_index) is_index = hasher.UpdateIndex(c1);
- if (is_index) is_index = hasher.UpdateIndex(c2);
- } else {
- hasher.AddCharacter(c);
- if (is_index) is_index = hasher.UpdateIndex(c);
- }
+
+ while (utf16_length < String::kMaxHashCalcLength && !it.Done()) {
+ utf16_length++;
+ uint16_t c = *it;
+ ++it;
+ hasher.AddCharacter(c);
+ if (is_index) is_index = hasher.UpdateIndex(c);
+ }
+
+ // Now that hashing is done, we just need to calculate utf16_length
+ while (!it.Done()) {
+ ++it;
+ utf16_length++;
}
- *utf16_length_out = static_cast<int>(utf16_length);
+
+ *utf16_length_out = utf16_length;
// Must set length here so that hash computation is correct.
hasher.length_ = utf16_length;
return hasher.GetHashField();
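
ComputeUtf8Hash above now splits into two phases: hash code units only up to String::kMaxHashCalcLength, then keep iterating purely to count the total UTF-16 length, which the final hash field still needs. A standalone sketch of that hash-then-count split (toy hash and cap):

#include <cstdint>
#include <iostream>
#include <string>

// Illustrative cap standing in for String::kMaxHashCalcLength.
constexpr int kMaxHashCalcLength = 7;

uint32_t HashAndMeasure(const std::u16string& s, int* utf16_length_out) {
  uint32_t hash = 0;
  int utf16_length = 0;
  size_t pos = 0;
  // Phase 1: hash code units up to the cap.
  while (utf16_length < kMaxHashCalcLength && pos < s.size()) {
    utf16_length++;
    hash = hash * 31 + s[pos++];  // toy running hash
  }
  // Phase 2: no more hashing, just finish counting the length.
  while (pos < s.size()) {
    pos++;
    utf16_length++;
  }
  *utf16_length_out = utf16_length;
  return hash;
}

int main() {
  int len;
  uint32_t h = HashAndMeasure(u"hello, world", &len);
  std::cout << "hash=" << h << " length=" << len << "\n";  // length=12
}
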
@@ -12283,31 +12206,21 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}
// static
-void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Isolate* isolate = shared->GetIsolate();
-
- FeedbackVectorState state = function->GetFeedbackVectorState(isolate);
- switch (state) {
- case TOP_LEVEL_SCRIPT_NEEDS_VECTOR: {
- // A top level script didn't get it's literals installed.
- Handle<FeedbackVector> feedback_vector =
- FeedbackVector::New(isolate, shared);
- Handle<Cell> new_cell =
- isolate->factory()->NewOneClosureCell(feedback_vector);
- function->set_feedback_vector_cell(*new_cell);
- break;
- }
- case NEEDS_VECTOR: {
+void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ if (function->feedback_cell()->value()->IsUndefined(isolate)) {
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ if (!shared->HasAsmWasmData()) {
Handle<FeedbackVector> feedback_vector =
FeedbackVector::New(isolate, shared);
- function->feedback_vector_cell()->set_value(*feedback_vector);
- break;
+ if (function->feedback_cell() == isolate->heap()->many_closures_cell()) {
+ Handle<FeedbackCell> feedback_cell =
+ isolate->factory()->NewOneClosureCell(feedback_vector);
+ function->set_feedback_cell(*feedback_cell);
+ } else {
+ function->feedback_cell()->set_value(*feedback_vector);
+ }
}
- case HAS_VECTOR:
- case NO_VECTOR_NEEDED:
- // Nothing to do.
- break;
}
}
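
EnsureFeedbackVector above allocates the vector lazily and, when the function still points at the shared many_closures_cell, first gives it a private one-closure cell so the write cannot leak into unrelated closures. A standalone sketch of that copy-on-write cell pattern (shared_ptr identity stands in for heap-object identity):

#include <iostream>
#include <memory>
#include <string>

// A cell holding the lazily created feedback payload; the process-wide
// shared cell stands in for V8's many_closures_cell.
struct Cell { std::string value; };

std::shared_ptr<Cell> shared_many_closures_cell = std::make_shared<Cell>();

struct Function {
  std::shared_ptr<Cell> feedback_cell = shared_many_closures_cell;
};

void EnsureFeedbackVector(Function* f) {
  if (!f->feedback_cell->value.empty()) return;  // already initialized
  if (f->feedback_cell == shared_many_closures_cell) {
    // Copy-on-write: never mutate the shared cell; give this closure its own.
    f->feedback_cell = std::make_shared<Cell>();
  }
  f->feedback_cell->value = "<feedback vector>";
}

int main() {
  Function f, g;
  EnsureFeedbackVector(&f);
  std::cout << f.feedback_cell->value << "\n";          // <feedback vector>
  std::cout << g.feedback_cell->value.empty() << "\n";  // 1: g untouched
}
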
@@ -12410,9 +12323,7 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
Object* maybe_constructor = object->map()->GetConstructor();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(maybe_constructor);
- Isolate* isolate = object->GetIsolate();
- if (!constructor->shared()->IsApiFunction() &&
- object->class_name() == isolate->heap()->Object_string()) {
+ if (!constructor->shared()->IsApiFunction()) {
Context* context = constructor->context()->native_context();
JSFunction* object_function = context->object_function();
object->map()->SetConstructor(object_function);
@@ -13139,9 +13050,6 @@ bool JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
namespace {
-char const kNativeCodeSource[] = "function () { [native code] }";
-
-
Handle<String> NativeCodeFunctionSourceString(
Handle<SharedFunctionInfo> shared_info) {
Isolate* const isolate = shared_info->GetIsolate();
@@ -13158,7 +13066,7 @@ Handle<String> NativeCodeFunctionSourceString(
// static
Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
Isolate* const isolate = function->GetIsolate();
- return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
+ return isolate->factory()->function_native_code_string();
}
@@ -13626,7 +13534,6 @@ bool SharedFunctionInfo::HasBreakInfo() const {
if (!HasDebugInfo()) return false;
DebugInfo* info = DebugInfo::cast(debug_info());
bool has_break_info = info->HasBreakInfo();
- DCHECK_IMPLIES(has_break_info, HasBytecodeArray());
return has_break_info;
}
@@ -14020,7 +13927,7 @@ void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
- Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
+ Assembler::FlushICache(instruction_start(), instruction_size());
}
@@ -14061,18 +13968,18 @@ void Code::CopyFrom(const CodeDesc& desc) {
// code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(GetIsolate(), code->instruction_start(),
+ it.rinfo()->set_target_address(code->instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(
- GetIsolate(), p, UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else {
intptr_t delta = instruction_start() - desc.buffer;
it.rinfo()->apply(delta);
}
}
- Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
+ Assembler::FlushICache(instruction_start(), instruction_size());
}
@@ -14081,11 +13988,31 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
return table.FindEntry(pc);
}
+int Code::OffHeapInstructionSize() {
+ DCHECK(Builtins::IsOffHeapBuiltin(this));
+ InstructionStream* stream =
+ InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
+ return static_cast<int>(stream->byte_length());
+}
+
+Address Code::OffHeapInstructionStart() {
+ DCHECK(Builtins::IsOffHeapBuiltin(this));
+ InstructionStream* stream =
+ InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
+ return stream->bytes();
+}
+
+Address Code::OffHeapInstructionEnd() {
+ DCHECK(Builtins::IsOffHeapBuiltin(this));
+ InstructionStream* stream =
+ InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
+ return stream->bytes() + stream->byte_length();
+}
namespace {
template <typename Code>
void SetStackFrameCacheCommon(Handle<Code> code,
- Handle<NumberDictionary> cache) {
+ Handle<SimpleNumberDictionary> cache) {
Handle<Object> maybe_table(code->source_position_table(), code->GetIsolate());
if (maybe_table->IsSourcePositionTableWithFrameCache()) {
Handle<SourcePositionTableWithFrameCache>::cast(maybe_table)
@@ -14103,7 +14030,7 @@ void SetStackFrameCacheCommon(Handle<Code> code,
// static
void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
- Handle<NumberDictionary> cache) {
+ Handle<SimpleNumberDictionary> cache) {
if (abstract_code->IsCode()) {
SetStackFrameCacheCommon(handle(abstract_code->GetCode()), cache);
} else {
@@ -14161,7 +14088,7 @@ int AbstractCode::SourceStatementPosition(int offset) {
}
void JSFunction::ClearTypeFeedbackInfo() {
- if (feedback_vector_cell()->value()->IsFeedbackVector()) {
+ if (feedback_cell()->value()->IsFeedbackVector()) {
FeedbackVector* vector = feedback_vector();
Isolate* isolate = GetIsolate();
if (vector->ClearSlots(isolate)) {
@@ -14285,30 +14212,6 @@ Code* Code::OptimizedCodeIterator::Next() {
return code;
}
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
-const char* Code::ICState2String(InlineCacheState state) {
- switch (state) {
- case UNINITIALIZED:
- return "UNINITIALIZED";
- case PREMONOMORPHIC:
- return "PREMONOMORPHIC";
- case MONOMORPHIC:
- return "MONOMORPHIC";
- case RECOMPUTE_HANDLER:
- return "RECOMPUTE_HANDLER";
- case POLYMORPHIC:
- return "POLYMORPHIC";
- case MEGAMORPHIC:
- return "MEGAMORPHIC";
- case GENERIC:
- return "GENERIC";
- }
- UNREACHABLE();
-}
-
-#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
#ifdef ENABLE_DISASSEMBLER
namespace {
@@ -14537,34 +14440,6 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
-void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
- os << " from to hdlr\n";
- for (int i = 0; i < length(); i += kRangeEntrySize) {
- int pc_start = Smi::ToInt(get(i + kRangeStartIndex));
- int pc_end = Smi::ToInt(get(i + kRangeEndIndex));
- int handler_field = Smi::ToInt(get(i + kRangeHandlerIndex));
- int handler_offset = HandlerOffsetField::decode(handler_field);
- CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- int data = Smi::ToInt(get(i + kRangeDataIndex));
- os << " (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
- << ") -> " << std::setw(4) << handler_offset
- << " (prediction=" << prediction << ", data=" << data << ")\n";
- }
-}
-
-
-void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
- os << " off hdlr (c)\n";
- for (int i = 0; i < length(); i += kReturnEntrySize) {
- int pc_offset = Smi::ToInt(get(i + kReturnOffsetIndex));
- int handler_field = Smi::ToInt(get(i + kReturnHandlerIndex));
- int handler_offset = HandlerOffsetField::decode(handler_field);
- CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- os << " " << std::setw(4) << pc_offset << " -> " << std::setw(4)
- << handler_offset << " (prediction=" << prediction << ")\n";
- }
-}
-
void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
os << "kind = " << Kind2String(kind()) << "\n";
if (is_stub()) {
@@ -14668,10 +14543,11 @@ void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
os << "\n";
}
- if (handler_table()->length() > 0) {
- os << "Handler Table (size = " << handler_table()->Size() << ")\n";
+ if (handler_table_offset() > 0) {
+ HandlerTable table(this);
+ os << "Handler Table (size = " << table.NumberOfReturnEntries() << ")\n";
if (kind() == OPTIMIZED_FUNCTION) {
- HandlerTable::cast(handler_table())->HandlerTableReturnPrint(os);
+ table.HandlerTableReturnPrint(os);
}
os << "\n";
}
@@ -14744,10 +14620,11 @@ void BytecodeArray::Disassemble(std::ostream& os) {
}
#endif
- os << "Handler Table (size = " << handler_table()->Size() << ")\n";
+ os << "Handler Table (size = " << handler_table()->length() << ")\n";
#ifdef ENABLE_DISASSEMBLER
if (handler_table()->length() > 0) {
- HandlerTable::cast(handler_table())->HandlerTableRangePrint(os);
+ HandlerTable table(this);
+ table.HandlerTableRangePrint(os);
}
#endif
}
@@ -15720,7 +15597,7 @@ int JSObject::GetFastElementsUsage() {
: store->length();
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
store = SloppyArgumentsElements::cast(store)->arguments();
- // Fall through.
+ V8_FALLTHROUGH;
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
@@ -15938,6 +15815,11 @@ v8::Promise::PromiseState JSPromise::status() const {
return static_cast<v8::Promise::PromiseState>(value);
}
+void JSPromise::set_status(Promise::PromiseState status) {
+ int value = flags() & ~kStatusMask;
+ set_flags(value | status);
+}
+
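// Aside -- a minimal standalone sketch of the bit twiddling in set_status():
// the two low bits of the flags word hold the v8::Promise::PromiseState and
// every other flag bit must survive the update. Constants are illustrative
// stand-ins mirroring JSPromise::kStatusMask and v8::Promise::kFulfilled.
constexpr int kSketchStatusMask = 0x3;
constexpr int kSketchFulfilled = 1;

int SetStatusSketch(int flags, int status) {
  return (flags & ~kSketchStatusMask) | status;  // clear old state, set new
}
// SetStatusSketch(0b1100, kSketchFulfilled) == 0b1101: the state bits change
// while the has_handler/handled_hint bits above them are untouched.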
// static
const char* JSPromise::Status(v8::Promise::PromiseState status) {
switch (status) {
@@ -15951,6 +15833,217 @@ const char* JSPromise::Status(v8::Promise::PromiseState status) {
UNREACHABLE();
}
+// static
+Handle<Object> JSPromise::Fulfill(Handle<JSPromise> promise,
+ Handle<Object> value) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ // 1. Assert: The value of promise.[[PromiseState]] is "pending".
+ DCHECK_EQ(Promise::kPending, promise->status());
+
+ // 2. Let reactions be promise.[[PromiseFulfillReactions]].
+ Handle<Object> reactions(promise->reactions(), isolate);
+
+ // 3. Set promise.[[PromiseResult]] to value.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ promise->set_reactions_or_result(*value);
+
+ // 6. Set promise.[[PromiseState]] to "fulfilled".
+ promise->set_status(Promise::kFulfilled);
+
+ // 7. Return TriggerPromiseReactions(reactions, value).
+ return TriggerPromiseReactions(isolate, reactions, value,
+ PromiseReaction::kFulfill);
+}
+
+// static
+Handle<Object> JSPromise::Reject(Handle<JSPromise> promise,
+ Handle<Object> reason, bool debug_event) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ if (debug_event) isolate->debug()->OnPromiseReject(promise, reason);
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+
+ // 1. Assert: The value of promise.[[PromiseState]] is "pending".
+ DCHECK_EQ(Promise::kPending, promise->status());
+
+ // 2. Let reactions be promise.[[PromiseRejectReactions]].
+ Handle<Object> reactions(promise->reactions(), isolate);
+
+ // 3. Set promise.[[PromiseResult]] to reason.
+ // 4. Set promise.[[PromiseFulfillReactions]] to undefined.
+ // 5. Set promise.[[PromiseRejectReactions]] to undefined.
+ promise->set_reactions_or_result(*reason);
+
+ // 6. Set promise.[[PromiseState]] to "rejected".
+ promise->set_status(Promise::kRejected);
+
+ // 7. If promise.[[PromiseIsHandled]] is false, perform
+ // HostPromiseRejectionTracker(promise, "reject").
+ if (!promise->has_handler()) {
+ isolate->ReportPromiseReject(promise, reason, kPromiseRejectWithNoHandler);
+ }
+
+ // 8. Return TriggerPromiseReactions(reactions, reason).
+ return TriggerPromiseReactions(isolate, reactions, reason,
+ PromiseReaction::kReject);
+}
+
+// static
+MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
+ Handle<Object> resolution) {
+ Isolate* const isolate = promise->GetIsolate();
+
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+
+ // 6. If SameValue(resolution, promise) is true, then
+ if (promise.is_identical_to(resolution)) {
+ // a. Let selfResolutionError be a newly created TypeError object.
+ Handle<Object> self_resolution_error = isolate->factory()->NewTypeError(
+ MessageTemplate::kPromiseCyclic, resolution);
+ // b. Return RejectPromise(promise, selfResolutionError).
+ return Reject(promise, self_resolution_error);
+ }
+
+ // 7. If Type(resolution) is not Object, then
+ if (!resolution->IsJSReceiver()) {
+ // a. Return FulfillPromise(promise, resolution).
+ return Fulfill(promise, resolution);
+ }
+
+ // 8. Let then be Get(resolution, "then").
+ MaybeHandle<Object> then;
+ if (isolate->IsPromiseThenLookupChainIntact(
+ Handle<JSReceiver>::cast(resolution))) {
+ // We can skip the "then" lookup on {resolution} if its [[Prototype]]
+ // is the (initial) Promise.prototype and the Promise#then protector
+ // is intact, as that guards the lookup path for the "then" property
+ // on JSPromise instances which have the (initial) %PromisePrototype%.
+ then = isolate->promise_then();
+ } else {
+ then = JSReceiver::GetProperty(Handle<JSReceiver>::cast(resolution),
+ isolate->factory()->then_string());
+ }
+
+ // 9. If then is an abrupt completion, then
+ Handle<Object> then_action;
+ if (!then.ToHandle(&then_action)) {
+ // a. Return RejectPromise(promise, then.[[Value]]).
+ Handle<Object> reason(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+ return Reject(promise, reason, false);
+ }
+
+ // 10. Let thenAction be then.[[Value]].
+ // 11. If IsCallable(thenAction) is false, then
+ if (!then_action->IsCallable()) {
+ // a. Return FulfillPromise(promise, resolution).
+ return Fulfill(promise, resolution);
+ }
+
+ // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob,
+ // «promise, resolution, thenAction»).
+ Handle<PromiseResolveThenableJobTask> task =
+ isolate->factory()->NewPromiseResolveThenableJobTask(
+ promise, Handle<JSReceiver>::cast(then_action),
+ Handle<JSReceiver>::cast(resolution), isolate->native_context());
+ if (isolate->debug()->is_active() && resolution->IsJSPromise()) {
+ // Mark the dependency of the new {promise} on the {resolution}.
+ Object::SetProperty(resolution,
+ isolate->factory()->promise_handled_by_symbol(),
+ promise, LanguageMode::kStrict)
+ .Check();
+ }
+ isolate->EnqueueMicrotask(task);
+
+ // 13. Return undefined.
+ return isolate->factory()->undefined_value();
+}
+
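// Aside -- the branches above reduce to this decision tree from the spec's
// ResolvePromise steps (standalone sketch; names are illustrative). An abrupt
// completion of the "then" lookup (step 9) rejects with the thrown value.
enum class ResolutionSketch { kRejectSelfCycle, kFulfill, kEnqueueThenableJob };

ResolutionSketch Classify(bool same_value_as_promise, bool is_object,
                          bool then_is_callable) {
  if (same_value_as_promise) return ResolutionSketch::kRejectSelfCycle;  // 6
  if (!is_object) return ResolutionSketch::kFulfill;                     // 7
  if (!then_is_callable) return ResolutionSketch::kFulfill;              // 11
  return ResolutionSketch::kEnqueueThenableJob;                          // 12
}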
+// static
+MaybeHandle<JSPromise> JSPromise::From(Handle<HeapObject> object) {
+ Isolate* const isolate = object->GetIsolate();
+ if (object->IsJSPromise()) {
+ return Handle<JSPromise>::cast(object);
+ } else if (object->IsPromiseCapability()) {
+ Handle<PromiseCapability> capability =
+ Handle<PromiseCapability>::cast(object);
+ if (capability->promise()->IsJSPromise()) {
+ return handle(JSPromise::cast(capability->promise()), isolate);
+ }
+ } else if (object->IsJSGeneratorObject()) {
+ Handle<JSGeneratorObject> generator =
+ Handle<JSGeneratorObject>::cast(object);
+ Handle<Object> handled_by = JSObject::GetDataProperty(
+ generator, isolate->factory()->generator_outer_promise_symbol());
+ if (handled_by->IsJSPromise()) return Handle<JSPromise>::cast(handled_by);
+ }
+ return MaybeHandle<JSPromise>();
+}
+
+// static
+Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
+ Handle<Object> reactions,
+ Handle<Object> argument,
+ PromiseReaction::Type type) {
+ DCHECK(reactions->IsSmi() || reactions->IsPromiseReaction());
+
+ // We need to reverse the {reactions} here, since we record them
+ // on the JSPromise in the reverse order.
+ {
+ DisallowHeapAllocation no_gc;
+ Object* current = *reactions;
+ Object* reversed = Smi::kZero;
+ while (!current->IsSmi()) {
+ Object* next = PromiseReaction::cast(current)->next();
+ PromiseReaction::cast(current)->set_next(reversed);
+ reversed = current;
+ current = next;
+ }
+ reactions = handle(reversed, isolate);
+ }
+
+ // Morph the {reactions} into PromiseReactionJobTasks
+ // and push them onto the microtask queue.
+ while (!reactions->IsSmi()) {
+ Handle<HeapObject> task = Handle<HeapObject>::cast(reactions);
+ Handle<PromiseReaction> reaction = Handle<PromiseReaction>::cast(task);
+ reactions = handle(reaction->next(), isolate);
+
+ STATIC_ASSERT(PromiseReaction::kSize == PromiseReactionJobTask::kSize);
+ if (type == PromiseReaction::kFulfill) {
+ task->synchronized_set_map(
+ isolate->heap()->promise_fulfill_reaction_job_task_map());
+ Handle<PromiseFulfillReactionJobTask>::cast(task)->set_argument(
+ *argument);
+ Handle<PromiseFulfillReactionJobTask>::cast(task)->set_context(
+ *isolate->native_context());
+ STATIC_ASSERT(PromiseReaction::kFulfillHandlerOffset ==
+ PromiseFulfillReactionJobTask::kHandlerOffset);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseFulfillReactionJobTask::kPayloadOffset);
+ } else {
+ DisallowHeapAllocation no_gc;
+ HeapObject* handler = reaction->reject_handler();
+ task->synchronized_set_map(
+ isolate->heap()->promise_reject_reaction_job_task_map());
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_argument(*argument);
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_context(
+ *isolate->native_context());
+ Handle<PromiseRejectReactionJobTask>::cast(task)->set_handler(handler);
+ STATIC_ASSERT(PromiseReaction::kPayloadOffset ==
+ PromiseRejectReactionJobTask::kPayloadOffset);
+ }
+
+ isolate->EnqueueMicrotask(Handle<PromiseReactionJobTask>::cast(task));
+ }
+
+ return isolate->factory()->undefined_value();
+}
+
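// Aside -- the first block in TriggerPromiseReactions is the classic in-place
// reversal of a singly linked list: reactions are pushed onto the promise in
// LIFO order and must run FIFO. Standalone equivalent (NodeSketch stands in
// for PromiseReaction, nullptr for the Smi::kZero sentinel):
struct NodeSketch { NodeSketch* next; };

NodeSketch* ReverseSketch(NodeSketch* current) {
  NodeSketch* reversed = nullptr;
  while (current != nullptr) {
    NodeSketch* next = current->next;  // detach the tail
    current->next = reversed;          // prepend onto the reversed prefix
    reversed = current;
    current = next;
  }
  return reversed;
}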
namespace {
JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
@@ -16466,8 +16559,6 @@ template class HashTable<ObjectHashTable, ObjectHashTableShape>;
template class HashTable<WeakHashTable, WeakHashTableShape>;
-template class HashTable<TemplateMap, TemplateMapShape>;
-
template class Dictionary<NameDictionary, NameDictionaryShape>;
template class Dictionary<GlobalDictionary, GlobalDictionaryShape>;
@@ -16478,6 +16569,12 @@ template class EXPORT_TEMPLATE_DEFINE(
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Dictionary<NumberDictionary, NumberDictionaryShape>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::New(
Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
@@ -16490,6 +16587,11 @@ template Handle<NumberDictionary>
Dictionary<NumberDictionary, NumberDictionaryShape>::AtPut(
Handle<NumberDictionary>, uint32_t, Handle<Object>, PropertyDetails);
+template Handle<SimpleNumberDictionary>
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::AtPut(
+ Handle<SimpleNumberDictionary>, uint32_t, Handle<Object>,
+ PropertyDetails);
+
template Object* Dictionary<
NumberDictionary, NumberDictionaryShape>::SlowReverseLookup(Object* value);
@@ -16504,6 +16606,10 @@ template Handle<NumberDictionary>
Dictionary<NumberDictionary, NumberDictionaryShape>::DeleteEntry(
Handle<NumberDictionary>, int);
+template Handle<SimpleNumberDictionary>
+Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::DeleteEntry(
+ Handle<SimpleNumberDictionary>, int);
+
template Handle<NameDictionary>
HashTable<NameDictionary, NameDictionaryShape>::New(Isolate*, int,
PretenureFlag,
@@ -16537,6 +16643,11 @@ template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::EnsureCapacity(
Handle<NameDictionary>, int);
+template Handle<SimpleNumberDictionary>
+Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>::Add(
+ Handle<SimpleNumberDictionary>, uint32_t, Handle<Object>, PropertyDetails,
+ int*);
+
template int Dictionary<GlobalDictionary,
GlobalDictionaryShape>::NumberOfEnumerableProperties();
@@ -16692,63 +16803,6 @@ size_t JSTypedArray::element_size() {
}
}
-// static
-MaybeHandle<JSTypedArray> JSTypedArray::Create(Isolate* isolate,
- Handle<Object> default_ctor,
- int argc, Handle<Object>* argv,
- const char* method_name) {
- // 1. Let newTypedArray be ? Construct(constructor, argumentList).
- Handle<Object> new_obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, new_obj,
- Execution::New(isolate, default_ctor, argc, argv),
- JSTypedArray);
-
- // 2. Perform ? ValidateTypedArray(newTypedArray).
- Handle<JSTypedArray> new_array;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, new_array, JSTypedArray::Validate(isolate, new_obj, method_name),
- JSTypedArray);
-
- // 3. If argumentList is a List of a single Number, then
- // If newTypedArray.[[ArrayLength]] < size, throw a TypeError exception.
- DCHECK_IMPLIES(argc == 1, argv[0]->IsSmi());
- if (argc == 1 && new_array->length_value() < argv[0]->Number()) {
- const MessageTemplate::Template message =
- MessageTemplate::kTypedArrayTooShort;
- THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
- }
-
- // 4. Return newTypedArray.
- return new_array;
-}
-
-// static
-MaybeHandle<JSTypedArray> JSTypedArray::SpeciesCreate(
- Isolate* isolate, Handle<JSTypedArray> exemplar, int argc,
- Handle<Object>* argv, const char* method_name) {
- // 1. Assert: exemplar is an Object that has a [[TypedArrayName]] internal
- // slot.
- DCHECK(exemplar->IsJSTypedArray());
-
- // 2. Let defaultConstructor be the intrinsic object listed in column one of
- // Table 51 for exemplar.[[TypedArrayName]].
- Handle<JSFunction> default_ctor =
- JSTypedArray::DefaultConstructor(isolate, exemplar);
-
- // 3. Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
- Handle<Object> ctor = default_ctor;
- if (!exemplar->HasJSTypedArrayPrototype(isolate) ||
- !isolate->IsArraySpeciesLookupChainIntact()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, ctor,
- Object::SpeciesConstructor(isolate, exemplar, default_ctor),
- JSTypedArray);
- }
-
- // 4. Return ? TypedArrayCreate(constructor, argumentList).
- return Create(isolate, ctor, argc, argv, method_name);
-}
-
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
// Regardless of whether the property is there or not invalidate
@@ -17235,8 +17289,9 @@ int SearchLiteralsMapEntry(CompilationCacheTable* cache, int cache_entry,
return -1;
}
-void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
- Handle<Context> native_context, Handle<Cell> literals) {
+void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
+ Handle<Context> native_context,
+ Handle<FeedbackCell> feedback_cell) {
Isolate* isolate = native_context->GetIsolate();
DCHECK(native_context->IsNativeContext());
STATIC_ASSERT(kLiteralEntryLength == 2);
@@ -17255,7 +17310,7 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
if (entry >= 0) {
// Just set the code of the entry.
Handle<WeakCell> literals_cell =
- isolate->factory()->NewWeakCell(literals);
+ isolate->factory()->NewWeakCell(feedback_cell);
old_literals_map->set(entry + kLiteralLiteralsOffset, *literals_cell);
return;
}
@@ -17280,7 +17335,8 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
}
}
- Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+ Handle<WeakCell> literals_cell =
+ isolate->factory()->NewWeakCell(feedback_cell);
WeakCell* context_cell = native_context->self_weak_cell();
new_literals_map->set(entry + kLiteralContextOffset, context_cell);
@@ -17292,7 +17348,7 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
WeakCell::cast(new_literals_map->get(i + kLiteralContextOffset));
DCHECK(cell->cleared() || cell->value()->IsNativeContext());
cell = WeakCell::cast(new_literals_map->get(i + kLiteralLiteralsOffset));
- DCHECK(cell->cleared() || (cell->value()->IsCell()));
+ DCHECK(cell->cleared() || (cell->value()->IsFeedbackCell()));
}
#endif
@@ -17302,9 +17358,9 @@ void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
}
}
-Cell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
- Context* native_context) {
- Cell* result = nullptr;
+FeedbackCell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
+ Context* native_context) {
+ FeedbackCell* result = nullptr;
int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
if (entry >= 0) {
FixedArray* literals_map = FixedArray::cast(cache->get(cache_entry));
@@ -17312,37 +17368,33 @@ Cell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
WeakCell* cell =
WeakCell::cast(literals_map->get(entry + kLiteralLiteralsOffset));
- result = cell->cleared() ? nullptr : Cell::cast(cell->value());
+ result = cell->cleared() ? nullptr : FeedbackCell::cast(cell->value());
}
- DCHECK(result == nullptr || result->IsCell());
+ DCHECK(result == nullptr || result->IsFeedbackCell());
return result;
}
} // namespace
-InfoVectorPair CompilationCacheTable::LookupScript(Handle<String> src,
- Handle<Context> context,
- LanguageMode language_mode) {
- InfoVectorPair empty_result;
+MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
+ Handle<String> src, Handle<Context> context, LanguageMode language_mode) {
Handle<SharedFunctionInfo> shared(context->closure()->shared());
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
int entry = FindEntry(&key);
- if (entry == kNotFound) return empty_result;
+ if (entry == kNotFound) return MaybeHandle<SharedFunctionInfo>();
int index = EntryToIndex(entry);
- if (!get(index)->IsFixedArray()) return empty_result;
+ if (!get(index)->IsFixedArray()) return MaybeHandle<SharedFunctionInfo>();
Object* obj = get(index + 1);
if (obj->IsSharedFunctionInfo()) {
- Cell* literals =
- SearchLiteralsMap(this, index + 2, context->native_context());
- return InfoVectorPair(SharedFunctionInfo::cast(obj), literals);
+ return handle(SharedFunctionInfo::cast(obj));
}
- return empty_result;
+ return MaybeHandle<SharedFunctionInfo>();
}
-InfoVectorPair CompilationCacheTable::LookupEval(
+InfoCellPair CompilationCacheTable::LookupEval(
Handle<String> src, Handle<SharedFunctionInfo> outer_info,
Handle<Context> native_context, LanguageMode language_mode, int position) {
- InfoVectorPair empty_result;
+ InfoCellPair empty_result;
StringSharedKey key(src, outer_info, language_mode, position);
int entry = FindEntry(&key);
if (entry == kNotFound) return empty_result;
@@ -17350,9 +17402,9 @@ InfoVectorPair CompilationCacheTable::LookupEval(
if (!get(index)->IsFixedArray()) return empty_result;
Object* obj = get(EntryToIndex(entry) + 1);
if (obj->IsSharedFunctionInfo()) {
- Cell* literals =
+ FeedbackCell* feedback_cell =
SearchLiteralsMap(this, EntryToIndex(entry) + 2, *native_context);
- return InfoVectorPair(SharedFunctionInfo::cast(obj), literals);
+ return InfoCellPair(SharedFunctionInfo::cast(obj), feedback_cell);
}
return empty_result;
}
@@ -17386,7 +17438,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::Put(
Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value, Handle<Cell> literals) {
+ Handle<SharedFunctionInfo> value) {
Isolate* isolate = cache->GetIsolate();
Handle<SharedFunctionInfo> shared(context->closure()->shared());
Handle<Context> native_context(context->native_context());
@@ -17396,7 +17448,6 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
int entry = cache->FindInsertionEntry(key.Hash());
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
- AddToLiteralsMap(cache, EntryToIndex(entry) + 2, native_context, literals);
cache->ElementAdded();
return cache;
}
@@ -17404,7 +17455,8 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
- Handle<Context> native_context, Handle<Cell> literals, int position) {
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position) {
Isolate* isolate = cache->GetIsolate();
StringSharedKey key(src, outer_info, value->language_mode(), position);
{
@@ -17413,11 +17465,11 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
if (entry != kNotFound) {
cache->set(EntryToIndex(entry), *k);
cache->set(EntryToIndex(entry) + 1, *value);
- // AddToLiteralsMap may allocate a new sub-array to live in the entry,
- // but it won't change the cache array. Therefore EntryToIndex and
- // entry remains correct.
- AddToLiteralsMap(cache, EntryToIndex(entry) + 2, native_context,
- literals);
+ // AddToFeedbackCellsMap may allocate a new sub-array to live in the
+ // entry, but it won't change the cache array. Therefore EntryToIndex
+      // and entry remain correct.
+ AddToFeedbackCellsMap(cache, EntryToIndex(entry) + 2, native_context,
+ feedback_cell);
return cache;
}
}
@@ -17621,6 +17673,13 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(Handle<Derived> dictionary,
return dictionary;
}
+// static
+Handle<SimpleNumberDictionary> SimpleNumberDictionary::Set(
+ Handle<SimpleNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value) {
+ return AtPut(dictionary, key, value, PropertyDetails::Empty());
+}
+
bool NumberDictionary::HasComplexElements() {
if (!requires_slow_elements()) return false;
Isolate* isolate = this->GetIsolate();
@@ -19004,10 +19063,11 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
// actually a buffer we are tracking.
isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
allocation.length);
+ CHECK(FreePages(allocation.allocation_base, allocation.length));
+ } else {
+ isolate->array_buffer_allocator()->Free(allocation.allocation_base,
+ allocation.length);
}
-
- isolate->array_buffer_allocator()->Free(allocation.allocation_base,
- allocation.length, allocation.mode);
}
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
@@ -19105,6 +19165,10 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
isolate);
+ // This code does not know how to materialize from a buffer with guard
+ // regions.
+ DCHECK(!buffer->has_guard_region());
+
void* backing_store =
isolate->array_buffer_allocator()->AllocateUninitialized(
fixed_typed_array->DataSize());
@@ -19139,7 +19203,8 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
GetIsolate());
if (array_buffer->was_neutered() ||
- array_buffer->backing_store() != nullptr) {
+ array_buffer->backing_store() != nullptr ||
+ array_buffer->has_guard_region()) {
return array_buffer;
}
Handle<JSTypedArray> self(this);
@@ -19218,12 +19283,12 @@ PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
return PropertyCellType::kConstant;
case PropertyCellType::kConstant:
if (*value == cell->value()) return PropertyCellType::kConstant;
- // Fall through.
+ V8_FALLTHROUGH;
case PropertyCellType::kConstantType:
if (RemainsConstantType(cell, value)) {
return PropertyCellType::kConstantType;
}
- // Fall through.
+ V8_FALLTHROUGH;
case PropertyCellType::kMutable:
return PropertyCellType::kMutable;
}
@@ -19380,7 +19445,7 @@ ElementsKind JSArrayIterator::ElementsKindForInstanceType(InstanceType type) {
DCHECK_LE(type, LAST_ARRAY_ITERATOR_TYPE);
}
- if (type <= JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) {
+ if (type <= JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE) {
kind =
static_cast<ElementsKind>(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
(type - FIRST_ARRAY_VALUE_ITERATOR_TYPE));
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index c4e3d972e1..a9da77fce3 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -140,8 +140,7 @@
// - SharedFunctionInfo
// - Struct
// - AccessorInfo
-// - PromiseResolveThenableJobInfo
-// - PromiseReactionJobInfo
+// - PromiseReaction
// - PromiseCapability
// - AccessorPair
// - AccessCheckInfo
@@ -159,10 +158,18 @@
// - SourcePositionTableWithFrameCache
// - CodeCache
// - PrototypeInfo
+// - Microtask
+// - CallbackTask
+// - CallableTask
+// - PromiseReactionJobTask
+// - PromiseFulfillReactionJobTask
+// - PromiseRejectReactionJobTask
+// - PromiseResolveThenableJobTask
// - Module
// - ModuleInfoEntry
// - PreParsedScopeData
// - WeakCell
+// - FeedbackCell
// - FeedbackVector
//
// Formats of Object*:
@@ -184,7 +191,7 @@ enum KeyedAccessStoreMode {
STANDARD_STORE,
STORE_TRANSITION_TO_OBJECT,
STORE_TRANSITION_TO_DOUBLE,
- STORE_AND_GROW_NO_TRANSITION,
+ STORE_AND_GROW_NO_TRANSITION_HANDLE_COW,
STORE_AND_GROW_TRANSITION_TO_OBJECT,
STORE_AND_GROW_TRANSITION_TO_DOUBLE,
STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
@@ -204,21 +211,25 @@ static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
store_mode == STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
+static inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
+ return store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
+}
static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
KeyedAccessStoreMode store_mode) {
if (store_mode >= STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
return store_mode;
}
- if (store_mode >= STORE_AND_GROW_NO_TRANSITION) {
- return STORE_AND_GROW_NO_TRANSITION;
+ if (store_mode >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW) {
+ return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
}
return STANDARD_STORE;
}
static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode >= STORE_AND_GROW_NO_TRANSITION &&
+ return store_mode >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW &&
store_mode <= STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
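// Aside -- these helpers lean on the declaration order of
// KeyedAccessStoreMode: each family of modes occupies a contiguous range, so
// classification is a pair of comparisons. Simplified stand-in (illustrative
// subset, not the real enum):
enum StoreModeSketch {
  kStandardStore,
  kGrowNoTransitionHandleCow,  // first "grow" mode
  kGrowTransitionToObject,
  kGrowTransitionToDouble,     // last "grow" mode
  kIgnoreOutOfBounds
};

bool IsGrowModeSketch(StoreModeSketch mode) {
  return mode >= kGrowNoTransitionHandleCow && mode <= kGrowTransitionToDouble;
}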
@@ -345,6 +356,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(FIXED_FLOAT32_ARRAY_TYPE) \
V(FIXED_FLOAT64_ARRAY_TYPE) \
V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
+ V(FIXED_BIGINT64_ARRAY_TYPE) \
+ V(FIXED_BIGUINT64_ARRAY_TYPE) \
\
V(FIXED_DOUBLE_ARRAY_TYPE) \
V(FILLER_TYPE) \
@@ -363,21 +376,29 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(MODULE_INFO_ENTRY_TYPE) \
V(MODULE_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(PROMISE_REACTION_JOB_INFO_TYPE) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
+ V(PROMISE_CAPABILITY_TYPE) \
+ V(PROMISE_REACTION_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
V(SCRIPT_TYPE) \
V(STACK_FRAME_INFO_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
\
+ V(CALLABLE_TASK_TYPE) \
+ V(CALLBACK_TASK_TYPE) \
+ V(PROMISE_FULFILL_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_REJECT_REACTION_JOB_TASK_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
+ \
V(FIXED_ARRAY_TYPE) \
V(DESCRIPTOR_ARRAY_TYPE) \
V(HASH_TABLE_TYPE) \
+ V(SCOPE_INFO_TYPE) \
V(TRANSITION_ARRAY_TYPE) \
\
V(CELL_TYPE) \
V(CODE_DATA_CONTAINER_TYPE) \
+ V(FEEDBACK_CELL_TYPE) \
V(FEEDBACK_VECTOR_TYPE) \
V(LOAD_HANDLER_TYPE) \
V(PROPERTY_ARRAY_TYPE) \
@@ -500,6 +521,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
\
V(JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
@@ -518,6 +541,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE) \
\
V(JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
@@ -551,15 +576,21 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
V(MODULE, Module, module) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
- promise_reaction_job_info) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
- promise_resolve_thenable_job_info) \
+ V(PROMISE_CAPABILITY, PromiseCapability, promise_capability) \
+ V(PROMISE_REACTION, PromiseReaction, promise_reaction) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
V(SCRIPT, Script, script) \
V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
V(TUPLE2, Tuple2, tuple2) \
- V(TUPLE3, Tuple3, tuple3)
+ V(TUPLE3, Tuple3, tuple3) \
+ V(CALLABLE_TASK, CallableTask, callable_task) \
+ V(CALLBACK_TASK, CallbackTask, callback_task) \
+ V(PROMISE_FULFILL_REACTION_JOB_TASK, PromiseFulfillReactionJobTask, \
+ promise_fulfill_reaction_job_task) \
+ V(PROMISE_REJECT_REACTION_JOB_TASK, PromiseRejectReactionJobTask, \
+ promise_reject_reaction_job_task) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_TASK, PromiseResolveThenableJobTask, \
+ promise_resolve_thenable_job_task)
#define DATA_HANDLER_LIST(V) \
V(LOAD_HANDLER, LoadHandler, 1, load_handler1) \
@@ -714,7 +745,9 @@ enum InstanceType : uint16_t {
FIXED_UINT32_ARRAY_TYPE,
FIXED_FLOAT32_ARRAY_TYPE,
FIXED_FLOAT64_ARRAY_TYPE,
- FIXED_UINT8_CLAMPED_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
+ FIXED_UINT8_CLAMPED_ARRAY_TYPE,
+ FIXED_BIGINT64_ARRAY_TYPE,
+ FIXED_BIGUINT64_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
@@ -733,23 +766,31 @@ enum InstanceType : uint16_t {
MODULE_INFO_ENTRY_TYPE,
MODULE_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
- PROMISE_REACTION_JOB_INFO_TYPE,
- PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
+ PROMISE_CAPABILITY_TYPE,
+ PROMISE_REACTION_TYPE,
PROTOTYPE_INFO_TYPE,
SCRIPT_TYPE,
STACK_FRAME_INFO_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
+ CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
+ CALLBACK_TASK_TYPE,
+ PROMISE_FULFILL_REACTION_JOB_TASK_TYPE,
+ PROMISE_REJECT_REACTION_JOB_TASK_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
+
// FixedArrays.
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
DESCRIPTOR_ARRAY_TYPE,
HASH_TABLE_TYPE,
+ SCOPE_INFO_TYPE,
TRANSITION_ARRAY_TYPE, // LAST_FIXED_ARRAY_TYPE
// Misc.
CELL_TYPE,
CODE_DATA_CONTAINER_TYPE,
+ FEEDBACK_CELL_TYPE,
FEEDBACK_VECTOR_TYPE,
LOAD_HANDLER_TYPE,
PROPERTY_ARRAY_TYPE,
@@ -830,9 +871,12 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of FixedArray.
FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE,
LAST_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
+ // Boundaries for testing if given HeapObject is a subclass of Microtask.
+ FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
+ LAST_MICROTASK_TYPE = PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
// Boundaries for testing for a fixed typed array.
FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
- LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE,
+ LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_BIGUINT64_ARRAY_TYPE,
// Boundary for promotion to old space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
@@ -907,6 +951,7 @@ class FixedArrayBase;
class PropertyArray;
class FunctionLiteral;
class JSGlobalObject;
+class JSPromise;
class KeyAccumulator;
class LayoutDescriptor;
class LookupIterator;
@@ -921,12 +966,12 @@ class RootVisitor;
class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
+class FeedbackCell;
class FeedbackMetadata;
class FeedbackVector;
class WeakCell;
class TransitionArray;
class TemplateList;
-class TemplateMap;
template <typename T>
class ZoneForwardList;
@@ -954,12 +999,13 @@ template <class C> inline bool Is(Object* obj);
V(BigInt) \
V(BoilerplateDescription) \
V(Boolean) \
+ V(BooleanWrapper) \
V(BreakPoint) \
V(BreakPointInfo) \
V(ByteArray) \
V(BytecodeArray) \
- V(Callable) \
V(CallHandlerInfo) \
+ V(Callable) \
V(Cell) \
V(ClassBoilerplate) \
V(Code) \
@@ -979,12 +1025,15 @@ template <class C> inline bool Is(Object* obj);
V(ExternalOneByteString) \
V(ExternalString) \
V(ExternalTwoByteString) \
+ V(FeedbackCell) \
V(FeedbackMetadata) \
V(FeedbackVector) \
V(Filler) \
V(FixedArray) \
V(FixedArrayBase) \
V(FixedArrayExact) \
+ V(FixedBigInt64Array) \
+ V(FixedBigUint64Array) \
V(FixedDoubleArray) \
V(FixedFloat32Array) \
V(FixedFloat64Array) \
@@ -1042,30 +1091,34 @@ template <class C> inline bool Is(Object* obj);
V(LoadHandler) \
V(Map) \
V(MapCache) \
+ V(Microtask) \
V(ModuleInfo) \
V(MutableHeapNumber) \
V(Name) \
V(NameDictionary) \
V(NativeContext) \
V(NormalizedMapCache) \
+ V(NumberDictionary) \
+ V(NumberWrapper) \
V(ObjectHashSet) \
V(ObjectHashTable) \
V(Oddball) \
V(OrderedHashMap) \
V(OrderedHashSet) \
V(PreParsedScopeData) \
- V(PromiseCapability) \
+ V(PromiseReactionJobTask) \
V(PropertyArray) \
V(PropertyCell) \
V(PropertyDescriptorObject) \
V(RegExpMatchInfo) \
V(ScopeInfo) \
V(ScriptContextTable) \
- V(NumberDictionary) \
+ V(ScriptWrapper) \
V(SeqOneByteString) \
V(SeqString) \
V(SeqTwoByteString) \
V(SharedFunctionInfo) \
+ V(SimpleNumberDictionary) \
V(SlicedString) \
V(SloppyArgumentsElements) \
V(SmallOrderedHashMap) \
@@ -1078,9 +1131,9 @@ template <class C> inline bool Is(Object* obj);
V(StringWrapper) \
V(Struct) \
V(Symbol) \
+ V(SymbolWrapper) \
V(TemplateInfo) \
V(TemplateList) \
- V(TemplateMap) \
V(TemplateObjectDescription) \
V(ThinString) \
V(TransitionArray) \
@@ -1210,8 +1263,6 @@ class Object {
// implementation of a JSObject's elements.
inline bool HasValidElements();
- inline bool HasSpecificClassOf(String* name);
-
bool BooleanValue(); // ECMA-262 9.2.
// ES6 section 7.2.11 Abstract Relational Comparison
@@ -1304,34 +1355,10 @@ class Object {
// ES6 section 12.5.6 The typeof Operator
static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
- // ES6 section 12.6 Multiplicative Operators
- MUST_USE_RESULT static MaybeHandle<Object> Multiply(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> Divide(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> Modulus(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
-
// ES6 section 12.7 Additive Operators
MUST_USE_RESULT static MaybeHandle<Object> Add(Isolate* isolate,
Handle<Object> lhs,
Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> Subtract(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
-
- // ES6 section 12.8 Bitwise Shift Operators
- MUST_USE_RESULT static MaybeHandle<Object> ShiftLeft(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> ShiftRight(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> ShiftRightLogical(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs);
// ES6 section 12.9 Relational Operators
MUST_USE_RESULT static inline Maybe<bool> GreaterThan(Handle<Object> x,
@@ -1343,17 +1370,6 @@ class Object {
MUST_USE_RESULT static inline Maybe<bool> LessThanOrEqual(Handle<Object> x,
Handle<Object> y);
- // ES6 section 12.11 Binary Bitwise Operators
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseAnd(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseOr(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseXor(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
-
// ES6 section 7.3.19 OrdinaryHasInstance (C, O).
MUST_USE_RESULT static MaybeHandle<Object> OrdinaryHasInstance(
Isolate* isolate, Handle<Object> callable, Handle<Object> object);
@@ -1800,7 +1816,7 @@ class HeapObject: public Object {
static void VerifyHeapPointer(Object* p);
#endif
- inline AllocationAlignment RequiredAlignment() const;
+ static inline AllocationAlignment RequiredAlignment(Map* map);
// Whether the object needs rehashing. That is the case if the object's
// content depends on FLAG_hash_seed. When the object is deserialized into
@@ -1900,7 +1916,10 @@ enum AccessorComponent {
ACCESSOR_SETTER
};
-enum class GetKeysConversion { kKeepNumbers, kConvertToString };
+enum class GetKeysConversion {
+ kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
+ kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString)
+};
enum class KeyCollectionMode {
kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
@@ -2182,10 +2201,12 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
- Handle<JSReceiver> object, PropertyFilter filter);
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
- Handle<JSReceiver> object, PropertyFilter filter);
+ Handle<JSReceiver> object, PropertyFilter filter,
+ bool try_fast_path = true);
static const int kHashMask = PropertyArray::HashField::kMask;
@@ -2273,6 +2294,8 @@ class JSObject: public JSReceiver {
inline bool HasFixedUint32Elements();
inline bool HasFixedFloat32Elements();
inline bool HasFixedFloat64Elements();
+ inline bool HasFixedBigInt64Elements();
+ inline bool HasFixedBigUint64Elements();
inline bool HasFastArgumentsElements();
inline bool HasSlowArgumentsElements();
@@ -2872,86 +2895,6 @@ class Tuple3 : public Tuple2 {
DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple3);
};
-class PromiseCapability : public Tuple3 {
- public:
- DECL_CAST(PromiseCapability)
- DECL_PRINTER(PromiseCapability)
- DECL_VERIFIER(PromiseCapability)
-
- DECL_ACCESSORS(promise, Object)
- DECL_ACCESSORS(resolve, Object)
- DECL_ACCESSORS(reject, Object)
-
- static const int kPromiseOffset = Tuple3::kValue1Offset;
- static const int kResolveOffset = Tuple3::kValue2Offset;
- static const int kRejectOffset = Tuple3::kValue3Offset;
- static const int kSize = Tuple3::kSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseCapability);
-};
-
-// A container struct to hold state required for PromiseResolveThenableJob.
-class PromiseResolveThenableJobInfo : public Struct {
- public:
- DECL_ACCESSORS(thenable, JSReceiver)
- DECL_ACCESSORS(then, JSReceiver)
- DECL_ACCESSORS(resolve, JSFunction)
- DECL_ACCESSORS(reject, JSFunction)
-
- DECL_ACCESSORS(context, Context)
-
- static const int kThenableOffset = Struct::kHeaderSize;
- static const int kThenOffset = kThenableOffset + kPointerSize;
- static const int kResolveOffset = kThenOffset + kPointerSize;
- static const int kRejectOffset = kResolveOffset + kPointerSize;
- static const int kContextOffset = kRejectOffset + kPointerSize;
- static const int kSize = kContextOffset + kPointerSize;
-
- DECL_CAST(PromiseResolveThenableJobInfo)
- DECL_PRINTER(PromiseResolveThenableJobInfo)
- DECL_VERIFIER(PromiseResolveThenableJobInfo)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobInfo);
-};
-
-class JSPromise;
-
-// Struct to hold state required for PromiseReactionJob.
-class PromiseReactionJobInfo : public Struct {
- public:
- DECL_ACCESSORS(value, Object)
- DECL_ACCESSORS(tasks, Object)
-
- // Check comment in JSPromise for information on what state these
- // deferred fields could be in.
- DECL_ACCESSORS(deferred_promise, Object)
- DECL_ACCESSORS(deferred_on_resolve, Object)
- DECL_ACCESSORS(deferred_on_reject, Object)
-
- DECL_INT_ACCESSORS(debug_id)
-
- DECL_ACCESSORS(context, Context)
-
- static const int kValueOffset = Struct::kHeaderSize;
- static const int kTasksOffset = kValueOffset + kPointerSize;
- static const int kDeferredPromiseOffset = kTasksOffset + kPointerSize;
- static const int kDeferredOnResolveOffset =
- kDeferredPromiseOffset + kPointerSize;
- static const int kDeferredOnRejectOffset =
- kDeferredOnResolveOffset + kPointerSize;
- static const int kContextOffset = kDeferredOnRejectOffset + kPointerSize;
- static const int kSize = kContextOffset + kPointerSize;
-
- DECL_CAST(PromiseReactionJobInfo)
- DECL_PRINTER(PromiseReactionJobInfo)
- DECL_VERIFIER(PromiseReactionJobInfo)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobInfo);
-};
-
class AsyncGeneratorRequest : public Struct {
public:
// Holds an AsyncGeneratorRequest, or Undefined.
@@ -3128,8 +3071,8 @@ class ContextExtension : public Struct {
V(String.prototype, toString, StringToString) \
V(String.prototype, toUpperCase, StringToUpperCase) \
V(String.prototype, trim, StringTrim) \
- V(String.prototype, trimLeft, StringTrimLeft) \
- V(String.prototype, trimRight, StringTrimRight) \
+ V(String.prototype, trimLeft, StringTrimStart) \
+ V(String.prototype, trimRight, StringTrimEnd) \
V(String.prototype, valueOf, StringValueOf) \
V(String, fromCharCode, StringFromCharCode) \
V(String, fromCodePoint, StringFromCodePoint) \
@@ -3335,14 +3278,14 @@ class JSAsyncGeneratorObject : public JSGeneratorObject {
// undefined.
DECL_ACCESSORS(queue, HeapObject)
- // [awaited_promise]
- // A reference to the Promise of an AwaitExpression.
- DECL_ACCESSORS(awaited_promise, HeapObject)
+ // [is_awaiting]
+ // Whether or not the generator is currently awaiting.
+ DECL_INT_ACCESSORS(is_awaiting)
// Layout description.
static const int kQueueOffset = JSGeneratorObject::kSize;
- static const int kAwaitedPromiseOffset = kQueueOffset + kPointerSize;
- static const int kSize = kAwaitedPromiseOffset + kPointerSize;
+ static const int kIsAwaitingOffset = kQueueOffset + kPointerSize;
+ static const int kSize = kIsAwaitingOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncGeneratorObject);
@@ -3477,22 +3420,14 @@ class JSFunction: public JSObject {
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();
- // [feedback_vector_cell]: The feedback vector.
- DECL_ACCESSORS(feedback_vector_cell, Cell)
-
- enum FeedbackVectorState {
- TOP_LEVEL_SCRIPT_NEEDS_VECTOR,
- NEEDS_VECTOR,
- HAS_VECTOR,
- NO_VECTOR_NEEDED
- };
-
- inline FeedbackVectorState GetFeedbackVectorState(Isolate* isolate) const;
+  // [feedback_cell]: The FeedbackCell that will eventually hold the
+  // FeedbackVector once the function is compiled.
+ DECL_ACCESSORS(feedback_cell, FeedbackCell)
// feedback_vector() can be used once the function is compiled.
inline FeedbackVector* feedback_vector() const;
inline bool has_feedback_vector() const;
- static void EnsureLiterals(Handle<JSFunction> function);
+ static void EnsureFeedbackVector(Handle<JSFunction> function);
// Unconditionally clear the type feedback vector.
void ClearTypeFeedbackInfo();
@@ -3575,7 +3510,7 @@ class JSFunction: public JSObject {
/* Pointer fields. */ \
V(kSharedFunctionInfoOffset, kPointerSize) \
V(kContextOffset, kPointerSize) \
- V(kFeedbackVectorOffset, kPointerSize) \
+ V(kFeedbackCellOffset, kPointerSize) \
V(kEndOfStrongFieldsOffset, 0) \
V(kCodeOffset, kPointerSize) \
/* Size of JSFunction object without prototype field. */ \
@@ -3851,76 +3786,6 @@ class JSMessageObject: public JSObject {
typedef BodyDescriptor BodyDescriptorWeak;
};
-class JSPromise : public JSObject {
- public:
- DECL_ACCESSORS(result, Object)
-
- // There are 3 possible states for these fields --
- // 1) Undefined -- This is the zero state when there is no callback
- // or deferred fields registered.
- //
- // 2) Object -- There is a single callback directly attached to the
- // fulfill_reactions, reject_reactions and the deferred fields are
- // directly attached to the slots. In this state, deferred_promise
- // is a JSReceiver and deferred_on_{resolve, reject} are Callables.
- //
- // 3) FixedArray -- There is more than one callback and deferred
- // fields attached to a FixedArray.
- //
- // The callback can be a Callable or a Symbol.
- DECL_ACCESSORS(deferred_promise, Object)
- DECL_ACCESSORS(deferred_on_resolve, Object)
- DECL_ACCESSORS(deferred_on_reject, Object)
- DECL_ACCESSORS(fulfill_reactions, Object)
- DECL_ACCESSORS(reject_reactions, Object)
-
- DECL_INT_ACCESSORS(flags)
-
- // [has_handler]: Whether this promise has a reject handler or not.
- DECL_BOOLEAN_ACCESSORS(has_handler)
-
- // [handled_hint]: Whether this promise will be handled by a catch
- // block in an async function.
- DECL_BOOLEAN_ACCESSORS(handled_hint)
-
- static const char* Status(v8::Promise::PromiseState status);
- v8::Promise::PromiseState status() const;
-
- DECL_CAST(JSPromise)
-
- // Dispatched behavior.
- DECL_PRINTER(JSPromise)
- DECL_VERIFIER(JSPromise)
-
- // Layout description.
- static const int kResultOffset = JSObject::kHeaderSize;
- static const int kDeferredPromiseOffset = kResultOffset + kPointerSize;
- static const int kDeferredOnResolveOffset =
- kDeferredPromiseOffset + kPointerSize;
- static const int kDeferredOnRejectOffset =
- kDeferredOnResolveOffset + kPointerSize;
- static const int kFulfillReactionsOffset =
- kDeferredOnRejectOffset + kPointerSize;
- static const int kRejectReactionsOffset =
- kFulfillReactionsOffset + kPointerSize;
- static const int kFlagsOffset = kRejectReactionsOffset + kPointerSize;
- static const int kSize = kFlagsOffset + kPointerSize;
- static const int kSizeWithEmbedderFields =
- kSize + v8::Promise::kEmbedderFieldCount * kPointerSize;
-
- // Flags layout.
- // The first two bits store the v8::Promise::PromiseState.
- static const int kStatusBits = 2;
- static const int kHasHandlerBit = 2;
- static const int kHandledHintBit = 3;
-
- static const int kStatusShift = 0;
- static const int kStatusMask = 0x3;
- STATIC_ASSERT(v8::Promise::kPending == 0);
- STATIC_ASSERT(v8::Promise::kFulfilled == 1);
- STATIC_ASSERT(v8::Promise::kRejected == 2);
-};
-
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
@@ -4217,6 +4082,33 @@ class Cell: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(Cell);
};
+// This is a special cell used to maintain both the link between a
+// closure and its feedback vector, as well as a way to count the
+// number of closures created for a certain function per native
+// context. There's at most one FeedbackCell for each function in
+// a native context.
+class FeedbackCell : public Struct {
+ public:
+ // [value]: value of the cell.
+ DECL_ACCESSORS(value, HeapObject)
+
+ DECL_CAST(FeedbackCell)
+
+ // Dispatched behavior.
+ DECL_PRINTER(FeedbackCell)
+ DECL_VERIFIER(FeedbackCell)
+
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<kValueOffset, kValueOffset + kPointerSize, kSize>
+ BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackCell);
+};
class PropertyCell : public HeapObject {
public:
@@ -4415,10 +4307,10 @@ class JSProxy: public JSReceiver {
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
- static Maybe<bool> SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
- Handle<Symbol> private_name,
- PropertyDescriptor* desc,
- ShouldThrow should_throw);
+ static Maybe<bool> SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
@@ -4459,9 +4351,14 @@ class JSAsyncFromSyncIterator : public JSObject {
// (proposal-async-iteration/#table-async-from-sync-iterator-internal-slots)
DECL_ACCESSORS(sync_iterator, JSReceiver)
+ // The "next" method is loaded during GetIterator, and is not reloaded for
+ // subsequent "next" invocations.
+ DECL_ACCESSORS(next, Object)
+
// Offsets of object fields.
static const int kSyncIteratorOffset = JSObject::kHeaderSize;
- static const int kSize = kSyncIteratorOffset + kPointerSize;
+ static const int kNextOffset = kSyncIteratorOffset + kPointerSize;
+ static const int kSize = kNextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncFromSyncIterator);
@@ -5002,7 +4899,7 @@ class StackFrameInfo : public Struct {
class SourcePositionTableWithFrameCache : public Tuple2 {
public:
DECL_ACCESSORS(source_position_table, ByteArray)
- DECL_ACCESSORS(stack_frame_cache, NumberDictionary)
+ DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)
DECL_CAST(SourcePositionTableWithFrameCache)
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index df5f854395..c107ab8cd1 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -40,7 +40,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static Handle<BigInt> MakeImmutable(Handle<MutableBigInt> result);
// Allocation helpers.
- static MaybeHandle<MutableBigInt> New(Isolate* isolate, int length);
+ static MaybeHandle<MutableBigInt> New(Isolate* isolate, int length,
+ PretenureFlag pretenure = NOT_TENURED);
static Handle<BigInt> NewFromInt(Isolate* isolate, int value);
static Handle<BigInt> NewFromSafeInteger(Isolate* isolate, double value);
void InitializeDigits(int length, byte value = 0);
@@ -145,6 +146,10 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static Rounding DecideRounding(Handle<BigIntBase> x, int mantissa_bits_unset,
int digit_index, uint64_t current_digit);
+ // Returns the least significant 64 bits, simulating two's complement
+ // representation.
+ static uint64_t GetRawBits(BigIntBase* x, bool* lossless);
+
// Digit arithmetic helpers.
static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
static inline digit_t digit_sub(digit_t a, digit_t b, digit_t* borrow);
@@ -174,14 +179,18 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
(*reinterpret_cast<digit_t*>(reinterpret_cast<intptr_t>(address))) = value;
}
#include "src/objects/object-macros-undef.h"
+
+ void set_64_bits(uint64_t bits);
};
-MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length) {
+MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length,
+ PretenureFlag pretenure) {
if (length > BigInt::kMaxLength) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
MutableBigInt);
}
- Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(length));
+ Handle<MutableBigInt> result =
+ Cast(isolate->factory()->NewBigInt(length, pretenure));
result->set_length(length);
result->set_sign(false);
#if DEBUG
@@ -218,13 +227,7 @@ Handle<BigInt> MutableBigInt::NewFromSafeInteger(Isolate* isolate,
Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(length));
result->set_length(length);
result->set_sign(value < 0); // Treats -0 like 0.
- if (kDigitBits == 64) {
- result->set_digit(0, absolute);
- } else {
- DCHECK_EQ(kDigitBits, 32);
- result->set_digit(0, absolute);
- result->set_digit(1, absolute >> 32);
- }
+ result->set_64_bits(absolute);
return MakeImmutable(result);
}
@@ -1702,7 +1705,8 @@ static const int kBitsPerCharTableShift = 5;
static const size_t kBitsPerCharTableMultiplier = 1u << kBitsPerCharTableShift;
MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
- Isolate* isolate, int radix, int charcount, ShouldThrow should_throw) {
+ Isolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ PretenureFlag pretenure) {
DCHECK(2 <= radix && radix <= 36);
DCHECK_GE(charcount, 0);
size_t bits_per_char = kMaxBitsPerChar[radix];
@@ -1717,7 +1721,7 @@ MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
int length = (static_cast<int>(bits_min) + kDigitBits - 1) / kDigitBits;
if (length <= kMaxLength) {
Handle<MutableBigInt> result =
- MutableBigInt::New(isolate, length).ToHandleChecked();
+ MutableBigInt::New(isolate, length, pretenure).ToHandleChecked();
result->InitializeDigits(length);
return result;
}
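// Aside -- a standalone sketch of the sizing estimate, assuming the
// kMaxBitsPerChar table stores ceil(log2(radix) * 32) per character
// (kBitsPerCharTableShift == 5, i.e. multiplier 32, per the context above):
#include <cstddef>

size_t MinBitsSketch(size_t charcount, size_t bits_per_char_x32) {
  return (charcount * bits_per_char_x32 + 31) / 32;  // scale back, round up
}
// For radix 10 the table entry would be 107 (log2(10) ~ 3.322, times 32,
// rounded up), so 20 decimal digits need MinBitsSketch(20, 107) == 67 bits,
// which the line above turns into a two-digit BigInt on 64-bit platforms.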
@@ -2079,6 +2083,68 @@ Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(int n,
return MakeImmutable(result);
}
+Handle<BigInt> BigInt::FromInt64(Isolate* isolate, int64_t n) {
+ if (n == 0) return MutableBigInt::Zero(isolate);
+ STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
+ int length = 64 / kDigitBits;
+ Handle<MutableBigInt> result =
+ MutableBigInt::Cast(isolate->factory()->NewBigInt(length));
+ result->set_length(length);
+ uint64_t absolute;
+ if (n > 0) {
+ result->set_sign(false);
+ absolute = static_cast<uint64_t>(n);
+ } else {
+ result->set_sign(true);
+ if (n == std::numeric_limits<int64_t>::min()) {
+ absolute = static_cast<uint64_t>(std::numeric_limits<int64_t>::max()) + 1;
+ } else {
+ absolute = static_cast<uint64_t>(-n);
+ }
+ }
+ result->set_64_bits(absolute);
+ return MutableBigInt::MakeImmutable(result);
+}
+
+Handle<BigInt> BigInt::FromUint64(Isolate* isolate, uint64_t n) {
+ if (n == 0) return MutableBigInt::Zero(isolate);
+ STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
+ int length = 64 / kDigitBits;
+ Handle<MutableBigInt> result =
+ MutableBigInt::Cast(isolate->factory()->NewBigInt(length));
+ result->set_length(length);
+ result->set_sign(false);
+ result->set_64_bits(n);
+ return MutableBigInt::MakeImmutable(result);
+}
+
+uint64_t MutableBigInt::GetRawBits(BigIntBase* x, bool* lossless) {
+ if (lossless != nullptr) *lossless = true;
+ if (x->is_zero()) return 0;
+ int len = x->length();
+ STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
+ if (lossless != nullptr && len > 64 / kDigitBits) *lossless = false;
+ uint64_t raw = static_cast<uint64_t>(x->digit(0));
+ if (kDigitBits == 32 && len > 1) {
+ raw |= static_cast<uint64_t>(x->digit(1)) << 32;
+ }
+ // Simulate two's complement. MSVC dislikes "-raw".
+ return x->sign() ? ((~raw) + 1u) : raw;
+}
+
+int64_t BigInt::AsInt64(bool* lossless) {
+ uint64_t raw = MutableBigInt::GetRawBits(this, lossless);
+ int64_t result = static_cast<int64_t>(raw);
+ if (lossless != nullptr && (result < 0) != sign()) *lossless = false;
+ return result;
+}
+
+uint64_t BigInt::AsUint64(bool* lossless) {
+ uint64_t result = MutableBigInt::GetRawBits(this, lossless);
+ if (lossless != nullptr && sign()) *lossless = false;
+ return result;
+}
+
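The conversions above store sign and magnitude separately and only materialize two's complement in GetRawBits. A minimal standalone sketch (plain C++, not the V8 API; RawBits is an illustrative stand-in) of why (~raw) + 1 reproduces the expected bit pattern, including the INT64_MIN corner case handled in FromInt64:

#include <cassert>
#include <cstdint>
#include <limits>

uint64_t RawBits(bool sign, uint64_t magnitude) {
  // MSVC-friendly spelling of "-magnitude" on an unsigned value.
  return sign ? (~magnitude) + 1u : magnitude;
}

int main() {
  int64_t n = -42;
  uint64_t magnitude = static_cast<uint64_t>(-n);  // 42
  uint64_t raw = RawBits(true, magnitude);
  assert(static_cast<int64_t>(raw) == n);  // AsInt64 round-trips
  // INT64_MIN needs max() + 1 as magnitude; negating it directly would be UB.
  uint64_t min_magnitude =
      static_cast<uint64_t>(std::numeric_limits<int64_t>::max()) + 1;
  // Holds on two's-complement targets, which V8 assumes.
  assert(static_cast<int64_t>(RawBits(true, min_magnitude)) ==
         std::numeric_limits<int64_t>::min());
  return 0;
}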
// Digit arithmetic helpers.
#if V8_TARGET_ARCH_32_BIT
@@ -2240,20 +2306,30 @@ BigInt::digit_t MutableBigInt::digit_pow(digit_t base, digit_t exponent) {
#undef HAVE_TWODIGIT_T
+void MutableBigInt::set_64_bits(uint64_t bits) {
+ STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
+ if (kDigitBits == 64) {
+ set_digit(0, static_cast<digit_t>(bits));
+ } else {
+ set_digit(0, static_cast<digit_t>(bits & 0xFFFFFFFFu));
+ set_digit(1, static_cast<digit_t>(bits >> 32));
+ }
+}
+
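On 32-bit-digit builds, set_64_bits splits the value into a low and a high word. A standalone sketch of just that arithmetic, under the assumption kDigitBits == 32:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t bits = 0x0123456789ABCDEFull;
  uint32_t digit0 = static_cast<uint32_t>(bits & 0xFFFFFFFFu);  // low word
  uint32_t digit1 = static_cast<uint32_t>(bits >> 32);          // high word
  assert(digit0 == 0x89ABCDEFu);
  assert(digit1 == 0x01234567u);
  // Reassembling the two digits recovers the original 64 bits.
  assert((static_cast<uint64_t>(digit1) << 32 | digit0) == bits);
  return 0;
}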
#ifdef OBJECT_PRINT
void BigInt::BigIntPrint(std::ostream& os) {
DisallowHeapAllocation no_gc;
HeapObject::PrintHeader(os, "BigInt");
int len = length();
- os << "- length: " << len << "\n";
- os << "- sign: " << sign() << "\n";
+ os << "\n- length: " << len;
+ os << "\n- sign: " << sign();
if (len > 0) {
- os << "- digits:";
+ os << "\n- digits:";
for (int i = 0; i < len; i++) {
os << "\n 0x" << std::hex << digit(i);
}
- os << std::dec << "\n";
}
+ os << std::dec << "\n";
}
#endif // OBJECT_PRINT
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 9e29a69b3b..7409f0bade 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -137,6 +137,11 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
static Handle<BigInt> AsIntN(uint64_t n, Handle<BigInt> x);
static MaybeHandle<BigInt> AsUintN(uint64_t n, Handle<BigInt> x);
+ static Handle<BigInt> FromInt64(Isolate* isolate, int64_t n);
+ static Handle<BigInt> FromUint64(Isolate* isolate, uint64_t n);
+ int64_t AsInt64(bool* lossless = nullptr);
+ uint64_t AsUint64(bool* lossless = nullptr);
+
DECL_CAST(BigInt)
DECL_VERIFIER(BigInt)
DECL_PRINTER(BigInt)
@@ -162,12 +167,13 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
class BodyDescriptor;
private:
- friend class BigIntParseIntHelper;
+ friend class StringToBigIntHelper;
- // Special functions for BigIntParseIntHelper:
+ // Special functions for StringToBigIntHelper:
static Handle<BigInt> Zero(Isolate* isolate);
static MaybeHandle<FreshlyAllocatedBigInt> AllocateFor(
- Isolate* isolate, int radix, int charcount, ShouldThrow should_throw);
+ Isolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ PretenureFlag pretenure);
static void InplaceMultiplyAdd(Handle<FreshlyAllocatedBigInt> x,
uintptr_t factor, uintptr_t summand);
static Handle<BigInt> Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign);
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 4c3e7f0d97..8b14034f26 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -26,7 +26,6 @@ CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DeoptimizationData)
-CAST_ACCESSOR(HandlerTable)
int AbstractCode::instruction_size() {
if (IsCode()) {
@@ -149,12 +148,12 @@ void DependentCode::copy(int from, int to) {
}
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, true, \
!GetHeap()->InNewSpace(value))
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
-CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
CODE_ACCESSORS(protected_instructions, FixedArray, kProtectedInstructionsOffset)
@@ -164,7 +163,6 @@ CODE_ACCESSORS(trap_handler_index, Smi, kTrapHandlerIndex)
void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, nullptr);
- WRITE_FIELD(this, kHandlerTableOffset, nullptr);
WRITE_FIELD(this, kDeoptimizationDataOffset, nullptr);
WRITE_FIELD(this, kSourcePositionTableOffset, nullptr);
WRITE_FIELD(this, kProtectedInstructionsOffset, nullptr);
@@ -204,14 +202,35 @@ void Code::set_next_code_link(Object* value) {
code_data_container()->set_next_code_link(value);
}
+int Code::InstructionSize() {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionSize();
+#endif
+ return instruction_size();
+}
+
byte* Code::instruction_start() const {
return const_cast<byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
}
+Address Code::InstructionStart() {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionStart();
+#endif
+ return instruction_start();
+}
+
byte* Code::instruction_end() const {
return instruction_start() + instruction_size();
}
+Address Code::InstructionEnd() {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionEnd();
+#endif
+ return instruction_end();
+}
+
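The capitalized accessors added here all follow one pattern: consult the off-heap instruction stream for embedded builtins, otherwise fall back to the lowercase on-heap field accessor. A hedged standalone sketch of that dispatch (CodeLike, IsOffHeap and the pointer fields are illustrative stand-ins, not the V8 API):

#include <cstdint>

class CodeLike {
 public:
  uint8_t* instruction_start() const { return on_heap_start_; }
  uint8_t* InstructionStart() const {
    // Embedded builtins live off-heap; everything else uses the on-heap field.
    return IsOffHeap() ? off_heap_start_ : instruction_start();
  }

 private:
  bool IsOffHeap() const { return off_heap_start_ != nullptr; }
  uint8_t* on_heap_start_ = nullptr;   // on-heap code, or the trampoline
  uint8_t* off_heap_start_ = nullptr;  // embedded instruction stream, if any
};

int main() {
  CodeLike code;
  return code.InstructionStart() == nullptr ? 0 : 1;
}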
int Code::GetUnwindingInfoSizeOffset() const {
DCHECK(has_unwinding_info());
return RoundUp(kHeaderSize + instruction_size(), kInt64Size);
@@ -252,7 +271,6 @@ int Code::SizeIncludingMetadata() const {
int size = CodeSize();
size += relocation_info()->Size();
size += deoptimization_data()->Size();
- size += handler_table()->Size();
size += protected_instructions()->Size();
return size;
}
@@ -618,7 +636,7 @@ int BytecodeArray::parameter_count() const {
}
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
-ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
+ACCESSORS(BytecodeArray, handler_table, ByteArray, kHandlerTableOffset)
ACCESSORS(BytecodeArray, source_position_table, Object,
kSourcePositionTableOffset)
@@ -657,55 +675,6 @@ int BytecodeArray::SizeIncludingMetadata() {
return size;
}
-int HandlerTable::GetRangeStart(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeStartIndex));
-}
-
-int HandlerTable::GetRangeEnd(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeEndIndex));
-}
-
-int HandlerTable::GetRangeHandler(int index) const {
- return HandlerOffsetField::decode(
- Smi::ToInt(get(index * kRangeEntrySize + kRangeHandlerIndex)));
-}
-
-int HandlerTable::GetRangeData(int index) const {
- return Smi::ToInt(get(index * kRangeEntrySize + kRangeDataIndex));
-}
-
-void HandlerTable::SetRangeStart(int index, int value) {
- set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetRangeEnd(int index, int value) {
- set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetRangeHandler(int index, int offset,
- CatchPrediction prediction) {
- int value = HandlerOffsetField::encode(offset) |
- HandlerPredictionField::encode(prediction);
- set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetRangeData(int index, int value) {
- set(index * kRangeEntrySize + kRangeDataIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetReturnOffset(int index, int value) {
- set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
-}
-
-void HandlerTable::SetReturnHandler(int index, int offset) {
- int value = HandlerOffsetField::encode(offset);
- set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
-}
-
-int HandlerTable::NumberOfRangeEntries() const {
- return length() / kRangeEntrySize;
-}
-
BailoutId DeoptimizationData::BytecodeOffset(int i) {
return BailoutId(BytecodeOffsetRaw(i)->value());
}
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index c43e07c1f9..19e1002f77 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_CODE_H_
#define V8_OBJECTS_CODE_H_
+#include "src/handler-table.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"
@@ -18,92 +19,6 @@ class ByteArray;
class BytecodeArray;
class CodeDataContainer;
-// HandlerTable is a fixed array containing entries for exception handlers in
-// the code object it is associated with. The tables comes in two flavors:
-// 1) Based on ranges: Used for unoptimized code. Contains one entry per
-// exception handler and a range representing the try-block covered by that
-// handler. Layout looks as follows:
-// [ range-start , range-end , handler-offset , handler-data ]
-// 2) Based on return addresses: Used for turbofanned code. Contains one entry
-// per call-site that could throw an exception. Layout looks as follows:
-// [ return-address-offset , handler-offset ]
-class HandlerTable : public FixedArray {
- public:
- // Conservative prediction whether a given handler will locally catch an
- // exception or cause a re-throw to outside the code boundary. Since this is
- // undecidable it is merely an approximation (e.g. useful for debugger).
- enum CatchPrediction {
- UNCAUGHT, // The handler will (likely) rethrow the exception.
- CAUGHT, // The exception will be caught by the handler.
- PROMISE, // The exception will be caught and cause a promise rejection.
- DESUGARING, // The exception will be caught, but both the exception and the
- // catching are part of a desugaring and should therefore not
- // be visible to the user (we won't notify the debugger of such
- // exceptions).
- ASYNC_AWAIT, // The exception will be caught and cause a promise rejection
- // in the desugaring of an async function, so special
- // async/await handling in the debugger can take place.
- };
-
- // Getters for handler table based on ranges.
- inline int GetRangeStart(int index) const;
- inline int GetRangeEnd(int index) const;
- inline int GetRangeHandler(int index) const;
- inline int GetRangeData(int index) const;
-
- // Setters for handler table based on ranges.
- inline void SetRangeStart(int index, int value);
- inline void SetRangeEnd(int index, int value);
- inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
- inline void SetRangeData(int index, int value);
-
- // Setters for handler table based on return addresses.
- inline void SetReturnOffset(int index, int value);
- inline void SetReturnHandler(int index, int offset);
-
- // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
- // the start of the potentially throwing instruction (using return addresses
- // for this value would be invalid).
- int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
-
- // Lookup handler in a table based on return addresses.
- int LookupReturn(int pc_offset);
-
- // Returns the number of entries in the table.
- inline int NumberOfRangeEntries() const;
-
- // Returns the required length of the underlying fixed array.
- static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
- static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
-
- // Returns an empty handler table.
- static Handle<HandlerTable> Empty(Isolate* isolate);
-
- DECL_CAST(HandlerTable)
-
-#ifdef ENABLE_DISASSEMBLER
- void HandlerTableRangePrint(std::ostream& os); // NOLINT
- void HandlerTableReturnPrint(std::ostream& os); // NOLINT
-#endif
-
- private:
- // Layout description for handler table based on ranges.
- static const int kRangeStartIndex = 0;
- static const int kRangeEndIndex = 1;
- static const int kRangeHandlerIndex = 2;
- static const int kRangeDataIndex = 3;
- static const int kRangeEntrySize = 4;
-
- // Layout description for handler table based on return addresses.
- static const int kReturnOffsetIndex = 0;
- static const int kReturnHandlerIndex = 1;
- static const int kReturnEntrySize = 2;
-
- // Encoding of the {handler} field.
- class HandlerPredictionField : public BitField<CatchPrediction, 0, 3> {};
- class HandlerOffsetField : public BitField<int, 3, 29> {};
-};
-
// Code describes objects with on-the-fly generated machine code.
class Code : public HeapObject {
public:
@@ -133,27 +48,28 @@ class Code : public HeapObject {
static const char* Kind2String(Kind kind);
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
- // Printing
- static const char* ICState2String(InlineCacheState state);
-#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-
#ifdef ENABLE_DISASSEMBLER
void Disassemble(const char* name, std::ostream& os,
void* current_pc = nullptr); // NOLINT
#endif
- // [instruction_size]: Size of the native instructions
+ // [instruction_size]: Size of the native instructions, including embedded
+ // data such as the safepoints table.
inline int instruction_size() const;
inline void set_instruction_size(int value);
+ // Returns the size of the native instructions, including embedded
+ // data such as the safepoints table. For off-heap code objects
+ // this may differ from instruction_size in that this will return the size of the
+ // off-heap instruction stream rather than the on-heap trampoline located
+ // at instruction_start.
+ inline int InstructionSize();
+ int OffHeapInstructionSize();
+
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
void InvalidateEmbeddedObjects();
- // [handler_table]: Fixed array containing offsets of exception handlers.
- DECL_ACCESSORS(handler_table, FixedArray)
-
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
@@ -245,6 +161,11 @@ class Code : public HeapObject {
inline int safepoint_table_offset() const;
inline void set_safepoint_table_offset(int offset);
+ // [handler_table_offset]: The offset in the instruction stream where the
+ // exception handler table starts.
+ inline int handler_table_offset() const;
+ inline void set_handler_table_offset(int offset);
+
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
inline bool marked_for_deoptimization() const;
@@ -302,9 +223,21 @@ class Code : public HeapObject {
// Returns the address of the first instruction.
inline byte* instruction_start() const;
+ // Returns the address of the first instruction. For off-heap code objects
+ // this differs from instruction_start (which would point to the off-heap
+ // trampoline instead).
+ inline Address InstructionStart();
+ Address OffHeapInstructionStart();
+
// Returns the address right after the last instruction.
inline byte* instruction_end() const;
+ // Returns the address right after the last instruction. For off-heap code
+ // objects this differs from instruction_end (which would point to the
+ // off-heap trampoline instead).
+ inline Address InstructionEnd();
+ Address OffHeapInstructionEnd();
+
// Returns the size of the instructions, padding, relocation and unwinding
// information.
inline int body_size() const;
@@ -434,9 +367,8 @@ class Code : public HeapObject {
// Layout description.
static const int kRelocationInfoOffset = HeapObject::kHeaderSize;
- static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
static const int kDeoptimizationDataOffset =
- kHandlerTableOffset + kPointerSize;
+ kRelocationInfoOffset + kPointerSize;
static const int kSourcePositionTableOffset =
kDeoptimizationDataOffset + kPointerSize;
static const int kProtectedInstructionsOffset =
@@ -447,7 +379,9 @@ class Code : public HeapObject {
kCodeDataContainerOffset + kPointerSize;
static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
static const int kSafepointTableOffsetOffset = kFlagsOffset + kIntSize;
- static const int kStubKeyOffset = kSafepointTableOffsetOffset + kIntSize;
+ static const int kHandlerTableOffsetOffset =
+ kSafepointTableOffsetOffset + kIntSize;
+ static const int kStubKeyOffset = kHandlerTableOffsetOffset + kIntSize;
static const int kConstantPoolOffset = kStubKeyOffset + kIntSize;
static const int kBuiltinIndexOffset =
kConstantPoolOffset + kConstantPoolSize;
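Because these layout constants chain off one another, inserting kHandlerTableOffsetOffset shifts every later int field by kIntSize. A standalone sketch of the chaining (the base value 0 is illustrative):

#include <cassert>

constexpr int kIntSize = 4;
constexpr int kSafepointTableOffsetOffset = 0;  // illustrative base
constexpr int kHandlerTableOffsetOffset =
    kSafepointTableOffsetOffset + kIntSize;
constexpr int kStubKeyOffset = kHandlerTableOffsetOffset + kIntSize;

int main() {
  assert(kStubKeyOffset == 8);  // was 4 before the new int field was inserted
  return 0;
}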
@@ -584,7 +518,7 @@ class AbstractCode : public HeapObject {
inline Object* stack_frame_cache();
static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
- Handle<NumberDictionary> cache);
+ Handle<SimpleNumberDictionary> cache);
void DropStackFrameCache();
// Returns the size of instructions and the metadata.
@@ -787,7 +721,7 @@ class BytecodeArray : public FixedArrayBase {
DECL_ACCESSORS(constant_pool, FixedArray)
// Accessors for handler table containing offsets of exception handlers.
- DECL_ACCESSORS(handler_table, FixedArray)
+ DECL_ACCESSORS(handler_table, ByteArray)
// Accessors for source position table containing mappings between byte code
// offset and source position or SourcePositionTableWithFrameCache.
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index 16bced9998..5836b01091 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -37,21 +37,21 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
static const int kEntrySize = 3;
};
-class InfoVectorPair {
+class InfoCellPair {
public:
- InfoVectorPair() : shared_(nullptr), vector_cell_(nullptr) {}
- InfoVectorPair(SharedFunctionInfo* shared, Cell* vector_cell)
- : shared_(shared), vector_cell_(vector_cell) {}
+ InfoCellPair() : shared_(nullptr), feedback_cell_(nullptr) {}
+ InfoCellPair(SharedFunctionInfo* shared, FeedbackCell* feedback_cell)
+ : shared_(shared), feedback_cell_(feedback_cell) {}
+ FeedbackCell* feedback_cell() const { return feedback_cell_; }
SharedFunctionInfo* shared() const { return shared_; }
- Cell* vector() const { return vector_cell_; }
+ bool has_feedback_cell() const { return feedback_cell_ != nullptr; }
bool has_shared() const { return shared_ != nullptr; }
- bool has_vector() const { return vector_cell_ != nullptr; }
private:
SharedFunctionInfo* shared_;
- Cell* vector_cell_;
+ FeedbackCell* feedback_cell_;
};
// This cache is used in two different variants. For regexp caching, it simply
@@ -71,12 +71,12 @@ class CompilationCacheTable
// Find cached value for a string key, otherwise return null.
Handle<Object> Lookup(Handle<String> src, Handle<Context> context,
LanguageMode language_mode);
- InfoVectorPair LookupScript(Handle<String> src, Handle<Context> context,
- LanguageMode language_mode);
- InfoVectorPair LookupEval(Handle<String> src,
- Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- LanguageMode language_mode, int position);
+ MaybeHandle<SharedFunctionInfo> LookupScript(Handle<String> src,
+ Handle<Context> context,
+ LanguageMode language_mode);
+ InfoCellPair LookupEval(Handle<String> src, Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ LanguageMode language_mode, int position);
Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
static Handle<CompilationCacheTable> Put(Handle<CompilationCacheTable> cache,
Handle<String> src,
@@ -86,11 +86,12 @@ class CompilationCacheTable
static Handle<CompilationCacheTable> PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<Context> context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value, Handle<Cell> literals);
+ Handle<SharedFunctionInfo> value);
static Handle<CompilationCacheTable> PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
- Handle<Context> native_context, Handle<Cell> literals, int position);
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position);
static Handle<CompilationCacheTable> PutRegExp(
Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index 40c3658e60..f0650479f7 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATA_HANDLER_INL_H_
-#define V8_DATA_HANDLER_INL_H_
+#ifndef V8_OBJECTS_DATA_HANDLER_INL_H_
+#define V8_OBJECTS_DATA_HANDLER_INL_H_
#include "src/objects/data-handler.h"
@@ -38,4 +38,4 @@ ACCESSORS_CHECKED(DataHandler, data3, Object, kData3Offset,
#include "src/objects/object-macros-undef.h"
-#endif // V8_DATA_HANDLER_INL_H_
+#endif // V8_OBJECTS_DATA_HANDLER_INL_H_
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index f11d00fa38..8b3298207f 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATA_HANDLER_H_
-#define V8_DATA_HANDLER_H_
+#ifndef V8_OBJECTS_DATA_HANDLER_H_
+#define V8_OBJECTS_DATA_HANDLER_H_
#include "src/objects.h"
@@ -60,4 +60,4 @@ class DataHandler : public Struct {
#include "src/objects/object-macros-undef.h"
-#endif // V8_DATA_HANDLER_H_
+#endif // V8_OBJECTS_DATA_HANDLER_H_
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 002ac5215d..084ea7b15c 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -28,7 +28,7 @@ ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset)
ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset)
SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionOffset)
-ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsOffset)
+ACCESSORS(BreakPointInfo, break_points, Object, kBreakPointsOffset)
SMI_ACCESSORS(BreakPoint, id, kIdOffset)
ACCESSORS(BreakPoint, condition, String, kConditionOffset)
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index c0425fca8a..6505ca6e7f 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -23,12 +23,30 @@ bool DebugInfo::ClearBreakInfo() {
set_debug_bytecode_array(isolate->heap()->undefined_value());
set_break_points(isolate->heap()->empty_fixed_array());
- int new_flags = flags() & ~kHasBreakInfo & ~kPreparedForBreakpoints;
+ int new_flags = flags();
+ new_flags &= ~kHasBreakInfo & ~kPreparedForBreakpoints;
+ new_flags &= ~kBreakAtEntry & ~kCanBreakAtEntry;
set_flags(new_flags);
return new_flags == kNone;
}
+void DebugInfo::SetBreakAtEntry() {
+ DCHECK(CanBreakAtEntry());
+ set_flags(flags() | kBreakAtEntry);
+}
+
+void DebugInfo::ClearBreakAtEntry() {
+ DCHECK(CanBreakAtEntry());
+ set_flags(flags() & ~kBreakAtEntry);
+}
+
+bool DebugInfo::BreakAtEntry() const { return (flags() & kBreakAtEntry) != 0; }
+
+bool DebugInfo::CanBreakAtEntry() const {
+ return (flags() & kCanBreakAtEntry) != 0;
+}
+
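The four accessors above are plain bit operations on the flags word. A standalone sketch using the flag values from debug-objects.h further below:

#include <cassert>

enum Flag { kNone = 0, kBreakAtEntry = 1 << 3, kCanBreakAtEntry = 1 << 4 };

int main() {
  int flags = kCanBreakAtEntry;  // precondition checked by the DCHECKs above
  flags |= kBreakAtEntry;                // SetBreakAtEntry
  assert((flags & kBreakAtEntry) != 0);  // BreakAtEntry
  flags &= ~kBreakAtEntry;               // ClearBreakAtEntry
  assert((flags & kBreakAtEntry) == 0);
  assert((flags & kCanBreakAtEntry) != 0);  // CanBreakAtEntry
  return 0;
}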
// Check if there is a break point at this source position.
bool DebugInfo::HasBreakPoint(int source_position) {
DCHECK(HasBreakInfo());
@@ -45,14 +63,12 @@ bool DebugInfo::HasBreakPoint(int source_position) {
Object* DebugInfo::GetBreakPointInfo(int source_position) {
DCHECK(HasBreakInfo());
Isolate* isolate = GetIsolate();
- if (!break_points()->IsUndefined(isolate)) {
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined(isolate)) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->source_position() == source_position) {
- return break_point_info;
- }
+ for (int i = 0; i < break_points()->length(); i++) {
+ if (!break_points()->get(i)->IsUndefined(isolate)) {
+ BreakPointInfo* break_point_info =
+ BreakPointInfo::cast(break_points()->get(i));
+ if (break_point_info->source_position() == source_position) {
+ return break_point_info;
}
}
}
@@ -60,18 +76,16 @@ Object* DebugInfo::GetBreakPointInfo(int source_position) {
}
bool DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
Isolate* isolate = debug_info->GetIsolate();
- if (debug_info->break_points()->IsUndefined(isolate)) return false;
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue;
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- BreakPointInfo::ClearBreakPoint(break_point_info, break_point_object);
+ if (BreakPointInfo::HasBreakPoint(break_point_info, break_point)) {
+ BreakPointInfo::ClearBreakPoint(break_point_info, break_point);
return true;
}
}
@@ -79,14 +93,14 @@ bool DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
}
void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
Isolate* isolate = debug_info->GetIsolate();
Handle<Object> break_point_info(
debug_info->GetBreakPointInfo(source_position), isolate);
if (!break_point_info->IsUndefined(isolate)) {
BreakPointInfo::SetBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info), break_point_object);
+ Handle<BreakPointInfo>::cast(break_point_info), break_point);
return;
}
@@ -102,8 +116,8 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
}
if (index == kNoBreakPointInfo) {
// No free slot - extend break point info array.
- Handle<FixedArray> old_break_points = Handle<FixedArray>(
- FixedArray::cast(debug_info->break_points()), isolate);
+ Handle<FixedArray> old_break_points =
+ Handle<FixedArray>(debug_info->break_points(), isolate);
Handle<FixedArray> new_break_points = isolate->factory()->NewFixedArray(
old_break_points->length() +
DebugInfo::kEstimatedNofBreakPointsInFunction);
@@ -119,27 +133,26 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
// Allocate new BreakPointInfo object and set the break point.
Handle<BreakPointInfo> new_break_point_info =
isolate->factory()->NewBreakPointInfo(source_position);
- BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
+ BreakPointInfo::SetBreakPoint(new_break_point_info, break_point);
debug_info->break_points()->set(index, *new_break_point_info);
}
// Get the break points for a source position.
-Handle<Object> DebugInfo::GetBreakPointObjects(int source_position) {
+Handle<Object> DebugInfo::GetBreakPoints(int source_position) {
DCHECK(HasBreakInfo());
Object* break_point_info = GetBreakPointInfo(source_position);
Isolate* isolate = GetIsolate();
if (break_point_info->IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
- return Handle<Object>(
- BreakPointInfo::cast(break_point_info)->break_point_objects(), isolate);
+ return Handle<Object>(BreakPointInfo::cast(break_point_info)->break_points(),
+ isolate);
}
// Get the total number of break points.
int DebugInfo::GetBreakPointCount() {
DCHECK(HasBreakInfo());
Isolate* isolate = GetIsolate();
- if (break_points()->IsUndefined(isolate)) return 0;
int count = 0;
for (int i = 0; i < break_points()->length(); i++) {
if (!break_points()->get(i)->IsUndefined(isolate)) {
@@ -151,19 +164,16 @@ int DebugInfo::GetBreakPointCount() {
return count;
}
-Handle<Object> DebugInfo::FindBreakPointInfo(
- Handle<DebugInfo> debug_info, Handle<Object> break_point_object) {
+Handle<Object> DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
+ Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
Isolate* isolate = debug_info->GetIsolate();
- if (!debug_info->break_points()->IsUndefined(isolate)) {
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
- Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
- BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- return break_point_info;
- }
+ for (int i = 0; i < debug_info->break_points()->length(); i++) {
+ if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
+ BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ if (BreakPointInfo::HasBreakPoint(break_point_info, break_point)) {
+ return break_point_info;
}
}
}
@@ -187,40 +197,34 @@ bool DebugInfo::ClearCoverageInfo() {
}
namespace {
-bool IsEqual(Object* break_point1, Object* break_point2) {
- // TODO(kozyatinskiy): remove non-BreakPoint logic once the JS debug API has
- // been removed.
- if (break_point1->IsBreakPoint() != break_point2->IsBreakPoint())
- return false;
- if (!break_point1->IsBreakPoint()) return break_point1 == break_point2;
- return BreakPoint::cast(break_point1)->id() ==
- BreakPoint::cast(break_point2)->id();
+bool IsEqual(BreakPoint* break_point1, BreakPoint* break_point2) {
+ return break_point1->id() == break_point2->id();
}
} // namespace
// Remove the specified break point object.
void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = break_point_info->GetIsolate();
// If there are no break points, just ignore.
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) return;
+ if (break_point_info->break_points()->IsUndefined(isolate)) return;
// If there is a single break point, clear it if it is the same.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- if (IsEqual(break_point_info->break_point_objects(), *break_point_object)) {
- break_point_info->set_break_point_objects(
- isolate->heap()->undefined_value());
+ if (!break_point_info->break_points()->IsFixedArray()) {
+ if (IsEqual(BreakPoint::cast(break_point_info->break_points()),
+ *break_point)) {
+ break_point_info->set_break_points(isolate->heap()->undefined_value());
}
return;
}
// If there are multiple break points, shrink the array.
- DCHECK(break_point_info->break_point_objects()->IsFixedArray());
- Handle<FixedArray> old_array = Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
+ DCHECK(break_point_info->break_points()->IsFixedArray());
+ Handle<FixedArray> old_array =
+ Handle<FixedArray>(FixedArray::cast(break_point_info->break_points()));
Handle<FixedArray> new_array =
isolate->factory()->NewFixedArray(old_array->length() - 1);
int found_count = 0;
for (int i = 0; i < old_array->length(); i++) {
- if (IsEqual(old_array->get(i), *break_point_object)) {
+ if (IsEqual(BreakPoint::cast(old_array->get(i)), *break_point)) {
DCHECK_EQ(found_count, 0);
found_count++;
} else {
@@ -228,61 +232,60 @@ void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
}
}
// If the break point was found in the list, install the shrunken array.
- if (found_count > 0) break_point_info->set_break_point_objects(*new_array);
+ if (found_count > 0) break_point_info->set_break_points(*new_array);
}
// Add the specified break point object.
void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = break_point_info->GetIsolate();
// If there were no break point objects before, just set it.
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
- break_point_info->set_break_point_objects(*break_point_object);
+ if (break_point_info->break_points()->IsUndefined(isolate)) {
+ break_point_info->set_break_points(*break_point);
return;
}
// If the break point object is the same as before, just ignore.
- if (break_point_info->break_point_objects() == *break_point_object) return;
+ if (break_point_info->break_points() == *break_point) return;
// If there was one break point object before, replace it with an array.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
+ if (!break_point_info->break_points()->IsFixedArray()) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
- array->set(0, break_point_info->break_point_objects());
- array->set(1, *break_point_object);
- break_point_info->set_break_point_objects(*array);
+ array->set(0, break_point_info->break_points());
+ array->set(1, *break_point);
+ break_point_info->set_break_points(*array);
return;
}
// If there was more than one break point before, extend the array.
- Handle<FixedArray> old_array = Handle<FixedArray>(
- FixedArray::cast(break_point_info->break_point_objects()));
+ Handle<FixedArray> old_array =
+ Handle<FixedArray>(FixedArray::cast(break_point_info->break_points()));
Handle<FixedArray> new_array =
isolate->factory()->NewFixedArray(old_array->length() + 1);
for (int i = 0; i < old_array->length(); i++) {
// If the break point was there before just ignore.
- if (IsEqual(old_array->get(i), *break_point_object)) return;
+ if (IsEqual(BreakPoint::cast(old_array->get(i)), *break_point)) return;
new_array->set(i, old_array->get(i));
}
// Add the new break point.
- new_array->set(old_array->length(), *break_point_object);
- break_point_info->set_break_point_objects(*new_array);
+ new_array->set(old_array->length(), *break_point);
+ break_point_info->set_break_points(*new_array);
}
-bool BreakPointInfo::HasBreakPointObject(
- Handle<BreakPointInfo> break_point_info,
- Handle<Object> break_point_object) {
+bool BreakPointInfo::HasBreakPoint(Handle<BreakPointInfo> break_point_info,
+ Handle<BreakPoint> break_point) {
// No break point.
Isolate* isolate = break_point_info->GetIsolate();
- if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
+ if (break_point_info->break_points()->IsUndefined(isolate)) {
return false;
}
// Single break point.
- if (!break_point_info->break_point_objects()->IsFixedArray()) {
- return IsEqual(break_point_info->break_point_objects(),
- *break_point_object);
+ if (!break_point_info->break_points()->IsFixedArray()) {
+ return IsEqual(BreakPoint::cast(break_point_info->break_points()),
+ *break_point);
}
// Multiple break points.
- FixedArray* array = FixedArray::cast(break_point_info->break_point_objects());
+ FixedArray* array = FixedArray::cast(break_point_info->break_points());
for (int i = 0; i < array->length(); i++) {
- if (IsEqual(array->get(i), *break_point_object)) {
+ if (IsEqual(BreakPoint::cast(array->get(i)), *break_point)) {
return true;
}
}
@@ -292,11 +295,11 @@ bool BreakPointInfo::HasBreakPointObject(
// Get the number of break points.
int BreakPointInfo::GetBreakPointCount() {
// No break point.
- if (break_point_objects()->IsUndefined(GetIsolate())) return 0;
+ if (break_points()->IsUndefined(GetIsolate())) return 0;
// Single break point.
- if (!break_point_objects()->IsFixedArray()) return 1;
+ if (!break_points()->IsFixedArray()) return 1;
// Multiple break points.
- return FixedArray::cast(break_point_objects())->length();
+ return FixedArray::cast(break_points())->length();
}
int CoverageInfo::SlotCount() const {
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 0ce134b0b3..767cd7e81b 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -14,6 +14,7 @@
namespace v8 {
namespace internal {
+class BreakPoint;
class BytecodeArray;
// The DebugInfo class holds additional information for a function being
@@ -24,7 +25,9 @@ class DebugInfo : public Struct {
kNone = 0,
kHasBreakInfo = 1 << 0,
kPreparedForBreakpoints = 1 << 1,
- kHasCoverageInfo = 2 << 1,
+ kHasCoverageInfo = 1 << 2,
+ kBreakAtEntry = 1 << 3,
+ kCanBreakAtEntry = 1 << 4
};
typedef base::Flags<Flag> Flags;
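Note that the kHasCoverageInfo change in this hunk is purely notational: 2 << 1 and 1 << 2 both evaluate to 4, so existing flag values are unchanged; the rewrite restores the 1 << n convention so that the new kBreakAtEntry and kCanBreakAtEntry bits (3 and 4) read uniformly. As a quick check:

// Both spellings denote bit 2; only the notation changes.
static_assert((2 << 1) == (1 << 2), "same bit, different spelling");
static_assert((1 << 2) == 4, "kHasCoverageInfo keeps its value");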
@@ -51,6 +54,12 @@ class DebugInfo : public Struct {
// DebugInfo is now empty.
bool ClearBreakInfo();
+ // Accessors to flag whether to break before entering the function.
+ // This is used to break for functions with no source, e.g. builtins.
+ void SetBreakAtEntry();
+ void ClearBreakAtEntry();
+ bool BreakAtEntry() const;
+
// The instrumented bytecode array for functions with break points.
DECL_ACCESSORS(debug_bytecode_array, Object)
@@ -61,15 +70,15 @@ class DebugInfo : public Struct {
bool HasBreakPoint(int source_position);
// Attempt to clear a break point. Return true if successful.
static bool ClearBreakPoint(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
// Set a break point.
static void SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
// Get the break points for a source position.
- Handle<Object> GetBreakPointObjects(int source_position);
+ Handle<Object> GetBreakPoints(int source_position);
// Find the break point info holding this break point.
static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
// Get the number of break points for this function.
int GetBreakPointCount();
@@ -78,6 +87,10 @@ class DebugInfo : public Struct {
inline BytecodeArray* OriginalBytecodeArray();
inline BytecodeArray* DebugBytecodeArray();
+ // Returns whether we should be able to break before entering the function.
+ // This is true for functions with no source, e.g. builtins.
+ bool CanBreakAtEntry() const;
+
// --- Block Coverage ---
// ----------------------
@@ -122,17 +135,17 @@ class BreakPointInfo : public Tuple2 {
// The position in the source for the break position.
DECL_INT_ACCESSORS(source_position)
// List of related JavaScript break points.
- DECL_ACCESSORS(break_point_objects, Object)
+ DECL_ACCESSORS(break_points, Object)
// Removes a break point.
static void ClearBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
// Set a break point.
static void SetBreakPoint(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
- // Check if break point info has this break point object.
- static bool HasBreakPointObject(Handle<BreakPointInfo> info,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
+ // Check if break point info has this break point.
+ static bool HasBreakPoint(Handle<BreakPointInfo> info,
+ Handle<BreakPoint> break_point);
// Get the number of break points for this code offset.
int GetBreakPointCount();
@@ -141,7 +154,7 @@ class BreakPointInfo : public Tuple2 {
DECL_CAST(BreakPointInfo)
static const int kSourcePositionOffset = kValue1Offset;
- static const int kBreakPointObjectsOffset = kValue2Offset;
+ static const int kBreakPointsOffset = kValue2Offset;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 5cf6bfb67d..7cc0e5f5b3 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -228,26 +228,70 @@ class GlobalDictionary
inline void ValueAtPut(int entry, Object* value);
};
-class NumberDictionaryShape : public BaseDictionaryShape<uint32_t> {
+class NumberDictionaryBaseShape : public BaseDictionaryShape<uint32_t> {
public:
- static const int kPrefixSize = 1;
- static const int kEntrySize = 3;
-
static inline bool IsMatch(uint32_t key, Object* other);
static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
static inline uint32_t Hash(Isolate* isolate, uint32_t key);
static inline uint32_t HashForObject(Isolate* isolate, Object* object);
+};
+
+class NumberDictionaryShape : public NumberDictionaryBaseShape {
+ public:
+ static const int kPrefixSize = 1;
+ static const int kEntrySize = 3;
+
+ static inline int GetMapRootIndex();
+};
+
+class SimpleNumberDictionaryShape : public NumberDictionaryBaseShape {
+ public:
+ static const bool kHasDetails = false;
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 2;
+
+ template <typename Dictionary>
+ static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
+ UNREACHABLE();
+ }
+
+ template <typename Dictionary>
+ static inline void DetailsAtPut(Dictionary* dict, int entry,
+ PropertyDetails value) {
+ UNREACHABLE();
+ }
static inline int GetMapRootIndex();
};
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ HashTable<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape>;
+
+// SimpleNumberDictionary is used to map a number to an entry.
+class SimpleNumberDictionary
+ : public Dictionary<SimpleNumberDictionary, SimpleNumberDictionaryShape> {
+ public:
+ DECL_CAST(SimpleNumberDictionary)
+ // Type specific at put (default NONE attributes is used when adding).
+ MUST_USE_RESULT static Handle<SimpleNumberDictionary> Set(
+ Handle<SimpleNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value);
+
+ static const int kEntryValueIndex = 1;
+};
+
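The difference between the two shapes is the per-entry footprint: NumberDictionaryShape keeps a PropertyDetails word per entry (kEntrySize == 3), while SimpleNumberDictionaryShape stores only a key and a value (kEntrySize == 2). A standalone sketch of what that means for slot layout (SlotOf is an illustrative helper):

#include <cassert>

constexpr int kNumberEntrySize = 3;  // key, value, PropertyDetails
constexpr int kSimpleEntrySize = 2;  // key, value only

constexpr int SlotOf(int entry, int entry_size) { return entry * entry_size; }

int main() {
  assert(SlotOf(5, kNumberEntrySize) == 15);  // entry 5 starts at slot 15
  assert(SlotOf(5, kSimpleEntrySize) == 10);  // denser layout: slot 10
  return 0;
}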
extern template class EXPORT_TEMPLATE_DECLARE(
V8_EXPORT_PRIVATE) HashTable<NumberDictionary, NumberDictionaryShape>;
extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Dictionary<NumberDictionary, NumberDictionaryShape>;
+// NumberDictionary is used as an elements backing store and stores
+// property details (a bitfield) for every entry.
class NumberDictionary
: public Dictionary<NumberDictionary, NumberDictionaryShape> {
public:
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index edca36c92e..bee28d93e2 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -496,6 +496,16 @@ inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
return static_cast<uint8_t>(value);
}
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(int value) {
+ UNREACHABLE();
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(int value) {
+ UNREACHABLE();
+}
+
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
return static_cast<ElementType>(value);
@@ -509,6 +519,16 @@ inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
return static_cast<uint8_t>(value);
}
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(uint32_t value) {
+ UNREACHABLE();
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(uint32_t value) {
+ UNREACHABLE();
+}
+
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
return static_cast<ElementType>(DoubleToInt32(value));
@@ -523,6 +543,16 @@ inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
}
template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(double value) {
+ UNREACHABLE();
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(double value) {
+ UNREACHABLE();
+}
+
+template <>
inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
return static_cast<float>(value);
}
@@ -533,6 +563,60 @@ inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
}
template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(int64_t value) {
+ UNREACHABLE();
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(uint64_t value) {
+ UNREACHABLE();
+}
+
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(int64_t value) {
+ return value;
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(uint64_t value) {
+ return value;
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(int64_t value) {
+ return static_cast<uint64_t>(value);
+}
+
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(uint64_t value) {
+ return static_cast<int64_t>(value);
+}
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::FromHandle(
+ Handle<Object> value, bool* lossless) {
+ if (value->IsSmi()) {
+ return from(Smi::ToInt(*value));
+ }
+ DCHECK(value->IsHeapNumber());
+ return from(HeapNumber::cast(*value)->value());
+}
+
+template <>
+inline int64_t FixedTypedArray<BigInt64ArrayTraits>::FromHandle(
+ Handle<Object> value, bool* lossless) {
+ DCHECK(value->IsBigInt());
+ return BigInt::cast(*value)->AsInt64(lossless);
+}
+
+template <>
+inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::FromHandle(
+ Handle<Object> value, bool* lossless) {
+ DCHECK(value->IsBigInt());
+ return BigInt::cast(*value)->AsUint64(lossless);
+}
+
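The two FromHandle specializations forward the optional lossless out-parameter to AsInt64/AsUint64, which report whether the low 64 bits round-trip. A standalone analogue of that contract (AsInt64 here is a simplified stand-in, not the V8 method):

#include <cassert>
#include <cstdint>

// raw_low64 models the low 64 bits of the BigInt, negative its sign,
// fits whether the BigInt had at most 64 bits of magnitude.
int64_t AsInt64(uint64_t raw_low64, bool negative, bool fits, bool* lossless) {
  if (lossless != nullptr) *lossless = fits;
  int64_t result = static_cast<int64_t>(raw_low64);
  if (lossless != nullptr && (result < 0) != negative) *lossless = false;
  return result;
}

int main() {
  bool lossless = false;
  // 2**63 is a positive BigInt but wraps to INT64_MIN as an int64_t.
  int64_t v = AsInt64(0x8000000000000000ull, /*negative=*/false,
                      /*fits=*/true, &lossless);
  assert(v == INT64_MIN && !lossless);
  return 0;
}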
+template <class Traits>
Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
int index) {
return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
@@ -555,6 +639,20 @@ void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
set(index, cast_value);
}
+template <>
+inline void FixedTypedArray<BigInt64ArrayTraits>::SetValue(uint32_t index,
+ Object* value) {
+ DCHECK(value->IsBigInt());
+ set(index, BigInt::cast(value)->AsInt64());
+}
+
+template <>
+inline void FixedTypedArray<BigUint64ArrayTraits>::SetValue(uint32_t index,
+ Object* value) {
+ DCHECK(value->IsBigInt());
+ set(index, BigInt::cast(value)->AsUint64());
+}
+
Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
return handle(Smi::FromInt(scalar), isolate);
}
@@ -592,6 +690,15 @@ Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
return isolate->factory()->NewNumber(scalar);
}
+Handle<Object> BigInt64ArrayTraits::ToHandle(Isolate* isolate, int64_t scalar) {
+ return BigInt::FromInt64(isolate, scalar);
+}
+
+Handle<Object> BigUint64ArrayTraits::ToHandle(Isolate* isolate,
+ uint64_t scalar) {
+ return BigInt::FromUint64(isolate, scalar);
+}
+
// static
template <class Traits>
STATIC_CONST_MEMBER_DEFINITION const InstanceType
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 5d78af8799..1861f0c735 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -103,7 +103,8 @@ class FixedArray : public FixedArrayBase {
// Return a grown copy if the index is bigger than the array's length.
static Handle<FixedArray> SetAndGrow(Handle<FixedArray> array, int index,
- Handle<Object> value);
+ Handle<Object> value,
+ PretenureFlag pretenure = NOT_TENURED);
// Setter that uses write barrier.
inline void set(int index, Object* value);
@@ -466,16 +467,18 @@ class PodArray : public ByteArray {
};
// V has parameters (Type, type, TYPE, C type, element_size)
-#define TYPED_ARRAYS(V) \
- V(Uint8, uint8, UINT8, uint8_t, 1) \
- V(Int8, int8, INT8, int8_t, 1) \
- V(Uint16, uint16, UINT16, uint16_t, 2) \
- V(Int16, int16, INT16, int16_t, 2) \
- V(Uint32, uint32, UINT32, uint32_t, 4) \
- V(Int32, int32, INT32, int32_t, 4) \
- V(Float32, float32, FLOAT32, float, 4) \
- V(Float64, float64, FLOAT64, double, 8) \
- V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
+#define TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t, 1) \
+ V(Int8, int8, INT8, int8_t, 1) \
+ V(Uint16, uint16, UINT16, uint16_t, 2) \
+ V(Int16, int16, INT16, int16_t, 2) \
+ V(Uint32, uint32, UINT32, uint32_t, 4) \
+ V(Int32, int32, INT32, int32_t, 4) \
+ V(Float32, float32, FLOAT32, float, 4) \
+ V(Float64, float64, FLOAT64, double, 8) \
+ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1) \
+ V(BigUint64, biguint64, BIGUINT64, uint64_t, 8) \
+ V(BigInt64, bigint64, BIGINT64, int64_t, 8)
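TYPED_ARRAYS is an X-macro: every consumer supplies a V(Type, type, TYPE, ctype, element_size) and gets one expansion per row, so adding the two BigInt rows automatically extends every such consumer. A standalone sketch with two rows (DEMO_TYPED_ARRAYS and PRINT_ROW are illustrative, not V8 macros):

#include <cstdint>
#include <cstdio>

#define DEMO_TYPED_ARRAYS(V)              \
  V(Float64, float64, FLOAT64, double, 8) \
  V(BigInt64, bigint64, BIGINT64, int64_t, 8)

#define PRINT_ROW(Type, type, TYPE, ctype, size) \
  std::printf(#Type " uses %d-byte elements\n", size);

int main() {
  DEMO_TYPED_ARRAYS(PRINT_ROW)  // expands to one printf per row
  return 0;
}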
class FixedTypedArrayBase : public FixedArrayBase {
public:
@@ -548,6 +551,11 @@ class FixedTypedArray : public FixedTypedArrayBase {
static inline ElementType from(int value);
static inline ElementType from(uint32_t value);
static inline ElementType from(double value);
+ static inline ElementType from(int64_t value);
+ static inline ElementType from(uint64_t value);
+
+ static inline ElementType FromHandle(Handle<Object> value,
+ bool* lossless = nullptr);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 9688717e76..6b8e18014a 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -12,6 +12,7 @@
#include "src/api.h"
#include "src/factory.h"
+#include "src/global-handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/property-descriptor.h"
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 1128e190b2..6bba2f0054 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -204,15 +204,6 @@ void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
}
-bool JSTypedArray::HasJSTypedArrayPrototype(Isolate* isolate) {
- DisallowHeapAllocation no_gc;
- Object* proto = map()->prototype();
- if (!proto->IsJSObject()) return false;
-
- JSObject* proto_obj = JSObject::cast(proto);
- return proto_obj->map()->prototype() == *isolate->typed_array_prototype();
-}
-
// static
MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
Handle<Object> receiver,
@@ -236,26 +227,6 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
return array;
}
-// static
-Handle<JSFunction> JSTypedArray::DefaultConstructor(
- Isolate* isolate, Handle<JSTypedArray> exemplar) {
- Handle<JSFunction> default_ctor = isolate->uint8_array_fun();
- switch (exemplar->type()) {
-#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: { \
- default_ctor = isolate->type##_array_fun(); \
- break; \
- }
-
- TYPED_ARRAYS(TYPED_ARRAY_CTOR)
-#undef TYPED_ARRAY_CTOR
- default:
- UNREACHABLE();
- }
-
- return default_ctor;
-}
-
#ifdef VERIFY_HEAP
ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
#endif
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 806c275c8f..09a54b38c1 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -299,22 +299,9 @@ class JSTypedArray : public JSArrayBufferView {
Handle<JSArrayBuffer> GetBuffer();
- inline bool HasJSTypedArrayPrototype(Isolate* isolate);
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name);
- static inline Handle<JSFunction> DefaultConstructor(
- Isolate* isolate, Handle<JSTypedArray> exemplar);
- // ES7 section 22.2.4.6 Create ( constructor, argumentList )
- static MaybeHandle<JSTypedArray> Create(Isolate* isolate,
- Handle<Object> default_ctor, int argc,
- Handle<Object>* argv,
- const char* method_name);
- // ES7 section 22.2.4.7 TypedArraySpeciesCreate ( exemplar, argumentList )
- static MaybeHandle<JSTypedArray> SpeciesCreate(Isolate* isolate,
- Handle<JSTypedArray> exemplar,
- int argc, Handle<Object>* argv,
- const char* method_name);
// Dispatched behavior.
DECL_PRINTER(JSTypedArray)
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
new file mode 100644
index 0000000000..afe297b880
--- /dev/null
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_PROMISE_INL_H_
+#define V8_OBJECTS_JS_PROMISE_INL_H_
+
+#include "src/objects.h"
+#include "src/objects/js-promise.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
+CAST_ACCESSOR(JSPromise)
+
+ACCESSORS(JSPromise, reactions_or_result, Object, kReactionsOrResultOffset)
+SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
+BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
+BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
+
+Object* JSPromise::result() const {
+ DCHECK_NE(Promise::kPending, status());
+ return reactions_or_result();
+}
+
+Object* JSPromise::reactions() const {
+ DCHECK_EQ(Promise::kPending, status());
+ return reactions_or_result();
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_PROMISE_INL_H_
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
new file mode 100644
index 0000000000..b454084b8e
--- /dev/null
+++ b/deps/v8/src/objects/js-promise.h
@@ -0,0 +1,105 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_PROMISE_H_
+#define V8_OBJECTS_JS_PROMISE_H_
+
+#include "src/objects.h"
+#include "src/objects/promise.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Representation of promise objects in the specification. Our layout of
+// JSPromise differs a bit from the layout in the specification, for example
+// there's only a single list of PromiseReaction objects, instead of separate
+// lists for fulfill and reject reactions. The PromiseReaction carries both
+// callbacks from the start, and is eventually morphed into the proper kind of
+// PromiseReactionJobTask when the JSPromise is settled.
+//
+// We also overlay the result and reactions fields on the JSPromise, since
+// the reactions are only necessary for pending promises, whereas the result
+// is only meaningful for settled promises.
+class JSPromise : public JSObject {
+ public:
+ // [reactions_or_result]: Smi 0-terminated list of PromiseReaction objects
+ // if the JSPromise is not settled yet, otherwise the result.
+ DECL_ACCESSORS(reactions_or_result, Object)
+
+ // [result]: Checks that the promise is settled and returns the result.
+ inline Object* result() const;
+
+ // [reactions]: Checks that the promise is pending and returns the reactions.
+ inline Object* reactions() const;
+
+ DECL_INT_ACCESSORS(flags)
+
+ // [has_handler]: Whether this promise has a reject handler or not.
+ DECL_BOOLEAN_ACCESSORS(has_handler)
+
+ // [handled_hint]: Whether this promise will be handled by a catch
+ // block in an async function.
+ DECL_BOOLEAN_ACCESSORS(handled_hint)
+
+ static const char* Status(Promise::PromiseState status);
+ Promise::PromiseState status() const;
+ void set_status(Promise::PromiseState status);
+
+ // ES section #sec-fulfillpromise
+ static Handle<Object> Fulfill(Handle<JSPromise> promise,
+ Handle<Object> value);
+ // ES section #sec-rejectpromise
+ static Handle<Object> Reject(Handle<JSPromise> promise, Handle<Object> reason,
+ bool debug_event = true);
+ // ES section #sec-promise-resolve-functions
+ MUST_USE_RESULT static MaybeHandle<Object> Resolve(Handle<JSPromise> promise,
+ Handle<Object> resolution);
+
+ // This is a helper that extracts the JSPromise from the input
+ // {object}, which is used as a payload for PromiseReaction and
+ // PromiseReactionJobTask.
+ MUST_USE_RESULT static MaybeHandle<JSPromise> From(Handle<HeapObject> object);
+
+ DECL_CAST(JSPromise)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSPromise)
+ DECL_VERIFIER(JSPromise)
+
+ // Layout description.
+ static const int kReactionsOrResultOffset = JSObject::kHeaderSize;
+ static const int kFlagsOffset = kReactionsOrResultOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
+ static const int kSizeWithEmbedderFields =
+ kSize + v8::Promise::kEmbedderFieldCount * kPointerSize;
+
+ // Flags layout.
+ // The first two bits store the v8::Promise::PromiseState.
+ static const int kStatusBits = 2;
+ static const int kHasHandlerBit = 2;
+ static const int kHandledHintBit = 3;
+
+ static const int kStatusShift = 0;
+ static const int kStatusMask = 0x3;
+ STATIC_ASSERT(v8::Promise::kPending == 0);
+ STATIC_ASSERT(v8::Promise::kFulfilled == 1);
+ STATIC_ASSERT(v8::Promise::kRejected == 2);
+
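A standalone sketch of how status(), has_handler() and handled_hint() decode the flags word laid out above:

#include <cassert>

constexpr int kStatusShift = 0, kStatusMask = 0x3;
constexpr int kHasHandlerBit = 2, kHandledHintBit = 3;

int main() {
  int flags = /*kFulfilled=*/1 | (1 << kHasHandlerBit);
  assert(((flags >> kStatusShift) & kStatusMask) == 1);  // status()
  assert((flags & (1 << kHasHandlerBit)) != 0);          // has_handler()
  assert((flags & (1 << kHandledHintBit)) == 0);         // handled_hint()
  return 0;
}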
+ private:
+ // ES section #sec-triggerpromisereactions
+ static Handle<Object> TriggerPromiseReactions(Isolate* isolate,
+ Handle<Object> reactions,
+ Handle<Object> argument,
+ PromiseReaction::Type type);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_PROMISE_H_
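
The comment and layout above pack the two-bit promise state together with the has_handler and handled_hint bits, and overlay the reactions list and the result in a single field. A minimal standalone sketch of the same packing; MiniPromise and its members are invented for illustration and are not part of the patch:

#include <cassert>
#include <cstdint>

// Bits 0-1 hold the v8::Promise::PromiseState, bit 2 the has_handler
// flag, bit 3 the handled_hint flag, mirroring the layout above.
enum State : uint32_t { kPending = 0, kFulfilled = 1, kRejected = 2 };

struct MiniPromise {
  static constexpr uint32_t kStatusMask = 0x3;
  static constexpr int kHasHandlerBit = 2;

  uint32_t flags = kPending;
  // Overlaid field: reaction list while pending, result once settled.
  void* reactions_or_result = nullptr;

  State status() const { return static_cast<State>(flags & kStatusMask); }
  void set_status(State s) { flags = (flags & ~kStatusMask) | s; }
  bool has_handler() const { return (flags >> kHasHandlerBit) & 1; }
  void set_has_handler() { flags |= 1u << kHasHandlerBit; }
};

int main() {
  MiniPromise p;
  assert(p.status() == kPending);
  p.set_has_handler();
  p.set_status(kFulfilled);
  assert(p.status() == kFulfilled && p.has_handler());
}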
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 69cd5c3104..6a0c4e3391 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -48,7 +48,7 @@ class JSRegExp : public JSObject {
};
typedef base::Flags<Flag> Flags;
- static int FlagCount() { return 6; }
+ static constexpr int FlagCount() { return 6; }
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(flags, Object)
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 34a427c67b..fa9fcedaab 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITERAL_OBJECTS_INL_H_
-#define V8_LITERAL_OBJECTS_INL_H_
+#ifndef V8_OBJECTS_LITERAL_OBJECTS_INL_H_
+#define V8_OBJECTS_LITERAL_OBJECTS_INL_H_
#include "src/objects-inl.h"
#include "src/objects/literal-objects.h"
@@ -48,4 +48,4 @@ ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
#include "src/objects/object-macros-undef.h"
-#endif // V8_LITERAL_OBJECTS_INL_H_
+#endif // V8_OBJECTS_LITERAL_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 13f8b00878..ab673aad80 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -423,6 +423,10 @@ void ClassBoilerplate::AddToElementsTemplate(
Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
Isolate* isolate, ClassLiteral* expr) {
+ // Create a non-caching handle scope to ensure that the temporary handle used
+ // by ObjectDescriptor for passing Smis around does not corrupt the handle
+ // cache in CanonicalHandleScope.
+ HandleScope scope(isolate);
Factory* factory = isolate->factory();
ObjectDescriptor static_desc;
ObjectDescriptor instance_desc;
@@ -509,11 +513,14 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
case ClassLiteral::Property::SETTER:
value_kind = ClassBoilerplate::kSetter;
break;
- case ClassLiteral::Property::FIELD:
+ case ClassLiteral::Property::PUBLIC_FIELD:
if (property->is_computed_name()) {
++dynamic_argument_index;
}
continue;
+ case ClassLiteral::Property::PRIVATE_FIELD:
+ DCHECK(!property->is_computed_name());
+ continue;
}
ObjectDescriptor& desc =
@@ -580,7 +587,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
class_boilerplate->set_instance_computed_properties(
*instance_desc.computed_properties());
- return class_boilerplate;
+ return scope.CloseAndEscape(class_boilerplate);
}
} // namespace internal
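
The non-caching scope added above follows the usual open-allocate-escape idiom: temporaries die with the scope, and CloseAndEscape re-creates only the returned value in the enclosing scope. A simplified standalone model of that idiom; HandleArena and the index-based handles are invented, the real HandleScope works on tagged pointers:

#include <cassert>
#include <cstddef>
#include <vector>

// Stand-in for handle storage; indices model handle locations.
struct HandleArena { std::vector<int> slots; };

class Scope {
 public:
  explicit Scope(HandleArena* a) : arena_(a), base_(a->slots.size()) {}
  ~Scope() {
    if (open_) arena_->slots.resize(base_);  // temporaries die here
  }
  size_t Allocate(int value) {  // a "handle" is an index here
    arena_->slots.push_back(value);
    return arena_->slots.size() - 1;
  }
  // Drop all handles created in this scope, then re-create the escaped
  // value just above the enclosing scope's handles so it survives.
  size_t CloseAndEscape(size_t handle) {
    int value = arena_->slots[handle];
    arena_->slots.resize(base_);
    arena_->slots.push_back(value);
    open_ = false;
    return arena_->slots.size() - 1;
  }
 private:
  HandleArena* arena_;
  size_t base_;
  bool open_ = true;
};

int main() {
  HandleArena arena;
  size_t result;
  {
    Scope scope(&arena);
    scope.Allocate(1);  // temporary, reclaimed with the scope
    size_t boilerplate = scope.Allocate(42);
    result = scope.CloseAndEscape(boilerplate);
  }
  assert(arena.slots.size() == 1 && arena.slots[result] == 42);
}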
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index c78f947b3a..250a998f61 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -503,6 +503,7 @@ bool Map::IsJSObjectMap() const {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
+bool Map::IsJSPromiseMap() const { return instance_type() == JS_PROMISE_TYPE; }
bool Map::IsJSArrayMap() const { return instance_type() == JS_ARRAY_TYPE; }
bool Map::IsJSFunctionMap() const {
return instance_type() == JS_FUNCTION_TYPE;
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index bf0d843884..3bc9dd17ff 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -26,6 +26,7 @@ namespace internal {
V(CodeDataContainer) \
V(ConsString) \
V(DataObject) \
+ V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -713,6 +714,7 @@ class Map : public HeapObject {
inline bool IsPrimitiveMap() const;
inline bool IsJSReceiverMap() const;
inline bool IsJSObjectMap() const;
+ inline bool IsJSPromiseMap() const;
inline bool IsJSArrayMap() const;
inline bool IsJSFunctionMap() const;
inline bool IsStringMap() const;
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
new file mode 100644
index 0000000000..71a9ea20ec
--- /dev/null
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MICROTASK_INL_H_
+#define V8_OBJECTS_MICROTASK_INL_H_
+
+#include "src/objects/microtask.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(Microtask)
+CAST_ACCESSOR(CallbackTask)
+CAST_ACCESSOR(CallableTask)
+
+ACCESSORS(CallableTask, callable, JSReceiver, kCallableOffset)
+ACCESSORS(CallableTask, context, Context, kContextOffset)
+
+ACCESSORS(CallbackTask, callback, Foreign, kCallbackOffset)
+ACCESSORS(CallbackTask, data, Foreign, kDataOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MICROTASK_INL_H_
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
new file mode 100644
index 0000000000..33f121aa2c
--- /dev/null
+++ b/deps/v8/src/objects/microtask.h
@@ -0,0 +1,77 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MICROTASK_H_
+#define V8_OBJECTS_MICROTASK_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Abstract base class for all microtasks that can be scheduled on the
+// microtask queue. This class merely serves as a marker interface.
+class Microtask : public Struct {
+ public:
+ // Dispatched behavior.
+ DECL_CAST(Microtask)
+ DECL_VERIFIER(Microtask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Microtask);
+};
+
+// A CallbackTask is a special Microtask that allows us to schedule
+// C++ microtask callbacks on the microtask queue. This is heavily
+// used by Blink, for example.
+class CallbackTask : public Microtask {
+ public:
+ DECL_ACCESSORS(callback, Foreign)
+ DECL_ACCESSORS(data, Foreign)
+
+ static const int kCallbackOffset = Microtask::kHeaderSize;
+ static const int kDataOffset = kCallbackOffset + kPointerSize;
+ static const int kSize = kDataOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(CallbackTask)
+ DECL_PRINTER(CallbackTask)
+ DECL_VERIFIER(CallbackTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CallbackTask)
+};
+
+// A CallableTask is a special (internal) Microtask that allows us to
+// schedule arbitrary callables on the microtask queue. We use this
+// for various tests of the microtask queue.
+class CallableTask : public Microtask {
+ public:
+ DECL_ACCESSORS(callable, JSReceiver)
+ DECL_ACCESSORS(context, Context)
+
+ static const int kCallableOffset = Microtask::kHeaderSize;
+ static const int kContextOffset = kCallableOffset + kPointerSize;
+ static const int kSize = kContextOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(CallableTask)
+ DECL_PRINTER(CallableTask)
+ DECL_VERIFIER(CallableTask)
+ void BriefPrintDetails(std::ostream& os);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CallableTask);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MICROTASK_H_
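
The offsets in these classes follow V8's chaining convention: each field offset is the previous one plus kPointerSize, and kSize closes the layout. A compile-time sketch of the convention; the kHeaderSize value is illustrative, not the real heap layout:

#include <cstddef>

// Illustrative constants; real values depend on the heap object layout.
constexpr int kPointerSize = sizeof(void*);
constexpr int kHeaderSize = 2 * kPointerSize;

// The same chaining used by CallableTask above: callable, then context.
constexpr int kCallableOffset = kHeaderSize;
constexpr int kContextOffset = kCallableOffset + kPointerSize;
constexpr int kSize = kContextOffset + kPointerSize;

// Layout invariants can be checked at compile time.
static_assert(kSize == kHeaderSize + 2 * kPointerSize,
              "two pointer-sized fields after the header");

int main() { return 0; }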
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index fe374d3fc6..9cf3bc4d2a 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -24,17 +24,16 @@ class ModuleInfoEntry;
class String;
class Zone;
-// A Module object is a mapping from export names to cells
-// This is still very much in flux.
+// The runtime representation of an ECMAScript module.
class Module : public Struct {
public:
DECL_CAST(Module)
DECL_VERIFIER(Module)
DECL_PRINTER(Module)
- // The code representing this Module, or an abstraction thereof.
- // This is either a SharedFunctionInfo or a JSFunction or a ModuleInfo
- // depending on whether the module has been instantiated and evaluated. See
+ // The code representing this module, or an abstraction thereof.
+ // This is either a SharedFunctionInfo, a JSFunction, a JSGeneratorObject, or
+ // a ModuleInfo, depending on the state (status) the module is in. See
// Module::ModuleVerify() for the precise invariant.
DECL_ACCESSORS(code, Object)
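
As the revised comment says, the dynamic type of the code field encodes the module's status. A standalone sketch of such a type-encodes-state field; the stand-in types are empty, and the mapping from type to status below is a plausible assumption for illustration, not taken from the patch:

#include <cassert>
#include <variant>

// Invented stand-ins for the heap types the 'code' field can hold.
struct ModuleInfo {};
struct SharedFunctionInfo {};
struct JSFunction {};
struct JSGeneratorObject {};

// The field's dynamic type doubles as the module's status record.
using Code = std::variant<ModuleInfo, SharedFunctionInfo, JSFunction,
                          JSGeneratorObject>;

enum class Status { kUninstantiated, kInstantiated, kEvaluating };

Status StatusOf(const Code& code) {
  if (std::holds_alternative<JSGeneratorObject>(code))
    return Status::kEvaluating;
  if (std::holds_alternative<JSFunction>(code)) return Status::kInstantiated;
  return Status::kUninstantiated;
}

int main() {
  Code code = SharedFunctionInfo{};
  assert(StatusOf(code) == Status::kUninstantiated);
  code = JSFunction{};
  assert(StatusOf(code) == Status::kInstantiated);
}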
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index d59a3f54a3..b4ebeb632b 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -27,6 +27,20 @@ BOOL_ACCESSORS(Symbol, flags, is_interesting_symbol, kInterestingSymbolBit)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
+bool Symbol::is_private_field() const {
+ bool value = BooleanBit::get(flags(), kPrivateFieldBit);
+ DCHECK_IMPLIES(value, is_private());
+ return value;
+}
+
+void Symbol::set_is_private_field() {
+ int old_value = flags();
+ // TODO(gsathya): Re-order the bits to have these next to each other
+ // and just do the bit shifts once.
+ set_flags(BooleanBit::set(old_value, kPrivateBit, true) |
+ BooleanBit::set(old_value, kPrivateFieldBit, true));
+}
+
bool Name::IsUniqueName() const {
uint32_t type = map()->instance_type();
return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
@@ -89,6 +103,13 @@ bool Name::IsPrivate() {
return this->IsSymbol() && Symbol::cast(this)->is_private();
}
+bool Name::IsPrivateField() {
+ bool is_private_field =
+ this->IsSymbol() && Symbol::cast(this)->is_private_field();
+ DCHECK_IMPLIES(is_private_field, IsPrivate());
+ return is_private_field;
+}
+
bool Name::AsArrayIndex(uint32_t* index) {
return IsString() && String::cast(this)->AsArrayIndex(index);
}
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index dd5b3692f9..e5cfe7733b 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -44,6 +44,10 @@ class Name : public HeapObject {
// If the name is private, it can only name own properties.
inline bool IsPrivate();
+ // If the name is a private field, it should behave like a private
+ // symbol but also throw on property access miss.
+ inline bool IsPrivateField();
+
inline bool IsUniqueName() const;
static inline bool ContainsCachedArrayIndex(uint32_t hash);
@@ -160,6 +164,14 @@ class Symbol : public Name {
// Symbol.keyFor on such a symbol simply needs to return the attached name.
DECL_BOOLEAN_ACCESSORS(is_public)
+ // [is_private_field]: Whether this is a private field. Private fields
+ // are the same as private symbols except they throw on missing
+ // property access.
+ //
+ // This also sets the is_private bit.
+ inline bool is_private_field() const;
+ inline void set_is_private_field();
+
DECL_CAST(Symbol)
// Dispatched behavior.
@@ -176,6 +188,7 @@ class Symbol : public Name {
static const int kWellKnownSymbolBit = 1;
static const int kPublicBit = 2;
static const int kInterestingSymbolBit = 3;
+ static const int kPrivateFieldBit = 4;
typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
// No weak fields.
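
set_is_private_field above sets two boolean bits in one store so that the DCHECK_IMPLIES invariant (private_field implies private) always holds. A standalone sketch with a minimal BooleanBit model; the kPrivateBit position is assumed to be 0, which is not shown in this hunk:

#include <cassert>

// Minimal model of V8's BooleanBit helper: one flag per bit position.
struct BooleanBit {
  static bool get(int flags, int bit) { return (flags >> bit) & 1; }
  static int set(int flags, int bit, bool v) {
    return v ? (flags | (1 << bit)) : (flags & ~(1 << bit));
  }
};

constexpr int kPrivateBit = 0;       // assumed position
constexpr int kPrivateFieldBit = 4;  // as declared in Symbol above

int main() {
  int flags = 0;
  // Mirror of set_is_private_field(): both bits are ORed in together.
  flags = BooleanBit::set(flags, kPrivateBit, true) |
          BooleanBit::set(flags, kPrivateFieldBit, true);
  // The DCHECK_IMPLIES invariant: private_field implies private.
  assert(BooleanBit::get(flags, kPrivateFieldBit));
  assert(BooleanBit::get(flags, kPrivateBit));
}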
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index f81dc29504..d8ca9355ad 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
#undef DECL_PRIMITIVE_ACCESSORS
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 604942a272..52835bce9b 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -7,6 +7,8 @@
// Note 2: This file is deliberately missing the include guards (the undeffing
// approach wouldn't work otherwise).
+//
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
// The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used
// for fields that can be written to and read from multiple threads at the same
diff --git a/deps/v8/src/objects/promise-inl.h b/deps/v8/src/objects/promise-inl.h
new file mode 100644
index 0000000000..4283f0aa19
--- /dev/null
+++ b/deps/v8/src/objects/promise-inl.h
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROMISE_INL_H_
+#define V8_OBJECTS_PROMISE_INL_H_
+
+#include "src/objects/promise.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(PromiseCapability)
+CAST_ACCESSOR(PromiseReaction)
+CAST_ACCESSOR(PromiseReactionJobTask)
+CAST_ACCESSOR(PromiseFulfillReactionJobTask)
+CAST_ACCESSOR(PromiseRejectReactionJobTask)
+CAST_ACCESSOR(PromiseResolveThenableJobTask)
+
+ACCESSORS(PromiseReaction, next, Object, kNextOffset)
+ACCESSORS(PromiseReaction, reject_handler, HeapObject, kRejectHandlerOffset)
+ACCESSORS(PromiseReaction, fulfill_handler, HeapObject, kFulfillHandlerOffset)
+ACCESSORS(PromiseReaction, payload, HeapObject, kPayloadOffset)
+
+ACCESSORS(PromiseResolveThenableJobTask, context, Context, kContextOffset)
+ACCESSORS(PromiseResolveThenableJobTask, promise_to_resolve, JSPromise,
+ kPromiseToResolveOffset)
+ACCESSORS(PromiseResolveThenableJobTask, then, JSReceiver, kThenOffset)
+ACCESSORS(PromiseResolveThenableJobTask, thenable, JSReceiver, kThenableOffset)
+
+ACCESSORS(PromiseReactionJobTask, context, Context, kContextOffset)
+ACCESSORS(PromiseReactionJobTask, argument, Object, kArgumentOffset)
+ACCESSORS(PromiseReactionJobTask, handler, HeapObject, kHandlerOffset)
+ACCESSORS(PromiseReactionJobTask, payload, HeapObject, kPayloadOffset)
+
+ACCESSORS(PromiseCapability, promise, HeapObject, kPromiseOffset)
+ACCESSORS(PromiseCapability, resolve, Object, kResolveOffset)
+ACCESSORS(PromiseCapability, reject, Object, kRejectOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROMISE_INL_H_
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
new file mode 100644
index 0000000000..36ef4afe1d
--- /dev/null
+++ b/deps/v8/src/objects/promise.h
@@ -0,0 +1,168 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_PROMISE_H_
+#define V8_OBJECTS_PROMISE_H_
+
+#include "src/objects/microtask.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Struct to hold state required for PromiseReactionJob. See the comment on the
+// PromiseReaction below for details on how this is being managed to reduce the
+// memory and allocation overhead. This is the base class for the concrete
+//
+// - PromiseFulfillReactionJobTask
+// - PromiseRejectReactionJobTask
+//
+// classes, which are used to represent the two kinds of reactions; we
+// distinguish them by their instance types.
+class PromiseReactionJobTask : public Microtask {
+ public:
+ DECL_ACCESSORS(argument, Object)
+ DECL_ACCESSORS(context, Context)
+ // [handler]: This is either a Code object, a Callable or Undefined.
+ DECL_ACCESSORS(handler, HeapObject)
+ // [payload]: Usually a JSPromise or a PromiseCapability.
+ DECL_ACCESSORS(payload, HeapObject)
+
+ static const int kArgumentOffset = Microtask::kHeaderSize;
+ static const int kContextOffset = kArgumentOffset + kPointerSize;
+ static const int kHandlerOffset = kContextOffset + kPointerSize;
+ static const int kPayloadOffset = kHandlerOffset + kPointerSize;
+ static const int kSize = kPayloadOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(PromiseReactionJobTask)
+ DECL_VERIFIER(PromiseReactionJobTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobTask);
+};
+
+// Struct to hold state required for a PromiseReactionJob of type "Fulfill".
+class PromiseFulfillReactionJobTask : public PromiseReactionJobTask {
+ public:
+ // Dispatched behavior.
+ DECL_CAST(PromiseFulfillReactionJobTask)
+ DECL_PRINTER(PromiseFulfillReactionJobTask)
+ DECL_VERIFIER(PromiseFulfillReactionJobTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseFulfillReactionJobTask);
+};
+
+// Struct to hold state required for a PromiseReactionJob of type "Reject".
+class PromiseRejectReactionJobTask : public PromiseReactionJobTask {
+ public:
+ // Dispatched behavior.
+ DECL_CAST(PromiseRejectReactionJobTask)
+ DECL_PRINTER(PromiseRejectReactionJobTask)
+ DECL_VERIFIER(PromiseRejectReactionJobTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseRejectReactionJobTask);
+};
+
+// A container struct to hold state required for PromiseResolveThenableJob.
+class PromiseResolveThenableJobTask : public Microtask {
+ public:
+ DECL_ACCESSORS(context, Context)
+ DECL_ACCESSORS(promise_to_resolve, JSPromise)
+ DECL_ACCESSORS(then, JSReceiver)
+ DECL_ACCESSORS(thenable, JSReceiver)
+
+ static const int kContextOffset = Microtask::kHeaderSize;
+ static const int kPromiseToResolveOffset = kContextOffset + kPointerSize;
+ static const int kThenOffset = kPromiseToResolveOffset + kPointerSize;
+ static const int kThenableOffset = kThenOffset + kPointerSize;
+ static const int kSize = kThenableOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(PromiseResolveThenableJobTask)
+ DECL_PRINTER(PromiseResolveThenableJobTask)
+ DECL_VERIFIER(PromiseResolveThenableJobTask)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobTask);
+};
+
+// Struct to hold the state of a PromiseCapability.
+class PromiseCapability : public Struct {
+ public:
+ DECL_ACCESSORS(promise, HeapObject)
+ DECL_ACCESSORS(resolve, Object)
+ DECL_ACCESSORS(reject, Object)
+
+ static const int kPromiseOffset = Struct::kHeaderSize;
+ static const int kResolveOffset = kPromiseOffset + kPointerSize;
+ static const int kRejectOffset = kResolveOffset + kPointerSize;
+ static const int kSize = kRejectOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(PromiseCapability)
+ DECL_PRINTER(PromiseCapability)
+ DECL_VERIFIER(PromiseCapability)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseCapability);
+};
+
+// A representation of a promise reaction. This differs from the specification
+// in that the PromiseReaction here holds both handlers for the fulfill and
+// the reject case. When a JSPromise is eventually resolved (either via
+// fulfilling it or rejecting it), we morph this PromiseReaction object in
+// memory into a proper PromiseReactionJobTask and schedule it on the queue
+// of microtasks. So the size of PromiseReaction and the size of the
+// PromiseReactionJobTask have to be the same for this to work.
+//
+// The PromiseReaction::payload field usually holds a JSPromise
+// instance (in the fast case of a native promise) or a PromiseCapability
+// in case of a custom promise. For await we store the JSGeneratorObject
+// here and use custom Code handlers.
+//
+// We need to keep the context in the PromiseReaction so that we can run
+// the default handlers (in case they are undefined) in the proper context.
+//
+// The PromiseReaction objects form a singly-linked list, terminated by
+// Smi 0. On the JSPromise instance they are linked in reverse order,
+// and are turned into the proper order again when scheduling them on
+// the microtask queue.
+class PromiseReaction : public Struct {
+ public:
+ enum Type { kFulfill, kReject };
+
+ DECL_ACCESSORS(next, Object)
+ // [reject_handler]: This is either a Code object, a Callable or Undefined.
+ DECL_ACCESSORS(reject_handler, HeapObject)
+ // [fulfill_handler]: This is either a Code object, a Callable or Undefined.
+ DECL_ACCESSORS(fulfill_handler, HeapObject)
+ // [payload]: Usually a JSPromise or a PromiseCapability.
+ DECL_ACCESSORS(payload, HeapObject)
+
+ static const int kNextOffset = Struct::kHeaderSize;
+ static const int kRejectHandlerOffset = kNextOffset + kPointerSize;
+ static const int kFulfillHandlerOffset = kRejectHandlerOffset + kPointerSize;
+ static const int kPayloadOffset = kFulfillHandlerOffset + kPointerSize;
+ static const int kSize = kPayloadOffset + kPointerSize;
+
+ // Dispatched behavior.
+ DECL_CAST(PromiseReaction)
+ DECL_PRINTER(PromiseReaction)
+ DECL_VERIFIER(PromiseReaction)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReaction);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_PROMISE_H_
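
Since then() prepends each new PromiseReaction, the list hanging off the promise is in reverse order and is reversed once more when the promise settles, as the class comment explains. The sketch below models just that list discipline, with nullptr standing in for the Smi 0 terminator and strings for the handlers:

#include <cassert>
#include <string>

// Stand-in for PromiseReaction: a singly-linked node carrying both handlers.
struct Reaction {
  Reaction* next;
  std::string fulfill_handler;
  std::string reject_handler;
};

// then() prepends, so the newest reaction is at the head (reverse order).
Reaction* Prepend(Reaction* head, Reaction* r) {
  r->next = head;
  return r;
}

// On settlement the list is reversed back into call order before each
// node is morphed into the matching ReactionJobTask and scheduled.
Reaction* Reverse(Reaction* head) {
  Reaction* out = nullptr;
  while (head) {
    Reaction* next = head->next;
    head->next = out;
    out = head;
    head = next;
  }
  return out;
}

int main() {
  Reaction a{nullptr, "first", "first"};
  Reaction b{nullptr, "second", "second"};
  Reaction* list = Prepend(Prepend(nullptr, &a), &b);
  assert(list->fulfill_handler == "second");  // reverse order while pending
  list = Reverse(list);
  assert(list->fulfill_handler == "first");   // call order when scheduling
}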
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 82f835ff0c..d199d7f6ec 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -781,47 +781,6 @@ void ScopeInfo::ModuleVariable(int i, String** name, int* index,
}
}
-#ifdef DEBUG
-
-static void PrintList(const char* list_name, int nof_internal_slots, int start,
- int end, ScopeInfo* scope_info) {
- if (start < end) {
- PrintF("\n // %s\n", list_name);
- if (nof_internal_slots > 0) {
- PrintF(" %2d - %2d [internal slots]\n", 0, nof_internal_slots - 1);
- }
- for (int i = nof_internal_slots; start < end; ++i, ++start) {
- PrintF(" %2d ", i);
- String::cast(scope_info->get(start))->ShortPrint();
- PrintF("\n");
- }
- }
-}
-
-void ScopeInfo::Print() {
- PrintF("ScopeInfo ");
- if (HasFunctionName()) {
- FunctionName()->ShortPrint();
- } else {
- PrintF("/* no function name */");
- }
- PrintF("{");
-
- if (length() > 0) {
- PrintList("parameters", 0, ParameterNamesIndex(),
- ParameterNamesIndex() + ParameterCount(), this);
- PrintList("stack slots", 0, StackLocalNamesIndex(),
- StackLocalNamesIndex() + StackLocalCount(), this);
- PrintList("context slots", Context::MIN_CONTEXT_SLOTS,
- ContextLocalNamesIndex(),
- ContextLocalNamesIndex() + ContextLocalCount(), this);
- // TODO(neis): Print module stuff if present.
- }
-
- PrintF("}\n");
-}
-#endif // DEBUG
-
Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
Handle<Object> export_name,
Handle<Object> local_name,
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 3a8459a204..0532686ba0 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -35,6 +35,7 @@ class Zone;
class ScopeInfo : public FixedArray {
public:
DECL_CAST(ScopeInfo)
+ DECL_PRINTER(ScopeInfo)
// Return the type of this scope.
ScopeType scope_type();
@@ -187,10 +188,6 @@ class ScopeInfo : public FixedArray {
// Serializes empty scope info.
V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
-#ifdef DEBUG
- void Print();
-#endif
-
// The layout of the static part of a ScopeInfo is as follows. Each entry is
// numeric and occupies one array slot.
// 1. A set of properties of the scope.
@@ -307,7 +304,7 @@ class ScopeInfo : public FixedArray {
class HasSimpleParametersField
: public BitField<bool, AsmModuleField::kNext, 1> {};
class FunctionKindField
- : public BitField<FunctionKind, HasSimpleParametersField::kNext, 11> {};
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 5> {};
class HasOuterScopeInfoField
: public BitField<bool, FunctionKindField::kNext, 1> {};
class IsDebugEvaluateScopeField
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 57a72754b5..2f3b32f17c 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -27,8 +27,6 @@ ACCESSORS(SharedFunctionInfo, raw_name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, feedback_metadata, FeedbackMetadata,
kFeedbackMetadataOffset)
-ACCESSORS(SharedFunctionInfo, instance_class_name, String,
- kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
@@ -127,9 +125,10 @@ FunctionKind SharedFunctionInfo::kind() const {
}
void SharedFunctionInfo::set_kind(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
int hints = compiler_hints();
hints = FunctionKindBits::update(hints, kind);
+ hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
+ hints = IsDerivedConstructorBit::update(hints, IsDerivedConstructor(kind));
set_compiler_hints(hints);
UpdateFunctionMapIndex();
}
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 8e996042c0..077088dd28 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -136,9 +136,6 @@ class SharedFunctionInfo : public HeapObject {
DECL_INT_ACCESSORS(unique_id)
#endif
- // [instance class name]: class name for instances.
- DECL_ACCESSORS(instance_class_name, String)
-
// [function data]: This field holds some additional data for the function.
// Currently it has one of:
// - a FunctionTemplateInfo for use by the API [IsApiFunction()].
@@ -342,7 +339,11 @@ class SharedFunctionInfo : public HeapObject {
static Handle<Object> GetSourceCode(Handle<SharedFunctionInfo> shared);
static Handle<Object> GetSourceCodeHarmony(Handle<SharedFunctionInfo> shared);
- // Tells whether this function should be subject to debugging.
+ // Tells whether this function should be subject to debugging, e.g. for
+ // - scope inspection
+ // - internal break points
+ // - coverage and type profile
+ // - error stack trace
inline bool IsSubjectToDebugging();
// Whether this function is defined in user-provided JavaScript code.
@@ -424,7 +425,6 @@ class SharedFunctionInfo : public HeapObject {
V(kScopeInfoOffset, kPointerSize) \
V(kOuterScopeInfoOffset, kPointerSize) \
V(kConstructStubOffset, kPointerSize) \
- V(kInstanceClassNameOffset, kPointerSize) \
V(kFunctionDataOffset, kPointerSize) \
V(kScriptOffset, kPointerSize) \
V(kDebugInfoOffset, kPointerSize) \
@@ -469,7 +469,9 @@ class SharedFunctionInfo : public HeapObject {
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
V(IsWrappedBit, bool, 1, _) \
- V(FunctionKindBits, FunctionKind, 11, _) \
+ V(IsClassConstructorBit, bool, 1, _) \
+ V(IsDerivedConstructorBit, bool, 1, _) \
+ V(FunctionKindBits, FunctionKind, 5, _) \
V(HasDuplicateParametersBit, bool, 1, _) \
V(AllowLazyCompilationBit, bool, 1, _) \
V(NeedsHomeObjectBit, bool, 1, _) \
@@ -487,12 +489,6 @@ class SharedFunctionInfo : public HeapObject {
DisabledOptimizationReasonBits::kMax);
STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
- // Masks for checking if certain FunctionKind bits are set without fully
- // decoding of the FunctionKind bit field.
- static const int kClassConstructorMask = FunctionKind::kClassConstructor
- << FunctionKindBits::kShift;
- static const int kDerivedConstructorMask = FunctionKind::kDerivedConstructor
- << FunctionKindBits::kShift;
// Bit positions in |debugger_hints|.
#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
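
The hunk above shrinks FunctionKindBits to five bits and caches IsClassConstructor/IsDerivedConstructor as dedicated bits, so hot checks read a single bit instead of decoding the kind (the old shift-based masks are deleted). A standalone sketch of that update step; the BitField template and the reduced FunctionKind enum are simplified stand-ins:

#include <cassert>
#include <cstdint>

// Minimal BitField shaped like V8's: Shift/Size locate a value in a word.
template <typename T, int Shift, int Size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << Size) - 1u) << Shift;
  static uint32_t update(uint32_t hints, T value) {
    return (hints & ~kMask) | (static_cast<uint32_t>(value) << Shift);
  }
  static T decode(uint32_t hints) {
    return static_cast<T>((hints & kMask) >> Shift);
  }
};

enum FunctionKind : uint32_t {
  kNormal = 0, kBaseConstructor = 1, kDerivedConstructor = 2
};

using IsClassConstructorBit = BitField<bool, 0, 1>;
using IsDerivedConstructorBit = BitField<bool, 1, 1>;
using FunctionKindBits = BitField<FunctionKind, 2, 5>;

bool IsClassConstructor(FunctionKind k) {
  return k == kBaseConstructor || k == kDerivedConstructor;
}
bool IsDerivedConstructor(FunctionKind k) { return k == kDerivedConstructor; }

// Mirrors set_kind(): write the kind and refresh the cached predicate bits.
uint32_t SetKind(uint32_t hints, FunctionKind kind) {
  hints = FunctionKindBits::update(hints, kind);
  hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
  hints = IsDerivedConstructorBit::update(hints, IsDerivedConstructor(kind));
  return hints;
}

int main() {
  uint32_t hints = SetKind(0, kDerivedConstructor);
  // Hot path: a single bit test instead of decoding FunctionKindBits.
  assert(IsDerivedConstructorBit::decode(hints));
  assert(FunctionKindBits::decode(hints) == kDerivedConstructor);
}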
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 066fc6d879..dee56fb7f7 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -314,7 +314,7 @@ class String : public Name {
uint32_t inline ToValidIndex(Object* number);
// Trimming.
- enum TrimMode { kTrim, kTrimLeft, kTrimRight };
+ enum TrimMode { kTrim, kTrimStart, kTrimEnd };
static Handle<String> Trim(Handle<String> string, TrimMode mode);
DECL_CAST(String)
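
The renamed trim modes track the trimStart/trimEnd naming of String.prototype. A standalone sketch of the three modes over std::string; the Trim function here is illustrative, not V8's implementation:

#include <cassert>
#include <string>

enum TrimMode { kTrim, kTrimStart, kTrimEnd };

// Minimal whitespace trim, one branch per enumerator above.
std::string Trim(const std::string& s, TrimMode mode) {
  const char* ws = " \t\n\r";
  size_t begin = (mode == kTrimEnd) ? 0 : s.find_first_not_of(ws);
  size_t end = (mode == kTrimStart) ? s.size() : s.find_last_not_of(ws) + 1;
  if (begin == std::string::npos) return "";  // all whitespace
  return s.substr(begin, end - begin);
}

int main() {
  assert(Trim("  a  ", kTrim) == "a");
  assert(Trim("  a  ", kTrimStart) == "a  ");
  assert(Trim("  a  ", kTrimEnd) == "  a");
}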
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 24f306aa68..e35f3f137b 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -12,117 +12,41 @@
namespace v8 {
namespace internal {
-bool TemplateObjectDescription::Equals(
- TemplateObjectDescription const* that) const {
- if (this->raw_strings()->length() == that->raw_strings()->length()) {
- for (int i = this->raw_strings()->length(); --i >= 0;) {
- if (this->raw_strings()->get(i) != that->raw_strings()->get(i)) {
- return false;
- }
- }
- return true;
- }
- return false;
-}
-
// static
-Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
- Handle<TemplateObjectDescription> description,
- Handle<Context> native_context) {
- DCHECK(native_context->IsNativeContext());
- Isolate* const isolate = native_context->GetIsolate();
-
- // Check if we already have a [[TemplateMap]] for the {native_context},
- // and if not, just allocate one on the fly (which will be set below).
- Handle<TemplateMap> template_map =
- native_context->template_map()->IsUndefined(isolate)
- ? TemplateMap::New(isolate)
- : handle(TemplateMap::cast(native_context->template_map()), isolate);
-
- // Check if we already have an appropriate entry.
- Handle<JSArray> template_object;
- if (!TemplateMap::Lookup(template_map, description)
- .ToHandle(&template_object)) {
- // Create the raw object from the {raw_strings}.
- Handle<FixedArray> raw_strings(description->raw_strings(), isolate);
- Handle<JSArray> raw_object = isolate->factory()->NewJSArrayWithElements(
- raw_strings, PACKED_ELEMENTS, raw_strings->length(), TENURED);
-
- // Create the template object from the {cooked_strings}.
- Handle<FixedArray> cooked_strings(description->cooked_strings(), isolate);
- template_object = isolate->factory()->NewJSArrayWithElements(
- cooked_strings, PACKED_ELEMENTS, cooked_strings->length(), TENURED);
-
- // Freeze the {raw_object}.
- JSObject::SetIntegrityLevel(raw_object, FROZEN, kThrowOnError).ToChecked();
-
- // Install a "raw" data property for {raw_object} on {template_object}.
- PropertyDescriptor raw_desc;
- raw_desc.set_value(raw_object);
- raw_desc.set_configurable(false);
- raw_desc.set_enumerable(false);
- raw_desc.set_writable(false);
- JSArray::DefineOwnProperty(isolate, template_object,
- isolate->factory()->raw_string(), &raw_desc,
- kThrowOnError)
- .ToChecked();
-
- // Freeze the {template_object} as well.
- JSObject::SetIntegrityLevel(template_object, FROZEN, kThrowOnError)
- .ToChecked();
-
- // Remember the {template_object} in the {template_map}.
- template_map = TemplateMap::Add(template_map, description, template_object);
- native_context->set_template_map(*template_map);
- }
+Handle<JSArray> TemplateObjectDescription::CreateTemplateObject(
+ Handle<TemplateObjectDescription> description) {
+ Isolate* const isolate = description->GetIsolate();
+
+ // Create the raw object from the {raw_strings}.
+ Handle<FixedArray> raw_strings(description->raw_strings(), isolate);
+ Handle<JSArray> raw_object = isolate->factory()->NewJSArrayWithElements(
+ raw_strings, PACKED_ELEMENTS, raw_strings->length(), TENURED);
+
+ // Create the template object from the {cooked_strings}.
+ Handle<FixedArray> cooked_strings(description->cooked_strings(), isolate);
+ Handle<JSArray> template_object = isolate->factory()->NewJSArrayWithElements(
+ cooked_strings, PACKED_ELEMENTS, cooked_strings->length(), TENURED);
+
+ // Freeze the {raw_object}.
+ JSObject::SetIntegrityLevel(raw_object, FROZEN, kThrowOnError).ToChecked();
+
+ // Install a "raw" data property for {raw_object} on {template_object}.
+ PropertyDescriptor raw_desc;
+ raw_desc.set_value(raw_object);
+ raw_desc.set_configurable(false);
+ raw_desc.set_enumerable(false);
+ raw_desc.set_writable(false);
+ JSArray::DefineOwnProperty(isolate, template_object,
+ isolate->factory()->raw_string(), &raw_desc,
+ kThrowOnError)
+ .ToChecked();
+
+ // Freeze the {template_object} as well.
+ JSObject::SetIntegrityLevel(template_object, FROZEN, kThrowOnError)
+ .ToChecked();
return template_object;
}
-// static
-bool TemplateMapShape::IsMatch(TemplateObjectDescription* key, Object* value) {
- return key->Equals(TemplateObjectDescription::cast(value));
-}
-
-// static
-uint32_t TemplateMapShape::Hash(Isolate* isolate,
- TemplateObjectDescription* key) {
- return key->hash();
-}
-
-// static
-uint32_t TemplateMapShape::HashForObject(Isolate* isolate, Object* object) {
- return Hash(isolate, TemplateObjectDescription::cast(object));
-}
-
-// static
-Handle<TemplateMap> TemplateMap::New(Isolate* isolate) {
- return HashTable::New(isolate, 0);
-}
-
-// static
-MaybeHandle<JSArray> TemplateMap::Lookup(
- Handle<TemplateMap> template_map, Handle<TemplateObjectDescription> key) {
- int const entry = template_map->FindEntry(*key);
- if (entry == kNotFound) return MaybeHandle<JSArray>();
- int const index = EntryToIndex(entry);
- return handle(JSArray::cast(template_map->get(index + 1)));
-}
-
-// static
-Handle<TemplateMap> TemplateMap::Add(Handle<TemplateMap> template_map,
- Handle<TemplateObjectDescription> key,
- Handle<JSArray> value) {
- DCHECK_EQ(kNotFound, template_map->FindEntry(*key));
- template_map = EnsureCapacity(template_map, 1);
- uint32_t const hash = ShapeT::Hash(key->GetIsolate(), *key);
- int const entry = template_map->FindInsertionEntry(hash);
- int const index = EntryToIndex(entry);
- template_map->set(index + 0, *key);
- template_map->set(index + 1, *value);
- template_map->ElementAdded();
- return template_map;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index cac29a3530..6c1a99831a 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -16,61 +16,25 @@ namespace internal {
// TemplateObjectDescription is a pair of raw strings and cooked strings
// for tagged template literals. Used to communicate with the runtime
-// for template object creation within the {Runtime_GetTemplateObject} method.
-class TemplateObjectDescription final : public Tuple3 {
+// for template object creation within the {Runtime_CreateTemplateObject}
+// method.
+class TemplateObjectDescription final : public Tuple2 {
public:
- DECL_INT_ACCESSORS(hash)
DECL_ACCESSORS(raw_strings, FixedArray)
DECL_ACCESSORS(cooked_strings, FixedArray)
- bool Equals(TemplateObjectDescription const* that) const;
-
- static Handle<JSArray> GetTemplateObject(
- Handle<TemplateObjectDescription> description,
- Handle<Context> native_context);
+ static Handle<JSArray> CreateTemplateObject(
+ Handle<TemplateObjectDescription> description);
DECL_CAST(TemplateObjectDescription)
- static constexpr int kHashOffset = kValue1Offset;
- static constexpr int kRawStringsOffset = kValue2Offset;
- static constexpr int kCookedStringsOffset = kValue3Offset;
+ static constexpr int kRawStringsOffset = kValue1Offset;
+ static constexpr int kCookedStringsOffset = kValue2Offset;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateObjectDescription);
};
-class TemplateMapShape final : public BaseShape<TemplateObjectDescription*> {
- public:
- static bool IsMatch(TemplateObjectDescription* key, Object* value);
- static uint32_t Hash(Isolate* isolate, TemplateObjectDescription* key);
- static uint32_t HashForObject(Isolate* isolate, Object* object);
-
- static constexpr int kPrefixSize = 0;
- static constexpr int kEntrySize = 2;
-};
-
-class TemplateMap final : public HashTable<TemplateMap, TemplateMapShape> {
- public:
- static Handle<TemplateMap> New(Isolate* isolate);
-
- // Tries to lookup the given {key} in the {template_map}. Returns the
- // value if it's found, otherwise returns an empty MaybeHandle.
- WARN_UNUSED_RESULT static MaybeHandle<JSArray> Lookup(
- Handle<TemplateMap> template_map, Handle<TemplateObjectDescription> key);
-
- // Adds the {key} / {value} pair to the {template_map} and returns the
- // new TemplateMap (we might need to re-allocate). This assumes that
- // there's no entry for {key} in the {template_map} already.
- static Handle<TemplateMap> Add(Handle<TemplateMap> template_map,
- Handle<TemplateObjectDescription> key,
- Handle<JSArray> value);
-
- DECL_CAST(TemplateMap)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateMap);
-};
-
} // namespace internal
} // namespace v8
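
With the per-context TemplateMap cache gone, CreateTemplateObject just builds the cooked array, attaches the frozen raw array as a non-writable, non-enumerable, non-configurable "raw" property, and freezes the result. A sketch of the resulting shape using plain containers, with const standing in for frozen; the types are invented for illustration:

#include <cassert>
#include <memory>
#include <string>
#include <vector>

// Model of the object produced above: cooked strings as the array
// elements, with the frozen raw strings on a read-only "raw" property.
struct TemplateObject {
  const std::vector<std::string> cooked;  // the template object's elements
  const std::vector<std::string> raw;     // the frozen "raw" property
};

// Mirrors CreateTemplateObject(): no cache lookup any more; the caller
// decides how the object is reused per call site.
std::unique_ptr<TemplateObject> CreateTemplateObject(
    std::vector<std::string> raw, std::vector<std::string> cooked) {
  return std::unique_ptr<TemplateObject>(
      new TemplateObject{std::move(cooked), std::move(raw)});
}

int main() {
  // For a literal like tag`a\n${x}b`, raw keeps the escape un-interpreted.
  auto t = CreateTemplateObject({"a\\n", "b"}, {"a\n", "b"});
  assert(t->raw[0] == "a\\n" && t->cooked[0] == "a\n");
}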
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index fbbdd8b715..24218df199 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -5,7 +5,6 @@ gsathya@chromium.org
littledan@chromium.org
marja@chromium.org
neis@chromium.org
-rossberg@chromium.org
verwaest@chromium.org
# COMPONENT: Blink>JavaScript>Parser
diff --git a/deps/v8/src/parsing/background-parsing-task.cc b/deps/v8/src/parsing/background-parsing-task.cc
deleted file mode 100644
index cb811566df..0000000000
--- a/deps/v8/src/parsing/background-parsing-task.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/parsing/background-parsing-task.h"
-
-#include "src/counters.h"
-#include "src/objects-inl.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/scanner-character-streams.h"
-#include "src/vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void StreamedSource::Release() {
- parser.reset();
- info.reset();
-}
-
-BackgroundParsingTask::BackgroundParsingTask(
- StreamedSource* source, ScriptCompiler::CompileOptions options,
- int stack_size, Isolate* isolate)
- : source_(source),
- stack_size_(stack_size),
- script_data_(nullptr),
- timer_(isolate->counters()->compile_script_on_background()) {
- // We don't set the context to the CompilationInfo yet, because the background
- // thread cannot do anything with it anyway. We set it just before compilation
- // on the foreground thread.
- DCHECK(options == ScriptCompiler::kProduceParserCache ||
- options == ScriptCompiler::kProduceCodeCache ||
- options == ScriptCompiler::kProduceFullCodeCache ||
- options == ScriptCompiler::kNoCompileOptions ||
- options == ScriptCompiler::kEagerCompile);
-
- VMState<PARSER> state(isolate);
-
- // Prepare the data for the internalization phase and compilation phase, which
- // will happen in the main thread after parsing.
- ParseInfo* info = new ParseInfo(isolate->allocator());
- info->InitFromIsolate(isolate);
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
- } else {
- info->set_runtime_call_stats(nullptr);
- }
- info->set_toplevel();
- std::unique_ptr<Utf16CharacterStream> stream(
- ScannerStream::For(source->source_stream.get(), source->encoding,
- info->runtime_call_stats()));
- info->set_character_stream(std::move(stream));
- info->set_unicode_cache(&source_->unicode_cache);
- info->set_compile_options(options);
- info->set_allow_lazy_parsing();
- if (V8_UNLIKELY(info->block_coverage_enabled())) {
- info->AllocateSourceRangeMap();
- }
- info->set_cached_data(&script_data_);
- LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
- info->set_language_mode(
- stricter_language_mode(info->language_mode(), language_mode));
-
- source->info.reset(info);
- allocator_ = isolate->allocator();
-
- // Parser needs to stay alive for finalizing the parsing on the main
- // thread.
- source_->parser.reset(new Parser(source_->info.get()));
- source_->parser->DeserializeScopeChain(source_->info.get(),
- MaybeHandle<ScopeInfo>());
-}
-
-void BackgroundParsingTask::Run() {
- TimedHistogramScope timer(timer_);
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- source_->info->set_on_background_thread(true);
-
- // Reset the stack limit of the parser to reflect correctly that we're on a
- // background thread.
- uintptr_t old_stack_limit = source_->info->stack_limit();
- uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
- source_->info->set_stack_limit(stack_limit);
- source_->parser->set_stack_limit(stack_limit);
-
- source_->parser->ParseOnBackground(source_->info.get());
- if (FLAG_background_compile && source_->info->literal() != nullptr) {
- // Parsing has succeeded, compile.
- source_->outer_function_job = Compiler::CompileTopLevelOnBackgroundThread(
- source_->info.get(), allocator_, &source_->inner_function_jobs);
- }
-
- if (script_data_ != nullptr) {
- source_->cached_data.reset(new ScriptCompiler::CachedData(
- script_data_->data(), script_data_->length(),
- ScriptCompiler::CachedData::BufferOwned));
- script_data_->ReleaseDataOwnership();
- delete script_data_;
- script_data_ = nullptr;
- }
-
- source_->info->EmitBackgroundParseStatisticsOnBackgroundThread();
-
- source_->info->set_on_background_thread(false);
- source_->info->set_stack_limit(old_stack_limit);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/parsing/background-parsing-task.h b/deps/v8/src/parsing/background-parsing-task.h
deleted file mode 100644
index eb3ed61e2e..0000000000
--- a/deps/v8/src/parsing/background-parsing-task.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_BACKGROUND_PARSING_TASK_H_
-#define V8_PARSING_BACKGROUND_PARSING_TASK_H_
-
-#include <memory>
-
-#include "include/v8.h"
-#include "src/base/platform/platform.h"
-#include "src/base/platform/semaphore.h"
-#include "src/compiler.h"
-#include "src/parsing/parse-info.h"
-#include "src/unicode-cache.h"
-
-namespace v8 {
-namespace internal {
-
-class Parser;
-class ScriptData;
-class TimedHistogram;
-
-// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
-// data which needs to be transmitted between threads for background parsing,
-// finalizing it on the main thread, and compiling on the main thread.
-struct StreamedSource {
- StreamedSource(ScriptCompiler::ExternalSourceStream* source_stream,
- ScriptCompiler::StreamedSource::Encoding encoding)
- : source_stream(source_stream), encoding(encoding) {}
-
- void Release();
-
- // Internal implementation of v8::ScriptCompiler::StreamedSource.
- std::unique_ptr<ScriptCompiler::ExternalSourceStream> source_stream;
- ScriptCompiler::StreamedSource::Encoding encoding;
- std::unique_ptr<ScriptCompiler::CachedData> cached_data;
-
- // Data needed for parsing, and data needed to be passed between threads
- // between parsing and compilation. These need to be initialized before the
- // compilation starts.
- UnicodeCache unicode_cache;
- std::unique_ptr<ParseInfo> info;
- std::unique_ptr<Parser> parser;
-
- // Data needed for finalizing compilation after background compilation.
- std::unique_ptr<CompilationJob> outer_function_job;
- CompilationJobList inner_function_jobs;
-
- // Prevent copying.
- StreamedSource(const StreamedSource&) = delete;
- StreamedSource& operator=(const StreamedSource&) = delete;
-};
-
-class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
- public:
- BackgroundParsingTask(StreamedSource* source,
- ScriptCompiler::CompileOptions options, int stack_size,
- Isolate* isolate);
-
- virtual void Run();
-
- private:
- StreamedSource* source_; // Not owned.
- int stack_size_;
- ScriptData* script_data_;
- AccountingAllocator* allocator_;
- TimedHistogram* timer_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PARSING_BACKGROUND_PARSING_TASK_H_
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 709d5736b5..522b650be7 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H
-#define V8_PARSING_EXPRESSION_CLASSIFIER_H
+#ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H_
+#define V8_PARSING_EXPRESSION_CLASSIFIER_H_
#include "src/messages.h"
#include "src/parsing/scanner.h"
@@ -433,4 +433,4 @@ class ExpressionClassifier {
} // namespace internal
} // namespace v8
-#endif // V8_PARSING_EXPRESSION_CLASSIFIER_H
+#endif // V8_PARSING_EXPRESSION_CLASSIFIER_H_
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index b8f191dd5a..8657dab7f2 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -20,7 +20,6 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
: zone_(std::make_shared<Zone>(zone_allocator, ZONE_NAME)),
flags_(0),
extension_(nullptr),
- compile_options_(ScriptCompiler::kNoCompileOptions),
script_scope_(nullptr),
unicode_cache_(nullptr),
stack_limit_(0),
@@ -32,7 +31,6 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
function_literal_id_(FunctionLiteral::kIdTypeInvalid),
max_function_literal_id_(FunctionLiteral::kIdTypeInvalid),
character_stream_(nullptr),
- cached_data_(nullptr),
ast_value_factory_(nullptr),
ast_string_constants_(nullptr),
function_name_(nullptr),
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index e93c7137ca..5a0cf138c1 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -29,7 +29,6 @@ class DeclarationScope;
class FunctionLiteral;
class RuntimeCallStats;
class Logger;
-class ScriptData;
class SourceRangeMap;
class UnicodeCache;
class Utf16CharacterStream;
@@ -85,6 +84,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
set_on_background_thread)
FLAG_ACCESSOR(kWrappedAsFunction, is_wrapped_as_function,
set_wrapped_as_function)
+ FLAG_ACCESSOR(kAllowEvalCache, allow_eval_cache, set_allow_eval_cache)
#undef FLAG_ACCESSOR
void set_parse_restriction(ParseRestriction restriction) {
@@ -106,20 +106,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
v8::Extension* extension() const { return extension_; }
void set_extension(v8::Extension* extension) { extension_ = extension; }
- ScriptData** cached_data() const { return cached_data_; }
- void set_cached_data(ScriptData** cached_data) { cached_data_ = cached_data; }
ConsumedPreParsedScopeData* consumed_preparsed_scope_data() {
return &consumed_preparsed_scope_data_;
}
- ScriptCompiler::CompileOptions compile_options() const {
- return compile_options_;
- }
- void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
- compile_options_ = compile_options;
- }
-
DeclarationScope* script_scope() const { return script_scope_; }
void set_script_scope(DeclarationScope* script_scope) {
script_scope_ = script_scope;
@@ -263,13 +254,13 @@ class V8_EXPORT_PRIVATE ParseInfo {
kIsAsmWasmBroken = 1 << 12,
kOnBackgroundThread = 1 << 13,
kWrappedAsFunction = 1 << 14, // Implicitly wrapped as function.
+ kAllowEvalCache = 1 << 15,
};
//------------- Inputs to parsing and scope analysis -----------------------
std::shared_ptr<Zone> zone_;
unsigned flags_;
v8::Extension* extension_;
- ScriptCompiler::CompileOptions compile_options_;
DeclarationScope* script_scope_;
UnicodeCache* unicode_cache_;
uintptr_t stack_limit_;
@@ -287,7 +278,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
- ScriptData** cached_data_; // used if available, populated if requested.
ConsumedPreParsedScopeData consumed_preparsed_scope_data_;
std::shared_ptr<AstValueFactory> ast_value_factory_;
const class AstStringConstants* ast_string_constants_;
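
The new kAllowEvalCache flag joins the other ParseInfo bits in a single unsigned mask, with FLAG_ACCESSOR generating a getter/setter pair per flag. A sketch of what such a macro might expand to; MiniParseInfo and the macro body are invented, not the real FLAG_ACCESSOR definition:

#include <cassert>

// Sketch of the bitmask-flag pattern behind FLAG_ACCESSOR (names invented).
class MiniParseInfo {
 public:
  enum Flag : unsigned {
    kToplevel = 1 << 0,
    kAllowEvalCache = 1 << 15,
  };

// Each FLAG_ACCESSOR(flag, getter, setter) expands to roughly this pair:
#define FLAG_ACCESSOR(flag, getter, setter)            \
  bool getter() const { return (flags_ & flag) != 0; } \
  void setter(bool v = true) {                         \
    flags_ = v ? (flags_ | flag) : (flags_ & ~flag);   \
  }

  FLAG_ACCESSOR(kToplevel, is_toplevel, set_toplevel)
  FLAG_ACCESSOR(kAllowEvalCache, allow_eval_cache, set_allow_eval_cache)
#undef FLAG_ACCESSOR

 private:
  unsigned flags_ = 0;
};

int main() {
  MiniParseInfo info;
  info.set_allow_eval_cache();
  assert(info.allow_eval_cache() && !info.is_toplevel());
}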
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index faefe44011..2d608d5f40 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSING_PARSER_BASE_H
-#define V8_PARSING_PARSER_BASE_H
+#ifndef V8_PARSING_PARSER_BASE_H_
+#define V8_PARSING_PARSER_BASE_H_
#include <vector>
@@ -12,6 +12,7 @@
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
#include "src/base/hashmap.h"
+#include "src/base/v8-fallthrough.h"
#include "src/counters.h"
#include "src/globals.h"
#include "src/log.h"
@@ -277,13 +278,13 @@ class ParserBase {
script_id_(script_id),
allow_natives_(false),
allow_harmony_do_expressions_(false),
- allow_harmony_function_sent_(false),
allow_harmony_public_fields_(false),
allow_harmony_static_fields_(false),
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
allow_harmony_optional_catch_binding_(false),
- allow_harmony_private_fields_(false) {}
+ allow_harmony_private_fields_(false),
+ allow_eval_cache_(true) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -291,12 +292,12 @@ class ParserBase {
ALLOW_ACCESSORS(natives);
ALLOW_ACCESSORS(harmony_do_expressions);
- ALLOW_ACCESSORS(harmony_function_sent);
ALLOW_ACCESSORS(harmony_public_fields);
ALLOW_ACCESSORS(harmony_static_fields);
ALLOW_ACCESSORS(harmony_dynamic_import);
ALLOW_ACCESSORS(harmony_import_meta);
ALLOW_ACCESSORS(harmony_optional_catch_binding);
+ ALLOW_ACCESSORS(eval_cache);
#undef ALLOW_ACCESSORS
@@ -398,6 +399,9 @@ class ParserBase {
}
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ void AddSuspend() { suspend_count_++; }
+ int suspend_count() const { return suspend_count_; }
+
FunctionKind kind() const { return scope()->function_kind(); }
void RewindDestructuringAssignments(int pos) {
@@ -425,10 +429,6 @@ class ParserBase {
return &reported_errors_;
}
- ZoneList<RewritableExpressionT>* non_patterns_to_rewrite() {
- return &non_patterns_to_rewrite_;
- }
-
bool next_function_is_likely_called() const {
return next_function_is_likely_called_;
}
@@ -478,13 +478,15 @@ class ParserBase {
DeclarationScope* scope_;
ZoneList<RewritableExpressionT> destructuring_assignments_to_rewrite_;
- ZoneList<RewritableExpressionT> non_patterns_to_rewrite_;
ZoneList<typename ExpressionClassifier::Error> reported_errors_;
// A reason, if any, why this function should not be optimized.
BailoutReason dont_optimize_reason_;
+ // How many suspends are needed for this function.
+ int suspend_count_;
+
// Record whether the next (=== immediately following) function literal is
// preceded by a parenthesis / exclamation mark. Also record the previous
// state.
@@ -1084,6 +1086,8 @@ class ParserBase {
IdentifierT ParseIdentifierName(bool* ok);
+ ExpressionT ParseIdentifierNameOrPrivateName(bool* ok);
+
ExpressionT ParseRegExpLiteral(bool* ok);
ExpressionT ParsePrimaryExpression(bool* is_async, bool* ok);
@@ -1124,10 +1128,10 @@ class ParserBase {
bool* ok);
ExpressionT ParseObjectLiteral(bool* ok);
ClassLiteralPropertyT ParseClassPropertyDefinition(
- ClassLiteralChecker* checker, ClassInfo* class_info, bool has_extends,
- bool* is_computed_name, bool* has_seen_constructor,
- ClassLiteralProperty::Kind* property_kind, bool* is_static,
- bool* has_name_static_property, bool* ok);
+ ClassLiteralChecker* checker, ClassInfo* class_info,
+ IdentifierT* property_name, bool has_extends, bool* is_computed_name,
+ bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
+ bool* is_static, bool* has_name_static_property, bool* ok);
ExpressionT ParseClassFieldInitializer(ClassInfo* class_info, bool is_static,
bool* ok);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
@@ -1407,6 +1411,7 @@ class ParserBase {
// In async generators, if there is an explicit operand to the return
// statement, await the operand.
expr = factory()->NewAwait(expr, kNoSourcePosition);
+ function_state_->AddSuspend();
}
if (is_async_function()) {
return factory()->NewAsyncReturnStatement(expr, pos, end_pos);
@@ -1451,6 +1456,10 @@ class ParserBase {
return this->scanner()->CurrentMatchesContextualEscaped(
Token::CONSTRUCTOR);
}
+ bool IsPrivateConstructor() {
+ return this->scanner()->CurrentMatchesContextualEscaped(
+ Token::PRIVATE_CONSTRUCTOR);
+ }
bool IsPrototype() {
return this->scanner()->CurrentMatchesContextualEscaped(Token::PROTOTYPE);
}
@@ -1542,13 +1551,13 @@ class ParserBase {
bool allow_natives_;
bool allow_harmony_do_expressions_;
- bool allow_harmony_function_sent_;
bool allow_harmony_public_fields_;
bool allow_harmony_static_fields_;
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
bool allow_harmony_optional_catch_binding_;
bool allow_harmony_private_fields_;
+ bool allow_eval_cache_;
friend class DiscardableZoneScope;
};
@@ -1563,9 +1572,9 @@ ParserBase<Impl>::FunctionState::FunctionState(
outer_function_state_(*function_state_stack),
scope_(scope),
destructuring_assignments_to_rewrite_(16, scope->zone()),
- non_patterns_to_rewrite_(0, scope->zone()),
reported_errors_(16, scope->zone()),
dont_optimize_reason_(BailoutReason::kNoReason),
+ suspend_count_(0),
next_function_is_likely_called_(false),
previous_function_was_likely_called_(false),
contains_function_or_eval_(false) {
@@ -1778,6 +1787,27 @@ typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifierName(
}
template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseIdentifierNameOrPrivateName(bool* ok) {
+ int pos = position();
+ IdentifierT name;
+ ExpressionT key;
+ if (allow_harmony_private_fields() && peek() == Token::PRIVATE_NAME) {
+ Consume(Token::PRIVATE_NAME);
+ name = impl()->GetSymbol();
+ auto key_proxy =
+ impl()->ExpressionFromIdentifier(name, pos, InferName::kNo);
+ key_proxy->set_is_private_field();
+ key = key_proxy;
+ } else {
+ name = ParseIdentifierName(CHECK_OK);
+ key = factory()->NewStringLiteral(name, pos);
+ }
+ impl()->PushLiteralName(name);
+ return key;
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
bool* ok) {
int pos = peek_position();
@@ -1847,7 +1877,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
}
// CoverCallExpressionAndAsyncArrowHead
*is_async = true;
- /* falls through */
+ V8_FALLTHROUGH;
case Token::IDENTIFIER:
case Token::LET:
case Token::STATIC:
@@ -2237,7 +2267,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
}
return expression;
}
- // Fall-through.
+ V8_FALLTHROUGH;
default:
*name = ParseIdentifierName(CHECK_OK);
@@ -2263,8 +2293,8 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
template <typename Impl>
typename ParserBase<Impl>::ClassLiteralPropertyT
ParserBase<Impl>::ParseClassPropertyDefinition(
- ClassLiteralChecker* checker, ClassInfo* class_info, bool has_extends,
- bool* is_computed_name, bool* has_seen_constructor,
+ ClassLiteralChecker* checker, ClassInfo* class_info, IdentifierT* name,
+ bool has_extends, bool* is_computed_name, bool* has_seen_constructor,
ClassLiteralProperty::Kind* property_kind, bool* is_static,
bool* has_name_static_property, bool* ok) {
DCHECK_NOT_NULL(has_seen_constructor);
@@ -2282,19 +2312,19 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
allow_harmony_private_fields());
int name_token_position = scanner()->peek_location().beg_pos;
- IdentifierT name = impl()->NullIdentifier();
+ *name = impl()->NullIdentifier();
ExpressionT name_expression;
if (name_token == Token::STATIC) {
Consume(Token::STATIC);
name_token_position = scanner()->peek_location().beg_pos;
if (peek() == Token::LPAREN) {
kind = PropertyKind::kMethodProperty;
- name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
- name_expression = factory()->NewStringLiteral(name, position());
+ *name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
+ name_expression = factory()->NewStringLiteral(*name, position());
} else if (peek() == Token::ASSIGN || peek() == Token::SEMICOLON ||
peek() == Token::RBRACE) {
- name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
- name_expression = factory()->NewStringLiteral(name, position());
+ *name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
+ name_expression = factory()->NewStringLiteral(*name, position());
} else if (peek() == Token::PRIVATE_NAME) {
DCHECK(allow_harmony_private_fields());
// TODO(gsathya): Make a better error message for this.
@@ -2303,21 +2333,21 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
return impl()->NullLiteralProperty();
} else {
*is_static = true;
- name_expression = ParsePropertyName(&name, &kind, &is_generator, &is_get,
+ name_expression = ParsePropertyName(name, &kind, &is_generator, &is_get,
&is_set, &is_async, is_computed_name,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
} else if (name_token == Token::PRIVATE_NAME) {
Consume(Token::PRIVATE_NAME);
- name = impl()->GetSymbol();
- name_expression = factory()->NewStringLiteral(name, position());
+ *name = impl()->GetSymbol();
+ name_expression = factory()->NewStringLiteral(*name, position());
} else {
- name_expression = ParsePropertyName(&name, &kind, &is_generator, &is_get,
+ name_expression = ParsePropertyName(name, &kind, &is_generator, &is_get,
&is_set, &is_async, is_computed_name,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
- if (!*has_name_static_property && *is_static && impl()->IsName(name)) {
+ if (!*has_name_static_property && *is_static && impl()->IsName(*name)) {
*has_name_static_property = true;
}
@@ -2333,13 +2363,15 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
case PropertyKind::kShorthandProperty:
case PropertyKind::kValueProperty:
if (allow_harmony_public_fields() || allow_harmony_private_fields()) {
- *property_kind = ClassLiteralProperty::FIELD;
+ *property_kind = name_token == Token::PRIVATE_NAME
+ ? ClassLiteralProperty::PRIVATE_FIELD
+ : ClassLiteralProperty::PUBLIC_FIELD;
if (*is_static && !allow_harmony_static_fields()) {
ReportUnexpectedToken(Next());
*ok = false;
return impl()->NullLiteralProperty();
}
- if (!*is_computed_name && name_token != Token::PRIVATE_NAME) {
+ if (!*is_computed_name) {
checker->CheckClassFieldName(*is_static,
CHECK_OK_CUSTOM(NullLiteralProperty));
}
@@ -2349,7 +2381,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
name_expression, initializer, *property_kind, *is_static,
*is_computed_name);
- impl()->SetFunctionNameFromPropertyName(result, name);
+ impl()->SetFunctionNameFromPropertyName(result, *name);
return result;
} else {
@@ -2377,14 +2409,14 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
FunctionKind kind = MethodKindFor(is_generator, is_async);
- if (!*is_static && impl()->IsConstructor(name)) {
+ if (!*is_static && impl()->IsConstructor(*name)) {
*has_seen_constructor = true;
kind = has_extends ? FunctionKind::kDerivedConstructor
: FunctionKind::kBaseConstructor;
}
ExpressionT value = impl()->ParseFunctionLiteral(
- name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ *name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
@@ -2394,7 +2426,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
name_expression, value, *property_kind, *is_static,
*is_computed_name);
- impl()->SetFunctionNameFromPropertyName(result, name);
+ impl()->SetFunctionNameFromPropertyName(result, *name);
return result;
}
@@ -2409,14 +2441,14 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
// Runtime_DefineAccessorPropertyUnchecked and since we can determine
// this statically we can skip the extra runtime check.
name_expression =
- factory()->NewStringLiteral(name, name_expression->position());
+ factory()->NewStringLiteral(*name, name_expression->position());
}
FunctionKind kind = is_get ? FunctionKind::kGetterFunction
: FunctionKind::kSetterFunction;
FunctionLiteralT value = impl()->ParseFunctionLiteral(
- name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ *name, scanner()->location(), kSkipFunctionNameCheck, kind,
FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
FunctionLiteral::kAccessorOrMethod, language_mode(), nullptr,
@@ -2430,7 +2462,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
const AstRawString* prefix =
is_get ? ast_value_factory()->get_space_string()
: ast_value_factory()->set_space_string();
- impl()->SetFunctionNameFromPropertyName(result, name, prefix);
+ impl()->SetFunctionNameFromPropertyName(result, *name, prefix);
return result;
}
case PropertyKind::kSpreadProperty:
@@ -3035,6 +3067,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
// a regular yield, given only one look-ahead token.
if (!delegating) break;
// Delegating yields require an RHS; fall through.
+ V8_FALLTHROUGH;
default:
expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
ValidateExpression(CHECK_OK);
@@ -3045,6 +3078,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
if (delegating) {
ExpressionT yieldstar = factory()->NewYieldStar(expression, pos);
impl()->RecordSuspendSourceRange(yieldstar, PositionAfterSemicolon());
+ function_state_->AddSuspend();
+ if (IsAsyncGeneratorFunction(function_state_->kind())) {
+ // iterator_close and delegated_iterator_output suspend ids.
+ function_state_->AddSuspend();
+ function_state_->AddSuspend();
+ }
return yieldstar;
}
@@ -3053,6 +3092,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
ExpressionT yield =
factory()->NewYield(expression, pos, Suspend::kOnExceptionThrow);
impl()->RecordSuspendSourceRange(yield, PositionAfterSemicolon());
+ function_state_->AddSuspend();
return yield;
}
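
The parser now tallies suspend points per function as it parses: one AddSuspend for a plain yield, one for a yield*, plus two extra bookkeeping suspends (iterator_close and delegated_iterator_output) when the yield* sits inside an async generator; await gets its own AddSuspend a few hunks below. An illustrative tally under that scheme (not counting the implicit initial yield that BuildInitialYield adds to every generator; tick and inner are placeholders):

    async function* pump(inner) {
      yield 1;        // +1 suspend
      await tick();   // +1 suspend
      yield* inner;   // +1 for the yield* itself, and +2 more here because
    }                 // this is an async generator (iterator_close and
                      // delegated_iterator_output)
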
@@ -3191,13 +3231,19 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
ExpressionT expression = ParseUnaryExpression(CHECK_OK);
ValidateExpression(CHECK_OK);
- if (op == Token::DELETE && is_strict(language_mode())) {
- if (impl()->IsIdentifier(expression)) {
+ if (op == Token::DELETE) {
+ if (impl()->IsIdentifier(expression) && is_strict(language_mode())) {
// "delete identifier" is a syntax error in strict mode.
ReportMessage(MessageTemplate::kStrictDelete);
*ok = false;
return impl()->NullExpression();
}
+
+ if (impl()->IsPropertyWithPrivateFieldKey(expression)) {
+ ReportMessage(MessageTemplate::kDeletePrivateField);
+ *ok = false;
+ return impl()->NullExpression();
+ }
}
if (peek() == Token::EXP) {
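
The delete checks are reorganized so that `delete identifier` remains a strict-mode-only error, while deleting a property keyed by a private field is now rejected in any language mode. Roughly, assuming --harmony-private-fields:

    class A {
      #x = 1;
      drop() {
        delete this.#x;   // always a SyntaxError (kDeletePrivateField),
      }                   // strict or sloppy
    }
    // Unchanged: in strict mode, `var y; delete y;` is still kStrictDelete.
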
@@ -3239,6 +3285,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
MessageTemplate::kInvalidDestructuringTarget);
ExpressionT expr = factory()->NewAwait(value, await_pos);
+ function_state_->AddSuspend();
impl()->RecordSuspendSourceRange(expr, PositionAfterSemicolon());
return expr;
} else {
@@ -3399,10 +3446,8 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
ArrowFormalParametersUnexpectedToken();
Consume(Token::PERIOD);
int pos = position();
- IdentifierT name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewStringLiteral(name, pos), pos);
- impl()->PushLiteralName(name);
+ ExpressionT key = ParseIdentifierNameOrPrivateName(CHECK_OK);
+ result = factory()->NewProperty(result, key, pos);
break;
}
@@ -3513,22 +3558,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
Consume(Token::FUNCTION);
int function_token_position = position();
- if (allow_harmony_function_sent() && peek() == Token::PERIOD) {
- // function.sent
- int pos = position();
- ExpectMetaProperty(Token::SENT, "function.sent", pos, CHECK_OK);
-
- if (!is_generator()) {
- // TODO(neis): allow escaping into closures?
- impl()->ReportMessageAt(scanner()->location(),
- MessageTemplate::kUnexpectedFunctionSent);
- *ok = false;
- return impl()->NullExpression();
- }
-
- return impl()->FunctionSentExpression(pos);
- }
-
FunctionKind function_kind = Check(Token::MUL)
? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction;
@@ -3699,16 +3728,8 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
Consume(Token::PERIOD);
int pos = peek_position();
- IdentifierT name;
- if (allow_harmony_private_fields() && peek() == Token::PRIVATE_NAME) {
- Consume(Token::PRIVATE_NAME);
- name = impl()->GetSymbol();
- } else {
- name = ParseIdentifierName(CHECK_OK);
- }
- expression = factory()->NewProperty(
- expression, factory()->NewStringLiteral(name, pos), pos);
- impl()->PushLiteralName(name);
+ ExpressionT key = ParseIdentifierNameOrPrivateName(CHECK_OK);
+ expression = factory()->NewProperty(expression, key, pos);
break;
}
case Token::TEMPLATE_SPAN:
@@ -4354,6 +4375,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
StatementListT body = impl()->NullStatementList();
int expected_property_count = -1;
+ int suspend_count = 0;
int function_literal_id = GetNextFunctionLiteralId();
FunctionKind kind = formal_parameters.scope->function_kind();
@@ -4440,6 +4462,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
impl()->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
impl()->RewriteDestructuringAssignments();
+ suspend_count = function_state.suspend_count();
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
@@ -4451,6 +4474,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
formal_parameters.scope->start_position(), has_braces,
function_literal_id, produced_preparsed_scope_data);
+ function_literal->set_suspend_count(suspend_count);
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
@@ -4522,27 +4546,29 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
bool is_static;
ClassLiteralProperty::Kind property_kind;
ExpressionClassifier property_classifier(this);
+ IdentifierT property_name;
// If we haven't seen the constructor yet, it potentially is the next
// property.
bool is_constructor = !class_info.has_seen_constructor;
ClassLiteralPropertyT property = ParseClassPropertyDefinition(
- &checker, &class_info, has_extends, &is_computed_name,
+ &checker, &class_info, &property_name, has_extends, &is_computed_name,
&class_info.has_seen_constructor, &property_kind, &is_static,
&class_info.has_name_static_property, CHECK_OK);
if (!class_info.has_static_computed_names && is_static &&
is_computed_name) {
class_info.has_static_computed_names = true;
}
- if (is_computed_name && property_kind == ClassLiteralProperty::FIELD) {
+ if (is_computed_name &&
+ property_kind == ClassLiteralProperty::PUBLIC_FIELD) {
class_info.computed_field_count++;
}
is_constructor &= class_info.has_seen_constructor;
ValidateExpression(CHECK_OK);
AccumulateFormalParameterContainmentErrors();
- impl()->DeclareClassProperty(name, property, property_kind, is_static,
- is_constructor, is_computed_name, &class_info,
- CHECK_OK);
+ impl()->DeclareClassProperty(name, property, property_name, property_kind,
+ is_static, is_constructor, is_computed_name,
+ &class_info, CHECK_OK);
impl()->InferFunctionName();
}
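
ParseClassPropertyDefinition now reports the parsed property name back through an out parameter and splits the old FIELD kind into PUBLIC_FIELD and PRIVATE_FIELD, so DeclareClassProperty can treat each kind differently. A sketch of which kind each class member gets, assuming the --harmony-public-fields, --harmony-private-fields, and --harmony-static-fields flags:

    class Shape {
      edges = 4;             // PUBLIC_FIELD
      static kind = 'poly';  // PUBLIC_FIELD with is_static
      #area = 0;             // PRIVATE_FIELD
      ['c' + 'x'] = 0;       // PUBLIC_FIELD with is_computed_name
      render() {}            // a method, not a field
    }
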
@@ -4644,6 +4670,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
// TEMPLATE_SPAN, or a TEMPLATE_TAIL.
DCHECK(peek() == Token::TEMPLATE_SPAN || peek() == Token::TEMPLATE_TAIL);
+ if (tagged) {
+ // TaggedTemplate expressions prevent the eval compilation cache from being
+ // used. This flag is only used if an eval is being parsed.
+ set_allow_eval_cache(false);
+ }
+
bool forbid_illegal_escapes = !tagged;
// If we reach a TEMPLATE_TAIL first, we are parsing a NoSubstitutionTemplate.
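
Tagged templates now poison the eval compilation cache: the parser sets allow_eval_cache(false) whenever a tagged template appears in eval'd source, since a cached eval result could let two separate eval calls share one template object. An illustrative consequence:

    function tag(strings) { return strings; }
    const a = eval('tag`x`');
    const b = eval('tag`x`');   // with a shared cached parse, both evals
                                // could observe the same template object;
                                // without it, each eval gets a fresh one
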
@@ -4794,7 +4826,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseV8Intrinsic(
ExpressionClassifier classifier(this);
ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
- DCHECK(!spread_pos.IsValid());
+ if (spread_pos.IsValid()) {
+ *ok = false;
+ ReportMessageAt(spread_pos, MessageTemplate::kIntrinsicWithSpread,
+ kSyntaxError);
+ return impl()->NullExpression();
+ }
return impl()->NewV8Intrinsic(name, args, pos, ok);
}
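
Spread arguments to a %-intrinsic used to be ruled out only by a DCHECK; the parser now reports a proper syntax error (kIntrinsicWithSpread) instead of failing a debug-mode assertion. Illustratively, assuming --allow-natives-syntax (%DeoptimizeFunction here is just an example intrinsic):

    function f(...fns) {
      %DeoptimizeFunction(...fns);   // now a SyntaxError rather than a
    }                                // DCHECK failure in debug builds
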
@@ -4943,7 +4980,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
Consume(Token::ASYNC);
return ParseAsyncFunctionDeclaration(nullptr, false, ok);
}
- /* falls through */
+ break;
default:
break;
}
@@ -5044,7 +5081,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
*ok = false;
return impl()->NullStatement();
}
- // Falls through
+ V8_FALLTHROUGH;
default:
return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
}
@@ -5653,6 +5690,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
ZoneList<const AstRawString*>* labels, bool* ok) {
+ // Either a standard for loop
+ // for (<init>; <cond>; <next>) { ... }
+ // or a for-each loop
+ // for (<each> of|in <iterable>) { ... }
+ //
+ // We have to parse the initial declaration/expression after the 'for ('
+ // before we know whether this is a for loop or a for-each loop.
+
int stmt_pos = peek_position();
ForInfo for_info(this);
@@ -6187,7 +6232,7 @@ void ParserBase<Impl>::ClassLiteralChecker::CheckClassFieldName(bool is_static,
return;
}
- if (IsConstructor()) {
+ if (IsConstructor() || IsPrivateConstructor()) {
this->parser()->ReportMessage(MessageTemplate::kConstructorClassField);
*ok = false;
return;
@@ -6201,4 +6246,4 @@ void ParserBase<Impl>::ClassLiteralChecker::CheckClassFieldName(bool is_static,
} // namespace internal
} // namespace v8
-#endif // V8_PARSING_PARSER_BASE_H
+#endif // V8_PARSING_PARSER_BASE_H_
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 0497958c82..8dc16a8b35 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -29,82 +29,8 @@
namespace v8 {
namespace internal {
-ScriptData::ScriptData(const byte* data, int length)
- : owns_data_(false), rejected_(false), data_(data), length_(length) {
- if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
- byte* copy = NewArray<byte>(length);
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
- CopyBytes(copy, data, length);
- data_ = copy;
- AcquireDataOwnership();
- }
-}
-
-FunctionEntry ParseData::GetFunctionEntry(int start) {
- // The current pre-data entry must be a FunctionEntry with the given
- // start position.
- if ((function_index_ + FunctionEntry::kSize <= Length()) &&
- (static_cast<int>(Data()[function_index_]) == start)) {
- int index = function_index_;
- function_index_ += FunctionEntry::kSize;
- Vector<unsigned> subvector(&(Data()[index]), FunctionEntry::kSize);
- return FunctionEntry(subvector);
- }
- return FunctionEntry();
-}
-
-
-int ParseData::FunctionCount() {
- int functions_size = FunctionsSize();
- if (functions_size < 0) return 0;
- if (functions_size % FunctionEntry::kSize != 0) return 0;
- return functions_size / FunctionEntry::kSize;
-}
-
-
-bool ParseData::IsSane() {
- if (!IsAligned(script_data_->length(), sizeof(unsigned))) return false;
- // Check that the header data is valid and doesn't point to
- // positions outside the store.
- int data_length = Length();
- if (data_length < PreparseDataConstants::kHeaderSize) return false;
- if (Magic() != PreparseDataConstants::kMagicNumber) return false;
- if (Version() != PreparseDataConstants::kCurrentVersion) return false;
- // Check that the space allocated for function entries is sane.
- int functions_size = FunctionsSize();
- if (functions_size < 0) return false;
- if (functions_size % FunctionEntry::kSize != 0) return false;
- // Check that the total size has room for header and function entries.
- int minimum_size =
- PreparseDataConstants::kHeaderSize + functions_size;
- if (data_length < minimum_size) return false;
- return true;
-}
-
-
-void ParseData::Initialize() {
- // Prepares state for use.
- int data_length = Length();
- if (data_length >= PreparseDataConstants::kHeaderSize) {
- function_index_ = PreparseDataConstants::kHeaderSize;
- }
-}
-
-
-unsigned ParseData::Magic() {
- return Data()[PreparseDataConstants::kMagicOffset];
-}
-
-
-unsigned ParseData::Version() {
- return Data()[PreparseDataConstants::kVersionOffset];
-}
-int ParseData::FunctionsSize() {
- return static_cast<int>(Data()[PreparseDataConstants::kFunctionsSizeOffset]);
-}
-
// Helper for putting parts of the parse results into a temporary zone when
// parsing inner function bodies.
class DiscardableZoneScope {
@@ -153,17 +79,6 @@ class DiscardableZoneScope {
DISALLOW_COPY_AND_ASSIGN(DiscardableZoneScope);
};
-void Parser::SetCachedData(ParseInfo* info) {
- DCHECK_NULL(cached_parse_data_);
- if (consume_cached_parse_data()) {
- if (allow_lazy_) {
- cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
- if (cached_parse_data_ != nullptr) return;
- }
- compile_options_ = ScriptCompiler::kNoCompileOptions;
- }
-}
-
FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
bool call_super, int pos,
int end_pos) {
@@ -403,16 +318,6 @@ Expression* Parser::NewTargetExpression(int pos) {
return proxy;
}
-Expression* Parser::FunctionSentExpression(int pos) {
- // We desugar function.sent into %_GeneratorGetInputOrDebugPos(generator).
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
- VariableProxy* generator = factory()->NewVariableProxy(
- function_state_->scope()->generator_object_var());
- args->Add(generator, zone());
- return factory()->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
- args, pos);
-}
-
Expression* Parser::ImportMetaExpression(int pos) {
return factory()->NewCallRuntime(
Runtime::kInlineGetImportMetaObject,
@@ -511,11 +416,8 @@ Parser::Parser(ParseInfo* info)
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
target_stack_(nullptr),
- compile_options_(info->compile_options()),
- cached_parse_data_(nullptr),
total_preparse_skipped_(0),
temp_zoned_(false),
- log_(nullptr),
consumed_preparsed_scope_data_(info->consumed_preparsed_scope_data()),
parameters_end_pos_(info->parameters_end_pos()) {
// Even though we were passed ParseInfo, we should not store it in
@@ -541,7 +443,6 @@ Parser::Parser(ParseInfo* info)
info->extension() == nullptr && can_compile_lazily;
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
- set_allow_harmony_function_sent(FLAG_harmony_function_sent);
set_allow_harmony_public_fields(FLAG_harmony_public_fields);
set_allow_harmony_static_fields(FLAG_harmony_static_fields);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
@@ -603,18 +504,6 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
// Initialize parser state.
- ParserLogger logger;
-
- if (produce_cached_parse_data()) {
- if (allow_lazy_) {
- log_ = &logger;
- } else {
- compile_options_ = ScriptCompiler::kNoCompileOptions;
- }
- } else if (consume_cached_parse_data()) {
- cached_parse_data_->Initialize();
- }
-
DeserializeScopeChain(info, info->maybe_outer_scope_info());
scanner_.Initialize(info->character_stream(), info->is_module());
@@ -623,11 +512,6 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
HandleSourceURLComments(isolate, info->script());
- if (produce_cached_parse_data() && result != nullptr) {
- *info->cached_data() = logger.GetScriptData();
- }
- log_ = nullptr;
-
if (V8_UNLIKELY(FLAG_log_function_events) && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
const char* event_name = "parse-eval";
@@ -742,6 +626,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
result = factory()->NewScriptOrEvalFunctionLiteral(
scope, body, function_state.expected_property_count(),
parameter_count);
+ result->set_suspend_count(function_state.suspend_count());
}
}
@@ -904,8 +789,9 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
scope->set_start_position(info->start_position());
ExpressionClassifier formals_classifier(this);
ParserFormalParameters formals(scope);
- int rewritable_length =
- function_state.destructuring_assignments_to_rewrite().length();
+ // The outer FunctionState should not contain destructuring assignments.
+ DCHECK_EQ(0,
+ function_state.destructuring_assignments_to_rewrite().length());
{
// Parsing patterns as variable reference expression creates
// NewUnresolved references in current scope. Enter arrow function
@@ -943,8 +829,12 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
// Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
// not be observable, or else the preparser would have failed.
- Expression* expression =
- ParseArrowFunctionLiteral(true, formals, rewritable_length, &ok);
+ const bool accept_IN = true;
+ // Any destructuring assignments in the current FunctionState
+ // actually belong to the arrow function itself.
+ const int rewritable_length = 0;
+ Expression* expression = ParseArrowFunctionLiteral(
+ accept_IN, formals, rewritable_length, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
@@ -957,10 +847,6 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
// must produce a FunctionLiteral.
DCHECK(expression->IsFunctionLiteral());
result = expression->AsFunctionLiteral();
- // Rewrite destructuring assignments in the parameters. (The ones
- // inside the function body are rewritten by
- // ParseArrowFunctionLiteral.)
- RewriteDestructuringAssignments();
} else {
ok = false;
}
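
When re-parsing a lazily compiled arrow function, the outer FunctionState starts empty, so any destructuring assignments recorded while scanning the parameters belong to the arrow function itself; passing rewritable_length = 0 lets ParseArrowFunctionLiteral rewrite them all, which is why the separate RewriteDestructuringAssignments call below could be dropped. The kind of parameter this affects, illustratively:

    const h = ([a, b] = [1, 2]) => a + b;   // the destructuring default is
                                            // rewritten inside
                                            // ParseArrowFunctionLiteral
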
@@ -1293,7 +1179,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
result = ParseAsyncFunctionDeclaration(&local_names, true, CHECK_OK);
break;
}
- /* falls through */
+ V8_FALLTHROUGH;
default: {
int pos = position();
@@ -1531,8 +1417,7 @@ Statement* Parser::DeclareFunction(const AstRawString* variable_name,
ZoneList<const AstRawString*>* names,
bool* ok) {
VariableProxy* proxy =
- factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE);
-
+ factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE, pos);
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, function, pos);
Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
@@ -1831,6 +1716,8 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
// Don't create iterator result for async generators, as the resume methods
// will create it.
+ // TODO(leszeks): This will create another suspend point, which is unnecessary
+ // if there is already an unconditional return in the body.
Statement* final_return = BuildReturnStatement(
factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
try_block->statements()->Add(final_return, zone());
@@ -1900,6 +1787,7 @@ Expression* Parser::BuildIteratorNextResult(VariableProxy* iterator,
Expression* next_call =
factory()->NewCall(next_property, next_arguments, kNoSourcePosition);
if (type == IteratorType::kAsync) {
+ function_state_->AddSuspend();
next_call = factory()->NewAwait(next_call, pos);
}
Expression* result_proxy = factory()->NewVariableProxy(result);
@@ -2681,6 +2569,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
ZoneList<Statement*>* body = nullptr;
int expected_property_count = -1;
+ int suspend_count = -1;
int num_parameters = -1;
int function_length = -1;
bool has_duplicate_parameters = false;
@@ -2747,10 +2636,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (should_preparse) {
scope->AnalyzePartially(&previous_zone_ast_node_factory);
} else {
- body = ParseFunction(function_name, pos, kind, function_type, scope,
- &num_parameters, &function_length,
- &has_duplicate_parameters, &expected_property_count,
- arguments_for_wrapped_function, CHECK_OK);
+ body = ParseFunction(
+ function_name, pos, kind, function_type, scope, &num_parameters,
+ &function_length, &has_duplicate_parameters, &expected_property_count,
+ &suspend_count, arguments_for_wrapped_function, CHECK_OK);
}
DCHECK_EQ(should_preparse, temp_zoned_);
@@ -2808,6 +2697,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_length, duplicate_parameters, function_type, eager_compile_hint,
pos, true, function_literal_id, produced_preparsed_scope_data);
function_literal->set_function_token_position(function_token_pos);
+ function_literal->set_suspend_count(suspend_count);
if (should_infer_name) {
DCHECK_NOT_NULL(fni_);
@@ -2826,38 +2716,11 @@ Parser::LazyParsingResult Parser::SkipFunction(
DCHECK_NE(kNoSourcePosition, function_scope->start_position());
DCHECK_EQ(kNoSourcePosition, parameters_end_pos_);
- if (produce_cached_parse_data()) CHECK(log_);
DCHECK_IMPLIES(IsArrowFunction(kind),
scanner()->current_token() == Token::ARROW);
- // Inner functions are not part of the cached data.
- if (!is_inner_function && consume_cached_parse_data() &&
- !cached_parse_data_->rejected()) {
- // If we have cached data, we use it to skip parsing the function. The data
- // contains the information we need to construct the lazy function.
- FunctionEntry entry =
- cached_parse_data_->GetFunctionEntry(function_scope->start_position());
- // Check that cached data is valid. If not, mark it as invalid (the embedder
- // handles it). Note that end position greater than end of stream is safe,
- // and hard to check.
- if (entry.is_valid() &&
- entry.end_pos() > function_scope->start_position()) {
- total_preparse_skipped_ += entry.end_pos() - position();
- function_scope->set_end_position(entry.end_pos());
- scanner()->SeekForward(entry.end_pos() - 1);
- Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
- *num_parameters = entry.num_parameters();
- SetLanguageMode(function_scope, entry.language_mode());
- if (entry.uses_super_property())
- function_scope->RecordSuperPropertyUsage();
- SkipFunctionLiterals(entry.num_inner_functions());
- return kLazyParsingComplete;
- }
- cached_parse_data_->Reject();
- }
-
- // FIXME(marja): There are 3 ways to skip functions now. Unify them.
+ // FIXME(marja): There are 2 ways to skip functions now. Unify them.
DCHECK_NOT_NULL(consumed_preparsed_scope_data_);
if (consumed_preparsed_scope_data_->HasData()) {
DCHECK(FLAG_preparser_scope_analysis);
@@ -2908,6 +2771,9 @@ Parser::LazyParsingResult Parser::SkipFunction(
*ok = false;
return kLazyParsingComplete;
}
+
+ set_allow_eval_cache(reusable_preparser()->allow_eval_cache());
+
PreParserLogger* logger = reusable_preparser()->logger();
function_scope->set_end_position(logger->end());
Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
@@ -2915,13 +2781,6 @@ Parser::LazyParsingResult Parser::SkipFunction(
function_scope->end_position() - function_scope->start_position();
*num_parameters = logger->num_parameters();
SkipFunctionLiterals(logger->num_inner_functions());
- if (!is_inner_function && produce_cached_parse_data()) {
- DCHECK(log_);
- log_->LogFunction(function_scope->start_position(),
- function_scope->end_position(), *num_parameters,
- language_mode(), function_scope->NeedsHomeObject(),
- logger->num_inner_functions());
- }
return kLazyParsingComplete;
}
@@ -3136,7 +2995,7 @@ Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
Expression* call_runtime =
- factory()->NewCallRuntime(Context::PROMISE_RESOLVE_INDEX, args, pos);
+ factory()->NewCallRuntime(Runtime::kInlineResolvePromise, args, pos);
return factory()->NewBinaryOperation(
Token::COMMA, call_runtime,
factory()->NewVariableProxy(PromiseVariable()), pos);
@@ -3150,8 +3009,8 @@ Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
args->Add(value, zone());
args->Add(factory()->NewBooleanLiteral(false, pos), zone());
- Expression* call_runtime = factory()->NewCallRuntime(
- Context::PROMISE_INTERNAL_REJECT_INDEX, args, pos);
+ Expression* call_runtime =
+ factory()->NewCallRuntime(Runtime::kInlineRejectPromise, args, pos);
return factory()->NewBinaryOperation(
Token::COMMA, call_runtime,
factory()->NewVariableProxy(PromiseVariable()), pos);
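
BuildResolvePromise and BuildRejectPromise now call the inline runtime functions kInlineResolvePromise and kInlineRejectPromise instead of going through context-indexed builtins; the shape of the desugaring is unchanged. Loosely, for the promise machinery behind an async function:

    async function settle(p) {
      return await p;   // the value an async function produces is handed to
    }                   // its promise via the resolve path; a thrown error
                        // goes through the reject path
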
@@ -3175,6 +3034,7 @@ Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
// The position of the yield is important for reporting the exception
// caused by calling the .throw method on a generator suspended at the
// initial yield (i.e. right after generator instantiation).
+ function_state_->AddSuspend();
return factory()->NewYield(yield_result, scope()->start_position(),
Suspend::kOnExceptionThrow);
}
@@ -3184,6 +3044,7 @@ ZoneList<Statement*>* Parser::ParseFunction(
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters, int* function_length,
bool* has_duplicate_parameters, int* expected_property_count,
+ int* suspend_count,
ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok) {
ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
@@ -3268,6 +3129,7 @@ ZoneList<Statement*>* Parser::ParseFunction(
!classifier()->is_valid_formal_parameter_list_without_duplicates();
*expected_property_count = function_state.expected_property_count();
+ *suspend_count = function_state.suspend_count();
return body;
}
@@ -3308,6 +3170,7 @@ Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name,
// - properties
void Parser::DeclareClassProperty(const AstRawString* class_name,
ClassLiteralProperty* property,
+ const AstRawString* property_name,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
bool is_computed_name, ClassInfo* class_info,
@@ -3322,7 +3185,8 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
return;
}
- if (kind != ClassLiteralProperty::FIELD) {
+ if (kind != ClassLiteralProperty::PUBLIC_FIELD &&
+ kind != ClassLiteralProperty::PRIVATE_FIELD) {
class_info->properties->Add(property, zone());
return;
}
@@ -3331,12 +3195,14 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
if (is_static) {
DCHECK(allow_harmony_static_fields());
+ DCHECK_EQ(kind, ClassLiteralProperty::PUBLIC_FIELD);
class_info->static_fields->Add(property, zone());
} else {
class_info->instance_fields->Add(property, zone());
}
if (is_computed_name) {
+ DCHECK_EQ(kind, ClassLiteralProperty::PUBLIC_FIELD);
// We create a synthetic variable name here so that scope
// analysis doesn't dedupe the vars.
Variable* computed_name_var = CreateSyntheticContextVariable(
@@ -3346,6 +3212,13 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
property->set_computed_name_var(computed_name_var);
class_info->properties->Add(property, zone());
}
+
+ if (kind == ClassLiteralProperty::PRIVATE_FIELD) {
+ Variable* private_field_name_var =
+ CreateSyntheticContextVariable(property_name, CHECK_OK_VOID);
+ property->set_private_field_name_var(private_field_name_var);
+ class_info->properties->Add(property, zone());
+ }
}
FunctionLiteral* Parser::CreateInitializerFunction(
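
On the parser side, each private field now gets a synthetic context variable carrying its name, created through the same CreateSyntheticContextVariable path that computed field names already use, so references such as this.#x resolve through ordinary scope analysis. Illustratively, under --harmony-private-fields:

    class P {
      #a = 1;
      #b = 2;     // '#a' and '#b' each get their own CONST synthetic
      sum() {     // private_field_name_var, so scope analysis can never
        return this.#a + this.#b;   // confuse the two names
      }
    }
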
@@ -3432,6 +3305,15 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
}
}
+bool Parser::IsPropertyWithPrivateFieldKey(Expression* expression) {
+ if (!expression->IsProperty()) return false;
+ Property* property = expression->AsProperty();
+
+ if (!property->key()->IsVariableProxy()) return false;
+ VariableProxy* key = property->key()->AsVariableProxy();
+
+ return key->is_private_field();
+}
void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
// For each var-binding that shadows a parameter, insert an assignment
@@ -3550,15 +3432,6 @@ void Parser::ParseOnBackground(ParseInfo* info) {
DCHECK_NULL(info->literal());
FunctionLiteral* result = nullptr;
- ParserLogger logger;
- if (produce_cached_parse_data()) {
- if (allow_lazy_) {
- log_ = &logger;
- } else {
- compile_options_ = ScriptCompiler::kNoCompileOptions;
- }
- }
-
scanner_.Initialize(info->character_stream(), info->is_module());
DCHECK(info->maybe_outer_scope_info().is_null());
@@ -3582,11 +3455,6 @@ void Parser::ParseOnBackground(ParseInfo* info) {
// We cannot internalize on a background thread; a foreground task will take
// care of calling AstValueFactory::Internalize just before compilation.
-
- if (produce_cached_parse_data()) {
- if (result != nullptr) *info->cached_data() = logger.GetScriptData();
- log_ = nullptr;
- }
}
Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
@@ -3654,9 +3522,8 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
return expr;
} else {
// GetTemplateObject
- const int32_t hash = ComputeTemplateLiteralHash(lit);
Expression* template_object =
- factory()->NewGetTemplateObject(cooked_strings, raw_strings, hash, pos);
+ factory()->NewGetTemplateObject(cooked_strings, raw_strings, pos);
// Call TagFn
ZoneList<Expression*>* call_args =
@@ -3669,51 +3536,6 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
namespace {
-// http://burtleburtle.net/bob/hash/integer.html
-uint32_t HalfAvalance(uint32_t a) {
- a = (a + 0x479AB41D) + (a << 8);
- a = (a ^ 0xE4AA10CE) ^ (a >> 5);
- a = (a + 0x9942F0A6) - (a << 14);
- a = (a ^ 0x5AEDD67D) ^ (a >> 3);
- a = (a + 0x17BEA992) + (a << 7);
- return a;
-}
-
-} // namespace
-
-int32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
- const ZoneList<const AstRawString*>* raw_strings = lit->raw();
- int total = raw_strings->length();
- DCHECK_GT(total, 0);
-
- uint32_t running_hash = 0;
-
- for (int index = 0; index < total; ++index) {
- if (index) {
- running_hash = StringHasher::ComputeRunningHashOneByte(
- running_hash, "${}", 3);
- }
-
- const AstRawString* raw_string = raw_strings->at(index);
- if (raw_string->is_one_byte()) {
- const char* data = reinterpret_cast<const char*>(raw_string->raw_data());
- running_hash = StringHasher::ComputeRunningHashOneByte(
- running_hash, data, raw_string->length());
- } else {
- const uc16* data = reinterpret_cast<const uc16*>(raw_string->raw_data());
- running_hash = StringHasher::ComputeRunningHash(running_hash, data,
- raw_string->length());
- }
- }
-
- // Pass {running_hash} through a decent 'half avalance' hash function
- // and take the most significant bits (in Smi range).
- return static_cast<int32_t>(HalfAvalance(running_hash)) >>
- (sizeof(int32_t) * CHAR_BIT - kSmiValueSize);
-}
-
-namespace {
-
bool OnlyLastArgIsSpread(ZoneList<Expression*>* args) {
for (int i = 0; i < args->length() - 1; i++) {
if (args->at(i)->IsSpread()) {
@@ -3911,6 +3733,9 @@ void Parser::RewriteDestructuringAssignments() {
// pair.scope may already have been removed by FinalizeBlockScope in the
// meantime.
Scope* scope = to_rewrite->scope()->GetUnremovedScope();
+ // The scope at the time of the rewriting and the scope at the original
+ // parsing should be in the same function.
+ DCHECK(scope->GetClosureScope() == scope_->GetClosureScope());
BlockState block_state(&scope_, scope);
RewriteDestructuringAssignment(to_rewrite);
}
@@ -4070,6 +3895,7 @@ void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
+ function_state_->AddSuspend();
call = factory()->NewAwait(call, nopos);
}
Expression* output_proxy = factory()->NewVariableProxy(var_output);
@@ -4288,6 +4114,7 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
+ function_state_->AddSuspend();
call = factory()->NewAwait(call, nopos);
}
@@ -4315,6 +4142,7 @@ void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
Expression* call =
factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
if (type == IteratorType::kAsync) {
+ function_state_->AddSuspend();
call = factory()->NewAwait(call, nopos);
}
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index f92eddcd9d..dcc222da0f 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -14,7 +14,6 @@
#include "src/globals.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/parsing.h"
-#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/utils.h"
@@ -27,7 +26,6 @@ namespace internal {
class ConsumedPreParsedScopeData;
class ParseInfo;
-class ScriptData;
class ParserTarget;
class ParserTargetScope;
class PendingCompilationErrorHandler;
@@ -77,47 +75,6 @@ class FunctionEntry BASE_EMBEDDED {
};
-// Wrapper around ScriptData to provide parser-specific functionality.
-class ParseData {
- public:
- static ParseData* FromCachedData(ScriptData* cached_data) {
- ParseData* pd = new ParseData(cached_data);
- if (pd->IsSane()) return pd;
- cached_data->Reject();
- delete pd;
- return nullptr;
- }
-
- void Initialize();
- FunctionEntry GetFunctionEntry(int start);
- int FunctionCount();
-
- unsigned* Data() { // Writable data as unsigned int array.
- return reinterpret_cast<unsigned*>(const_cast<byte*>(script_data_->data()));
- }
-
- void Reject() { script_data_->Reject(); }
-
- bool rejected() const { return script_data_->rejected(); }
-
- private:
- explicit ParseData(ScriptData* script_data) : script_data_(script_data) {}
-
- bool IsSane();
- unsigned Magic();
- unsigned Version();
- int FunctionsSize();
- int Length() const {
- // Script data length is already checked to be a multiple of unsigned size.
- return script_data_->length() / sizeof(unsigned);
- }
-
- ScriptData* script_data_;
- int function_index_;
-
- DISALLOW_COPY_AND_ASSIGN(ParseData);
-};
-
// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
@@ -192,8 +149,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
~Parser() {
delete reusable_preparser_;
reusable_preparser_ = nullptr;
- delete cached_parse_data_;
- cached_parse_data_ = nullptr;
}
static bool IsPreParser() { return false; }
@@ -276,20 +231,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ZoneList<const AstRawString*>* PrepareWrappedArguments(ParseInfo* info,
Zone* zone);
- void SetCachedData(ParseInfo* info);
-
void StitchAst(ParseInfo* top_level_parse_info, Isolate* isolate);
- ScriptCompiler::CompileOptions compile_options() const {
- return compile_options_;
- }
- bool consume_cached_parse_data() const {
- return compile_options_ == ScriptCompiler::kConsumeParserCache;
- }
- bool produce_cached_parse_data() const {
- return compile_options_ == ScriptCompiler::kProduceParserCache;
- }
-
PreParser* reusable_preparser() {
if (reusable_preparser_ == nullptr) {
reusable_preparser_ =
@@ -299,7 +242,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
SET_ALLOW(harmony_do_expressions);
- SET_ALLOW(harmony_function_sent);
SET_ALLOW(harmony_public_fields);
SET_ALLOW(harmony_static_fields);
SET_ALLOW(harmony_dynamic_import);
@@ -307,6 +249,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
SET_ALLOW(harmony_bigint);
SET_ALLOW(harmony_optional_catch_binding);
SET_ALLOW(harmony_private_fields);
+ SET_ALLOW(eval_cache);
#undef SET_ALLOW
}
return reusable_preparser_;
@@ -377,6 +320,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int class_token_pos, bool* ok);
V8_INLINE void DeclareClassProperty(const AstRawString* class_name,
ClassLiteralProperty* property,
+ const AstRawString* property_name,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
bool is_computed_name,
@@ -456,6 +400,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// hoisted over such a scope.
void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
+ bool IsPropertyWithPrivateFieldKey(Expression* property);
+
// Insert initializer statements for var-bindings shadowing parameter bindings
// from a non-simple parameter list.
void InsertShadowingVarBindingInitializers(Block* block);
@@ -506,7 +452,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
FunctionLiteral::FunctionType function_type,
DeclarationScope* function_scope, int* num_parameters,
int* function_length, bool* has_duplicate_parameters,
- int* expected_property_count,
+ int* expected_property_count, int* suspend_count,
ZoneList<const AstRawString*>* arguments_for_wrapped_function, bool* ok);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -555,7 +501,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* expression);
Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag);
- int32_t ComputeTemplateLiteralHash(const TemplateLiteral* lit);
ZoneList<Expression*>* PrepareSpreadArguments(ZoneList<Expression*>* list);
Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
@@ -883,12 +828,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* NewSuperPropertyReference(int pos);
Expression* NewSuperCallReference(int pos);
Expression* NewTargetExpression(int pos);
- Expression* FunctionSentExpression(int pos);
Expression* ImportMetaExpression(int pos);
Literal* ExpressionFromLiteral(Token::Value token, int pos);
- V8_INLINE Expression* ExpressionFromIdentifier(
+ V8_INLINE VariableProxy* ExpressionFromIdentifier(
const AstRawString* name, int start_position,
InferName infer = InferName::kYes) {
if (infer == InferName::kYes) {
@@ -1144,7 +1088,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ParserTarget* target_stack_; // for break, continue statements
ScriptCompiler::CompileOptions compile_options_;
- ParseData* cached_parse_data_;
// Other information which will be stored in Parser and moved to Isolate after
// parsing.
@@ -1152,7 +1095,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int total_preparse_skipped_;
bool allow_lazy_;
bool temp_zoned_;
- ParserLogger* log_;
ConsumedPreParsedScopeData* consumed_preparsed_scope_data_;
// If not kNoSourcePosition, indicates that the first function literal
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index bc3c6dec7b..d34f826a23 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -36,7 +36,6 @@ bool ParseProgram(ParseInfo* info, Isolate* isolate) {
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parser.parsing_on_main_thread_);
- parser.SetCachedData(info);
result = parser.ParseProgram(isolate, info);
info->set_literal(result);
if (result == nullptr) {
@@ -45,6 +44,9 @@ bool ParseProgram(ParseInfo* info, Isolate* isolate) {
} else {
result->scope()->AttachOuterScopeInfo(info, isolate);
info->set_language_mode(info->literal()->language_mode());
+ if (info->is_eval()) {
+ info->set_allow_eval_cache(parser.allow_eval_cache());
+ }
}
parser.UpdateStatistics(isolate, info->script());
return (result != nullptr);
@@ -79,6 +81,9 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
info->ast_value_factory());
} else {
result->scope()->AttachOuterScopeInfo(info, isolate);
+ if (info->is_eval()) {
+ info->set_allow_eval_cache(parser.allow_eval_cache());
+ }
}
parser.UpdateStatistics(isolate, info->script());
return (result != nullptr);
diff --git a/deps/v8/src/parsing/preparse-data-format.h b/deps/v8/src/parsing/preparse-data-format.h
deleted file mode 100644
index 2f317ce75f..0000000000
--- a/deps/v8/src/parsing/preparse-data-format.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_PREPARSE_DATA_FORMAT_H_
-#define V8_PARSING_PREPARSE_DATA_FORMAT_H_
-
-namespace v8 {
-namespace internal {
-
-// Generic and general data used by preparse data recorders and readers.
-
-struct PreparseDataConstants {
- public:
- // Layout and constants of the preparse data exchange format.
- static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 17;
-
- static const int kMagicOffset = 0;
- static const int kVersionOffset = 1;
- static const int kFunctionsSizeOffset = 2;
- static const int kSizeOffset = 3;
- static const int kHeaderSize = 4;
-
- static const unsigned char kNumberTerminator = 0x80u;
-};
-
-
-} // namespace internal
-} // namespace v8.
-
-#endif // V8_PARSING_PREPARSE_DATA_FORMAT_H_
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index f11eb7b21e..e39218111d 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -8,53 +8,10 @@
#include "src/globals.h"
#include "src/objects-inl.h"
#include "src/parsing/parser.h"
-#include "src/parsing/preparse-data-format.h"
namespace v8 {
namespace internal {
-void ParserLogger::LogFunction(int start, int end, int num_parameters,
- LanguageMode language_mode,
- bool uses_super_property,
- int num_inner_functions) {
- function_store_.Add(start);
- function_store_.Add(end);
- function_store_.Add(num_parameters);
- function_store_.Add(
- FunctionEntry::EncodeFlags(language_mode, uses_super_property));
- function_store_.Add(num_inner_functions);
-}
-
-ParserLogger::ParserLogger() {
- preamble_[PreparseDataConstants::kMagicOffset] =
- PreparseDataConstants::kMagicNumber;
- preamble_[PreparseDataConstants::kVersionOffset] =
- PreparseDataConstants::kCurrentVersion;
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0;
- preamble_[PreparseDataConstants::kSizeOffset] = 0;
- DCHECK_EQ(4, PreparseDataConstants::kHeaderSize);
-#ifdef DEBUG
- prev_start_ = -1;
-#endif
-}
-
-ScriptData* ParserLogger::GetScriptData() {
- int function_size = function_store_.size();
- int total_size = PreparseDataConstants::kHeaderSize + function_size;
- unsigned* data = NewArray<unsigned>(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- MemCopy(data, preamble_, sizeof(preamble_));
- if (function_size > 0) {
- function_store_.WriteTo(Vector<unsigned>(
- data + PreparseDataConstants::kHeaderSize, function_size));
- }
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment));
- ScriptData* result = new ScriptData(reinterpret_cast<byte*>(data),
- total_size * sizeof(unsigned));
- result->AcquireDataOwnership();
- return result;
-}
-
PreParseData::FunctionData PreParseData::GetFunctionData(int start) const {
auto it = functions_.find(start);
if (it != functions_.end()) {
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index b5db652c9c..0e40c76927 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -11,43 +11,9 @@
#include "src/base/hashmap.h"
#include "src/collector.h"
#include "src/messages.h"
-#include "src/parsing/preparse-data-format.h"
-
namespace v8 {
namespace internal {
-class ScriptData {
- public:
- ScriptData(const byte* data, int length);
- ~ScriptData() {
- if (owns_data_) DeleteArray(data_);
- }
-
- const byte* data() const { return data_; }
- int length() const { return length_; }
- bool rejected() const { return rejected_; }
-
- void Reject() { rejected_ = true; }
-
- void AcquireDataOwnership() {
- DCHECK(!owns_data_);
- owns_data_ = true;
- }
-
- void ReleaseDataOwnership() {
- DCHECK(owns_data_);
- owns_data_ = false;
- }
-
- private:
- bool owns_data_ : 1;
- bool rejected_ : 1;
- const byte* data_;
- int length_;
-
- DISALLOW_COPY_AND_ASSIGN(ScriptData);
-};
-
class PreParserLogger final {
public:
PreParserLogger()
@@ -74,25 +40,6 @@ class PreParserLogger final {
int num_inner_functions_;
};
-class ParserLogger final {
- public:
- ParserLogger();
-
- void LogFunction(int start, int end, int num_parameters,
- LanguageMode language_mode, bool uses_super_property,
- int num_inner_functions);
-
- ScriptData* GetScriptData();
-
- private:
- Collector<unsigned> function_store_;
- unsigned preamble_[PreparseDataConstants::kHeaderSize];
-
-#ifdef DEBUG
- int prev_start_;
-#endif
-};
-
class PreParseData final {
public:
struct FunctionData {
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index 786be3f0e5..70880f55c4 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -396,6 +396,7 @@ void ProducedPreParsedScopeData::SaveDataForVariable(Variable* var) {
// Store the variable name in debug mode; this way we can check that we
// restore data to the correct variable.
const AstRawString* name = var->raw_name();
+ byte_data_->WriteUint8(name->is_one_byte());
byte_data_->WriteUint32(name->length());
for (int i = 0; i < name->length(); ++i) {
byte_data_->WriteUint8(name->raw_data()[i]);
@@ -571,8 +572,7 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
if (scope_data_->RemainingBytes() < kUint8Size) {
// Temporary debugging code for detecting inconsistent data. Write debug
// information on the stack, then crash.
- data_->GetIsolate()->PushStackTraceAndDie(0xC0DEFEE, nullptr, nullptr,
- 0xC0DEFEE);
+ data_->GetIsolate()->PushStackTraceAndDie();
}
// scope_type is stored only in debug mode.
@@ -606,9 +606,20 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
void ConsumedPreParsedScopeData::RestoreDataForVariable(Variable* var) {
#ifdef DEBUG
const AstRawString* name = var->raw_name();
+ bool data_one_byte = scope_data_->ReadUint8();
+ DCHECK_IMPLIES(name->is_one_byte(), data_one_byte);
DCHECK_EQ(scope_data_->ReadUint32(), static_cast<uint32_t>(name->length()));
- for (int i = 0; i < name->length(); ++i) {
- DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
+ if (!name->is_one_byte() && data_one_byte) {
+ // It's possible that "name" is a two-byte representation of the string
+ // stored in the data.
+ for (int i = 0; i < 2 * name->length(); i += 2) {
+ DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
+ DCHECK_EQ(0, name->raw_data()[i + 1]);
+ }
+ } else {
+ for (int i = 0; i < name->length(); ++i) {
+ DCHECK_EQ(scope_data_->ReadUint8(), name->raw_data()[i]);
+ }
}
#endif
uint8_t variable_data = scope_data_->ReadQuarter();
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index b28eab2e75..5bb58a03aa 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -11,7 +11,6 @@
#include "src/globals.h"
#include "src/parsing/duplicate-finder.h"
#include "src/parsing/parser-base.h"
-#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/parsing/preparser.h"
@@ -50,6 +49,8 @@ PreParserIdentifier GetSymbolHelper(Scanner* scanner) {
return PreParserIdentifier::Await();
case Token::ASYNC:
return PreParserIdentifier::Async();
+ case Token::PRIVATE_NAME:
+ return PreParserIdentifier::PrivateName();
default:
break;
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 705cd011ee..86fa7d1150 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSING_PREPARSER_H
-#define V8_PARSING_PREPARSER_H
+#ifndef V8_PARSING_PREPARSER_H_
+#define V8_PARSING_PREPARSER_H_
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
@@ -51,6 +51,9 @@ class PreParserIdentifier {
static PreParserIdentifier Name() {
return PreParserIdentifier(kNameIdentifier);
}
+ static PreParserIdentifier PrivateName() {
+ return PreParserIdentifier(kPrivateNameIdentifier);
+ }
bool IsNull() const { return type_ == kNullIdentifier; }
bool IsEval() const { return type_ == kEvalIdentifier; }
bool IsArguments() const { return type_ == kArgumentsIdentifier; }
@@ -58,6 +61,7 @@ class PreParserIdentifier {
bool IsConstructor() const { return type_ == kConstructorIdentifier; }
bool IsAwait() const { return type_ == kAwaitIdentifier; }
bool IsName() const { return type_ == kNameIdentifier; }
+ bool IsPrivateName() const { return type_ == kPrivateNameIdentifier; }
private:
enum Type {
@@ -68,7 +72,8 @@ class PreParserIdentifier {
kConstructorIdentifier,
kAwaitIdentifier,
kAsyncIdentifier,
- kNameIdentifier
+ kNameIdentifier,
+ kPrivateNameIdentifier
};
explicit PreParserIdentifier(Type type) : type_(type), string_(nullptr) {}
@@ -169,6 +174,12 @@ class PreParserExpression {
variables);
}
+ static PreParserExpression ThisPropertyWithPrivateFieldKey() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(
+ kThisPropertyExpressionWithPrivateFieldKey));
+ }
+
static PreParserExpression ThisProperty() {
return PreParserExpression(
TypeField::encode(kExpression) |
@@ -181,6 +192,12 @@ class PreParserExpression {
ExpressionTypeField::encode(kPropertyExpression));
}
+ static PreParserExpression PropertyWithPrivateFieldKey() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kPropertyExpressionWithPrivateFieldKey));
+ }
+
static PreParserExpression Call() {
return PreParserExpression(TypeField::encode(kExpression) |
ExpressionTypeField::encode(kCallExpression));
@@ -254,13 +271,27 @@ class PreParserExpression {
bool IsThisProperty() const {
return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kThisPropertyExpression;
+ (ExpressionTypeField::decode(code_) == kThisPropertyExpression ||
+ ExpressionTypeField::decode(code_) ==
+ kThisPropertyExpressionWithPrivateFieldKey);
}
bool IsProperty() const {
return TypeField::decode(code_) == kExpression &&
(ExpressionTypeField::decode(code_) == kPropertyExpression ||
- ExpressionTypeField::decode(code_) == kThisPropertyExpression);
+ ExpressionTypeField::decode(code_) == kThisPropertyExpression ||
+ ExpressionTypeField::decode(code_) ==
+ kPropertyExpressionWithPrivateFieldKey ||
+ ExpressionTypeField::decode(code_) ==
+ kThisPropertyExpressionWithPrivateFieldKey);
+ }
+
+ bool IsPropertyWithPrivateFieldKey() const {
+ return TypeField::decode(code_) == kExpression &&
+ (ExpressionTypeField::decode(code_) ==
+ kPropertyExpressionWithPrivateFieldKey ||
+ ExpressionTypeField::decode(code_) ==
+ kThisPropertyExpressionWithPrivateFieldKey);
}
bool IsCall() const {
@@ -298,12 +329,22 @@ class PreParserExpression {
// and PreParser.
PreParserExpression* operator->() { return this; }
+ void set_is_private_field() {
+ if (variables_ != nullptr) {
+ DCHECK(IsIdentifier());
+ DCHECK(AsIdentifier().IsPrivateName());
+ DCHECK_EQ(1, variables_->length());
+ variables_->first()->set_is_private_field();
+ }
+ }
+
// More dummy implementations of things PreParser doesn't need to track:
void SetShouldEagerCompile() {}
int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
void set_scope(Scope* scope) {}
+ void set_suspend_count(int suspend_count) {}
private:
enum Type {
@@ -319,7 +360,9 @@ class PreParserExpression {
enum ExpressionType {
kThisExpression,
kThisPropertyExpression,
+ kThisPropertyExpressionWithPrivateFieldKey,
kPropertyExpression,
+ kPropertyExpressionWithPrivateFieldKey,
kCallExpression,
kCallEvalExpression,
kCallTaggedTemplateExpression,
@@ -580,8 +623,16 @@ class PreParserFactory {
PreParserExpression NewVariableProxy(void* variable) {
return PreParserExpression::Default();
}
+
PreParserExpression NewProperty(const PreParserExpression& obj,
const PreParserExpression& key, int pos) {
+ if (key.IsIdentifier() && key.AsIdentifier().IsPrivateName()) {
+ if (obj.IsThis()) {
+ return PreParserExpression::ThisPropertyWithPrivateFieldKey();
+ }
+ return PreParserExpression::PropertyWithPrivateFieldKey();
+ }
+
if (obj.IsThis()) {
return PreParserExpression::ThisProperty();
}
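
PreParserExpression grows private-field-aware property kinds so the lazy path reaches the same verdicts as the full parser: the shared ParserBase delete check calls impl()->IsPropertyWithPrivateFieldKey(), so it must answer correctly even when only the preparser ever sees the function body. Illustratively:

    function outer() {              // body is lazily pre-parsed
      class Q {
        #x;
        bad() { delete this.#x; }   // the preparser alone must still flag
      }                             // this as a SyntaxError
    }
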
@@ -984,6 +1035,10 @@ class PreParser : public ParserBase<PreParser> {
TemplateLiteralState* state, int start, const PreParserExpression& tag) {
return PreParserExpression::Default();
}
+ V8_INLINE bool IsPropertyWithPrivateFieldKey(
+ const PreParserExpression& expression) {
+ return expression.IsPropertyWithPrivateFieldKey();
+ }
V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
V8_INLINE void SetLanguageMode(Scope* scope, LanguageMode mode) {
@@ -1142,16 +1197,23 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE void DeclareClassProperty(const PreParserIdentifier& class_name,
const PreParserExpression& property,
+ const PreParserIdentifier& property_name,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
bool is_computed_name,
ClassInfo* class_info, bool* ok) {
- if (kind == ClassLiteralProperty::FIELD && is_computed_name) {
+ if (kind == ClassLiteralProperty::PUBLIC_FIELD && is_computed_name) {
scope()->DeclareVariableName(
ClassFieldVariableName(ast_value_factory(),
class_info->computed_field_count),
CONST);
}
+
+ if (kind == ClassLiteralProperty::PRIVATE_FIELD &&
+ property_name.string_ != nullptr) {
+ DCHECK(track_unresolved_variables_);
+ scope()->DeclareVariableName(property_name.string_, CONST);
+ }
}
V8_INLINE PreParserExpression
@@ -1528,10 +1590,6 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::NewTargetExpression();
}
- V8_INLINE PreParserExpression FunctionSentExpression(int pos) {
- return PreParserExpression::Default();
- }
-
V8_INLINE PreParserExpression ImportMetaExpression(int pos) {
return PreParserExpression::Default();
}
@@ -1707,4 +1765,4 @@ PreParserExpression PreParser::SpreadCallNew(
} // namespace internal
} // namespace v8
-#endif // V8_PARSING_PREPARSER_H
+#endif // V8_PARSING_PREPARSER_H_
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 3152ab184e..a6dd075fec 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -1071,8 +1071,11 @@ Token::Value Scanner::ScanString() {
AddLiteralChar(c);
}
- while (c0_ != quote && c0_ != kEndOfInput &&
- !unibrow::IsLineTerminator(c0_)) {
+ bool (*line_terminator_func)(unsigned int) =
+ FLAG_harmony_subsume_json ? unibrow::IsStringLiteralLineTerminator
+ : unibrow::IsLineTerminator;
+
+ while (c0_ != quote && c0_ != kEndOfInput && !line_terminator_func(c0_)) {
uc32 c = c0_;
Advance();
if (c == '\\') {
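Rather than branching inside the scan loop, the code now picks the line-terminator predicate once up front: with --harmony-subsume-json, U+2028 and U+2029 no longer terminate string literals. The selection pattern in isolation; the flag plumbing here is a stand-in, not the real FLAG_ machinery:

    #include <cassert>

    bool IsLineTerminator(unsigned c) {
      return c == '\n' || c == '\r' || c == 0x2028 || c == 0x2029;
    }
    bool IsStringLiteralLineTerminator(unsigned c) {
      return c == '\n' || c == '\r';  // separators allowed in literals
    }

    int main(int argc, char**) {
      bool harmony_subsume_json = argc > 1;  // hypothetical flag source
      bool (*terminates)(unsigned) = harmony_subsume_json
                                         ? IsStringLiteralLineTerminator
                                         : IsLineTerminator;
      // U+2028 ends the literal only when the flag is off.
      assert(terminates(0x2028) == !harmony_subsume_json);
    }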
@@ -1225,6 +1228,56 @@ void Scanner::ScanDecimalDigits() {
AddLiteralCharAdvance();
}
+bool Scanner::ScanBinaryDigits() {
+ // we must have at least one binary digit after 'b'/'B'
+ if (!IsBinaryDigit(c0_)) return false;
+ while (IsBinaryDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+ return true;
+}
+
+bool Scanner::ScanOctalDigits() {
+ // we must have at least one octal digit after 'o'/'O'
+ if (!IsOctalDigit(c0_)) return false;
+ while (IsOctalDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+
+ return true;
+}
+
+bool Scanner::ScanImplicitOctalDigits(int start_pos) {
+ // (possible) octal number
+ while (true) {
+ if (c0_ == '8' || c0_ == '9') return false;
+ if (c0_ < '0' || '7' < c0_) {
+ // Octal literal finished.
+ octal_pos_ = Location(start_pos, source_pos());
+ octal_message_ = MessageTemplate::kStrictOctalLiteral;
+ break;
+ }
+ AddLiteralCharAdvance();
+ }
+ return true;
+}
+
+bool Scanner::ScanHexDigits() {
+ // we must have at least one hex digit after 'x'/'X'
+ if (!IsHexDigit(c0_)) return false;
+ while (IsHexDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+ return true;
+}
+
+bool Scanner::ScanSignedInteger() {
+ if (c0_ == '+' || c0_ == '-') AddLiteralCharAdvance();
+ // we must have at least one decimal digit after 'e'/'E'
+ if (!IsDecimalDigit(c0_)) return false;
+ ScanDecimalDigits();
+ return true;
+}
Token::Value Scanner::ScanNumber(bool seen_period) {
DCHECK(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
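Each of the new digit helpers shares one contract: consume digits greedily and report whether at least one was seen, letting ScanNumber map an empty digit run to Token::ILLEGAL. A compilable sketch of that contract over a plain string, with hypothetical names rather than the real Scanner API:

    #include <cassert>
    #include <cctype>
    #include <cstddef>
    #include <string>

    // Returns true iff at least one hex digit was consumed from *pos.
    bool ScanHexDigits(const std::string& s, std::size_t* pos) {
      std::size_t start = *pos;
      while (*pos < s.size() &&
             std::isxdigit(static_cast<unsigned char>(s[*pos]))) {
        ++*pos;
      }
      return *pos > start;
    }

    int main() {
      std::string ok = "0x1F";
      std::size_t pos = 2;  // past the "0x" prefix
      assert(ScanHexDigits(ok, &pos) && pos == 4);
      std::string bad = "0x";  // no digits after the prefix: illegal
      pos = 2;
      assert(!ScanHexDigits(bad, &pos));
    }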
@@ -1254,52 +1307,22 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// either 0, 0exxx, 0Exxx, 0.xxx, a hex number, a binary number or
// an octal number.
if (c0_ == 'x' || c0_ == 'X') {
- // hex number
- kind = HEX;
AddLiteralCharAdvance();
- if (!IsHexDigit(c0_)) {
- // we must have at least one hex digit after 'x'/'X'
- return Token::ILLEGAL;
- }
- while (IsHexDigit(c0_)) {
- AddLiteralCharAdvance();
- }
+ kind = HEX;
+ if (!ScanHexDigits()) return Token::ILLEGAL;
} else if (c0_ == 'o' || c0_ == 'O') {
- kind = OCTAL;
AddLiteralCharAdvance();
- if (!IsOctalDigit(c0_)) {
- // we must have at least one octal digit after 'o'/'O'
- return Token::ILLEGAL;
- }
- while (IsOctalDigit(c0_)) {
- AddLiteralCharAdvance();
- }
+ kind = OCTAL;
+ if (!ScanOctalDigits()) return Token::ILLEGAL;
} else if (c0_ == 'b' || c0_ == 'B') {
- kind = BINARY;
AddLiteralCharAdvance();
- if (!IsBinaryDigit(c0_)) {
- // we must have at least one binary digit after 'b'/'B'
- return Token::ILLEGAL;
- }
- while (IsBinaryDigit(c0_)) {
- AddLiteralCharAdvance();
- }
+ kind = BINARY;
+ if (!ScanBinaryDigits()) return Token::ILLEGAL;
} else if ('0' <= c0_ && c0_ <= '7') {
- // (possible) octal number
kind = IMPLICIT_OCTAL;
- while (true) {
- if (c0_ == '8' || c0_ == '9') {
- at_start = false;
- kind = DECIMAL_WITH_LEADING_ZERO;
- break;
- }
- if (c0_ < '0' || '7' < c0_) {
- // Octal literal finished.
- octal_pos_ = Location(start_pos, source_pos());
- octal_message_ = MessageTemplate::kStrictOctalLiteral;
- break;
- }
- AddLiteralCharAdvance();
+ if (!ScanImplicitOctalDigits(start_pos)) {
+ kind = DECIMAL_WITH_LEADING_ZERO;
+ at_start = false;
}
} else if (c0_ == '8' || c0_ == '9') {
kind = DECIMAL_WITH_LEADING_ZERO;
@@ -1308,6 +1331,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// Parse decimal digits and allow trailing fractional part.
if (kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO) {
+    // This is an optimization for parsing decimal numbers as Smis.
if (at_start) {
uint64_t value = 0;
while (IsDecimalDigit(c0_)) {
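The fast path noted above accumulates digits into a uint64_t only while the running value provably stays in Smi range, then hands anything larger to the generic conversion. A self-contained sketch of the idea; the bound is illustrative, not V8's exact Smi limit logic:

    #include <cassert>
    #include <cstdint>

    // Accumulate decimal digits; bail out to the slow path as soon as
    // the value could leave the (here: 31-bit) small-integer range.
    bool TryScanSmallInt(const char* p, uint64_t* out) {
      const uint64_t kMaxSafe = (uint64_t{1} << 31) - 1;  // illustrative
      uint64_t value = 0;
      for (; *p >= '0' && *p <= '9'; ++p) {
        value = value * 10 + static_cast<uint64_t>(*p - '0');
        if (value > kMaxSafe) return false;
      }
      *out = value;
      return true;
    }

    int main() {
      uint64_t v = 0;
      assert(TryScanSmallInt("12345", &v) && v == 12345);
      assert(!TryScanSmallInt("99999999999", &v));
    }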
@@ -1362,17 +1386,14 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
} else if (c0_ == 'e' || c0_ == 'E') {
// scan exponent, if any
DCHECK(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
+
if (!(kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO))
return Token::ILLEGAL;
+
// scan exponent
AddLiteralCharAdvance();
- if (c0_ == '+' || c0_ == '-')
- AddLiteralCharAdvance();
- if (!IsDecimalDigit(c0_)) {
- // we must have at least one decimal digit after 'e'/'E'
- return Token::ILLEGAL;
- }
- ScanDecimalDigits();
+
+ if (!ScanSignedInteger()) return Token::ILLEGAL;
}
// The source character immediately following a numeric literal must
@@ -1487,7 +1508,6 @@ uc32 Scanner::ScanUnicodeEscape() {
KEYWORD_GROUP('r') \
KEYWORD("return", Token::RETURN) \
KEYWORD_GROUP('s') \
- KEYWORD("sent", Token::SENT) \
KEYWORD("set", Token::SET) \
KEYWORD("static", Token::STATIC) \
KEYWORD("super", Token::SUPER) \
@@ -1510,13 +1530,15 @@ uc32 Scanner::ScanUnicodeEscape() {
KEYWORD_GROUP('y') \
KEYWORD("yield", Token::YIELD) \
KEYWORD_GROUP('_') \
- KEYWORD("__proto__", Token::PROTO_UNDERSCORED)
+ KEYWORD("__proto__", Token::PROTO_UNDERSCORED) \
+ KEYWORD_GROUP('#') \
+ KEYWORD("#constructor", Token::PRIVATE_CONSTRUCTOR)
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length) {
DCHECK_GE(input_length, 1);
const int kMinLength = 2;
- const int kMaxLength = 11;
+ const int kMaxLength = 12;
if (input_length < kMinLength || input_length > kMaxLength) {
return Token::IDENTIFIER;
}
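kMaxLength grows from 11 to 12 because the new "#constructor" entry is twelve characters, one longer than the previous longest keyword, "constructor". The arithmetic as a compile-time check:

    #include <cstring>

    // sizeof includes the trailing NUL, hence the -1.
    static_assert(sizeof("#constructor") - 1 == 12, "new longest keyword");
    static_assert(sizeof("constructor") - 1 == 11, "old longest keyword");

    int main() { return std::strlen("#constructor") == 12 ? 0 : 1; }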
@@ -1551,6 +1573,9 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
}
return Token::IDENTIFIER;
+#undef KEYWORDS
+#undef KEYWORD
+#undef KEYWORD_GROUP_CASE
}
Token::Value Scanner::ScanIdentifierOrKeyword() {
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index f5106990ff..fe7d754319 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -721,6 +721,12 @@ class Scanner {
Token::Value ScanHtmlComment();
void ScanDecimalDigits();
+ bool ScanHexDigits();
+ bool ScanBinaryDigits();
+ bool ScanSignedInteger();
+ bool ScanOctalDigits();
+ bool ScanImplicitOctalDigits(int start_pos);
+
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifierOrKeyword();
Token::Value ScanIdentifierOrKeywordInner(LiteralScope* literal);
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index 07974edf41..660f24361c 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -188,13 +188,13 @@ namespace internal {
C(SET, "set", 0) \
C(OF, "of", 0) \
C(TARGET, "target", 0) \
- C(SENT, "sent", 0) \
C(META, "meta", 0) \
C(AS, "as", 0) \
C(FROM, "from", 0) \
C(NAME, "name", 0) \
C(PROTO_UNDERSCORED, "__proto__", 0) \
C(CONSTRUCTOR, "constructor", 0) \
+ C(PRIVATE_CONSTRUCTOR, "#constructor", 0) \
C(PROTOTYPE, "prototype", 0) \
C(EVAL, "eval", 0) \
C(ARGUMENTS, "arguments", 0) \
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 7ccd02ef9b..ac1362c9a9 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -31,8 +31,10 @@
#include "src/assembler.h"
#include "src/eh-frame.h"
+#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/source-position-table.h"
+#include "src/wasm/wasm-code-manager.h"
#if V8_OS_LINUX
#include <fcntl.h>
@@ -213,7 +215,11 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
// Debug info has to be emitted first.
if (FLAG_perf_prof && shared != nullptr) {
- LogWriteDebugInfo(code, shared);
+ // TODO(herhut): This currently breaks for js2wasm/wasm2js functions.
+ if (code->kind() != Code::JS_TO_WASM_FUNCTION &&
+ code->kind() != Code::WASM_TO_JS_FUNCTION) {
+ LogWriteDebugInfo(code, shared);
+ }
}
const char* code_name = name;
@@ -226,6 +232,58 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
// Unwinding info comes right after debug info.
if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(code);
+ WriteJitCodeLoadEntry(code_pointer, code_size, code_name, length);
+}
+
+void PerfJitLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) {
+ base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+ if (perf_output_handle_ == nullptr) return;
+
+ WriteJitCodeLoadEntry(code->instructions().start(),
+ code->instructions().length(), name, length);
+}
+
+void PerfJitLogger::WriteJitCodeLoadEntry(const uint8_t* code_pointer,
+ uint32_t code_size, const char* name,
+ int name_length) {
+ static const char string_terminator[] = "\0";
+
+ PerfJitCodeLoad code_load;
+ code_load.event_ = PerfJitCodeLoad::kLoad;
+ code_load.size_ = sizeof(code_load) + name_length + 1 + code_size;
+ code_load.time_stamp_ = GetTimestamp();
+ code_load.process_id_ =
+ static_cast<uint32_t>(base::OS::GetCurrentProcessId());
+ code_load.thread_id_ = static_cast<uint32_t>(base::OS::GetCurrentThreadId());
+ code_load.vma_ = reinterpret_cast<uint64_t>(code_pointer);
+ code_load.code_address_ = reinterpret_cast<uint64_t>(code_pointer);
+ code_load.code_size_ = code_size;
+ code_load.code_id_ = code_index_;
+
+ code_index_++;
+
+ LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
+ LogWriteBytes(name, name_length);
+ LogWriteBytes(string_terminator, 1);
+ LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
+}
+
+void PerfJitLogger::LogRecordedBuffer(const InstructionStream* stream,
+ const char* name, int length) {
+ if (FLAG_perf_basic_prof_only_functions) return;
+
+ base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
+
+ if (perf_output_handle_ == nullptr) return;
+
+ const char* code_name = name;
+ uint8_t* code_pointer = stream->bytes();
+ uint32_t code_size = static_cast<uint32_t>(stream->byte_length());
+
+ // TODO(jgruber): Do we need unwinding info?
+
static const char string_terminator[] = "\0";
PerfJitCodeLoad code_load;
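WriteJitCodeLoadEntry above centralizes jitdump JIT_CODE_LOAD emission: a fixed header whose size_ covers the whole record, then the NUL-terminated symbol name, then the raw code bytes. A sketch of that layout with illustrative field names, not the exact V8 structs:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct CodeLoadRecord {      // illustrative, mirrors PerfJitCodeLoad
      uint32_t event = 0;        // "load" event id
      uint32_t size = 0;         // header + name + NUL + code bytes
      uint64_t timestamp = 0;
      uint32_t pid = 0, tid = 0;
      uint64_t vma = 0, code_addr = 0;
      uint64_t code_size = 0;
      uint64_t code_id = 0;
    };

    void WriteCodeLoad(std::FILE* f, const uint8_t* code,
                       uint32_t code_size, const char* name,
                       uint64_t code_id) {
      CodeLoadRecord rec;
      uint32_t name_len = static_cast<uint32_t>(std::strlen(name));
      rec.size = sizeof(rec) + name_len + 1 + code_size;
      rec.code_addr = rec.vma = reinterpret_cast<uint64_t>(code);
      rec.code_size = code_size;
      rec.code_id = code_id;
      std::fwrite(&rec, sizeof(rec), 1, f);
      std::fwrite(name, 1, name_len + 1, f);  // include the NUL
      std::fwrite(code, 1, code_size, f);
    }

    int main() {
      const uint8_t code[] = {0x90, 0x90, 0xC3};  // nop; nop; ret
      WriteCodeLoad(stdout, code, sizeof(code), "stub", 1);
    }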
@@ -288,6 +346,8 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
entry_count++;
}
if (entry_count == 0) return;
+    // The WasmToJS wrapper stubs have source position entries but no
+    // source code to resolve them against, so bail out for those.
+    if (!shared->HasSourceCode()) return;
Handle<Script> script(Script::cast(shared->script()));
PerfJitCodeDebugInfo debug_info;
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
index 2b0b4831e0..8e7c6b5939 100644
--- a/deps/v8/src/perf-jit.h
+++ b/deps/v8/src/perf-jit.h
@@ -54,6 +54,10 @@ class PerfJitLogger : public CodeEventLogger {
uint64_t GetTimestamp();
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override;
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override;
// Extension added to V8 log file name to get the low-level log name.
static const char kFilenameFormatString[];
@@ -63,6 +67,9 @@ class PerfJitLogger : public CodeEventLogger {
// minimize the associated overhead.
static const int kLogBufferSize = 2 * MB;
+ void WriteJitCodeLoadEntry(const uint8_t* code_pointer, uint32_t code_size,
+ const char* name, int name_length);
+
void LogWriteBytes(const char* bytes, int size);
void LogWriteHeader();
void LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared);
@@ -126,9 +133,20 @@ class PerfJitLogger : public CodeEventLogger {
const char* name, int length) override {
UNIMPLEMENTED();
}
+
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override {
+ UNIMPLEMENTED();
+ }
+
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override {
+ UNIMPLEMENTED();
+ }
};
#endif // V8_OS_LINUX
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_PERF_JIT_H_
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 451a1afa46..166a854638 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -46,8 +46,7 @@
namespace v8 {
namespace internal {
-
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
@@ -61,8 +60,8 @@ void RelocInfo::apply(intptr_t delta) {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, constant_pool_);
- Assembler::set_target_address_at(nullptr, pc_, constant_pool_,
- target + delta, SKIP_ICACHE_FLUSH);
+ Assembler::set_target_address_at(pc_, constant_pool_, target + delta,
+ SKIP_ICACHE_FLUSH);
}
}
@@ -177,7 +176,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -199,15 +198,15 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -217,15 +216,15 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr,
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@@ -241,7 +240,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
}
-Operand::Operand(Register rm) : rm_(rm), rmode_(kRelocInfo_NONEPTR) {}
+Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
void Assembler::UntrackBranch() {
DCHECK(!trampoline_emitted_);
@@ -414,16 +413,15 @@ Address Assembler::target_constant_pool_address_at(
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload,
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload,
code ? code->constant_pool() : nullptr, target);
}
-
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- set_target_address_at(isolate, pc, nullptr, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(pc, nullptr, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
@@ -431,11 +429,9 @@ void Assembler::deserialization_set_target_internal_reference_at(
// This code assumes the FIXED_SEQUENCE of lis/ori
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
-
if (FLAG_enable_embedded_constant_pool && constant_pool) {
ConstantPoolEntry::Access access;
if (IsConstantPoolLoadStart(pc, &access)) {
@@ -477,7 +473,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
*(p + 3) = instr4;
*(p + 4) = instr5;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, p, 5 * kInstrSize);
+ Assembler::FlushICache(p, 5 * kInstrSize);
}
#else
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -492,7 +488,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
*p = instr1;
*(p + 1) = instr2;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, p, 2 * kInstrSize);
+ Assembler::FlushICache(p, 2 * kInstrSize);
}
#endif
return;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 90b18b02ba..eb16e46505 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -170,22 +170,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -240,7 +238,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
}
Address pc = buffer_ + request.offset();
Address constant_pool = nullptr;
- set_target_address_at(nullptr, pc, constant_pool,
+ set_target_address_at(pc, constant_pool,
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
}
@@ -2093,8 +2091,7 @@ void Assembler::EmitRelocations() {
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, nullptr));
- set_target_address_at(nullptr, pc, nullptr, buffer_ + pos,
- SKIP_ICACHE_FLUSH);
+ set_target_address_at(pc, nullptr, buffer_ + pos, SKIP_ICACHE_FLUSH);
}
reloc_info_writer.Write(&rinfo);
@@ -2150,10 +2147,6 @@ PatchingAssembler::~PatchingAssembler() {
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}
-void PatchingAssembler::FlushICache(Isolate* isolate) {
- Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 0204d65fa5..271c6e69db 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -373,18 +373,12 @@ C_REGISTERS(DECLARE_C_REGISTER)
// -----------------------------------------------------------------------------
// Machine instruction Operands
-#if V8_TARGET_ARCH_PPC64
-constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
-#else
-constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
-#endif
-
// Class Operand represents a shifter operand in data processing instructions
class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = kRelocInfo_NONEPTR)
+ RelocInfo::Mode rmode = RelocInfo::NONE)
: rmode_(rmode)) {
value_.immediate = immediate;
}
@@ -394,7 +388,7 @@ class Operand BASE_EMBEDDED {
value_.immediate = reinterpret_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value) : rmode_(kRelocInfo_NONEPTR)) {
+ INLINE(explicit Operand(Smi* value) : rmode_(RelocInfo::NONE)) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
// rm
@@ -581,7 +575,7 @@ class Assembler : public AssemblerBase {
-  // The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
@@ -595,12 +589,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -1664,8 +1657,6 @@ class PatchingAssembler : public Assembler {
public:
PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
~PatchingAssembler();
-
- void FlushICache(Isolate* isolate);
};
} // namespace internal
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 5c3d38786f..742d89a590 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -449,6 +449,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
+ // Reset the masking register.
+ if (FLAG_branch_load_poisoning) {
+ __ ResetSpeculationPoisonRegister();
+ }
+
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ mov(ip, Operand(pending_handler_entrypoint_address));
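ResetSpeculationPoisonRegister reloads the mask with -1 (all ones) here because the exception path re-enters generated code without running the normal poison computation. The masking idea in miniature, as ordinary C++:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t poison = ~uint64_t{0};  // reset: all ones on the real path
      uint64_t loaded = 0xDEADBEEF;
      assert((loaded & poison) == loaded);  // mask is a no-op when valid
      poison = 0;                      // hypothetical mispredicted path
      assert((loaded & poison) == 0);  // poisoned load is squashed
    }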
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 4641dc260c..b54a44c6ed 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -36,10 +36,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
- !RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 7e962e7849..5564fd9c32 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -269,6 +269,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
return 8;
}
+ break;
case 's': {
DCHECK_EQ(format[1], 'h');
int32_t value = 0;
diff --git a/deps/v8/src/ppc/frame-constants-ppc.h b/deps/v8/src/ppc/frame-constants-ppc.h
index ee7f29937b..c822de877b 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/ppc/frame-constants-ppc.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_FRAMES_PPC_H_
-#define V8_PPC_FRAMES_PPC_H_
+#ifndef V8_PPC_FRAME_CONSTANTS_PPC_H_
+#define V8_PPC_FRAME_CONSTANTS_PPC_H_
#include "src/frame-constants.h"
@@ -47,4 +47,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_PPC_FRAMES_PPC_H_
+#endif // V8_PPC_FRAME_CONSTANTS_PPC_H_
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 069fcb26ad..23245b153b 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -67,12 +67,6 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r5, r6};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
@@ -290,8 +284,8 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 8d7c3d05b4..68efa84c72 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -15,6 +15,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -832,40 +833,28 @@ void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}
-void TurboAssembler::LoadConstantPoolPointerRegister(Register base,
- int code_start_delta) {
- add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
- code_start_delta);
-}
-
void TurboAssembler::LoadConstantPoolPointerRegister() {
mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
}
-void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
- int prologue_offset) {
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
{
ConstantPoolUnavailableScope constant_pool_unavailable(this);
mov(r11, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(r11);
}
if (FLAG_enable_embedded_constant_pool) {
- if (base != no_reg) {
- // base contains prologue address
- LoadConstantPoolPointerRegister(base, -prologue_offset);
- } else {
- LoadConstantPoolPointerRegister();
- }
+ LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
}
}
-void TurboAssembler::Prologue(Register base, int prologue_offset) {
+void TurboAssembler::Prologue() {
-  DCHECK(base != no_reg);
PushStandardFrame(r4);
if (FLAG_enable_embedded_constant_pool) {
-    // base contains prologue address
- LoadConstantPoolPointerRegister(base, -prologue_offset);
+ LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
}
}
@@ -1189,14 +1178,33 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ mov(r7, Operand(debug_is_active));
+ LoadByte(r7, MemOperand(r7), r0);
+ extsb(r7, r7);
+ CmpSmiLiteral(r7, Smi::kZero, r0);
+ beq(&skip_hook);
+
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  mov(r7, Operand(debug_hook_active));
LoadByte(r7, MemOperand(r7), r0);
extsb(r7, r7);
CmpSmiLiteral(r7, Smi::kZero, r0);
- beq(&skip_hook);
+ bne(&call_hook);
+
+ LoadP(r7, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(r7, &skip_hook);
+ LoadP(r7, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
+ SmiUntag(r0, r7);
+ andi(r0, r0, Operand(DebugInfo::kBreakAtEntry));
+ beq(&skip_hook, cr0);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
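The expanded check layers three tests: nothing happens unless the debugger is active; then the hook is called either because the global hook-on-call flag is set or because this particular function's DebugInfo carries the break-at-entry flag. The same decision distilled into plain C++, with hypothetical booleans in place of the real accessors:

    #include <cassert>

    bool ShouldCallDebugHook(bool debug_is_active, bool hook_on_call,
                             bool has_debug_info, bool break_at_entry) {
      if (!debug_is_active) return false;       // fast early-out
      if (hook_on_call) return true;            // global hook flag
      return has_debug_info && break_at_entry;  // per-function flag
    }

    int main() {
      assert(!ShouldCallDebugHook(false, true, true, true));
      assert(ShouldCallDebugHook(true, false, true, true));
      assert(!ShouldCallDebugHook(true, false, true, false));
    }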
@@ -1253,7 +1261,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = ip;
+ Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -1307,14 +1315,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(r4, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- Move(r4, function);
- InvokeFunction(r4, expected, actual, flag);
-}
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -1626,6 +1626,11 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ intptr_t bytes_address = reinterpret_cast<intptr_t>(stream->bytes());
+ mov(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+ Jump(kOffHeapTrampolineRegister);
+}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
@@ -1667,7 +1672,7 @@ void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
+ const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2922,6 +2927,9 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
}
#endif
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ mov(kSpeculationPoisonRegister, Operand(-1));
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index c67ef4ab90..f4d9afd47f 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -15,20 +15,23 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = r3;
-const Register kReturnRegister1 = r4;
-const Register kReturnRegister2 = r5;
-const Register kJSFunctionRegister = r4;
-const Register kContextRegister = r30;
-const Register kAllocateSizeRegister = r4;
-const Register kInterpreterAccumulatorRegister = r3;
-const Register kInterpreterBytecodeOffsetRegister = r15;
-const Register kInterpreterBytecodeArrayRegister = r16;
-const Register kInterpreterDispatchTableRegister = r17;
-const Register kJavaScriptCallArgCountRegister = r3;
-const Register kJavaScriptCallNewTargetRegister = r6;
-const Register kRuntimeCallFunctionRegister = r4;
-const Register kRuntimeCallArgCountRegister = r3;
+constexpr Register kReturnRegister0 = r3;
+constexpr Register kReturnRegister1 = r4;
+constexpr Register kReturnRegister2 = r5;
+constexpr Register kJSFunctionRegister = r4;
+constexpr Register kContextRegister = r30;
+constexpr Register kAllocateSizeRegister = r4;
+constexpr Register kSpeculationPoisonRegister = r14;
+constexpr Register kInterpreterAccumulatorRegister = r3;
+constexpr Register kInterpreterBytecodeOffsetRegister = r15;
+constexpr Register kInterpreterBytecodeArrayRegister = r16;
+constexpr Register kInterpreterDispatchTableRegister = r17;
+constexpr Register kJavaScriptCallArgCountRegister = r3;
+constexpr Register kJavaScriptCallNewTargetRegister = r6;
+constexpr Register kJavaScriptCallCodeStartRegister = r5;
+constexpr Register kOffHeapTrampolineRegister = ip;
+constexpr Register kRuntimeCallFunctionRegister = r4;
+constexpr Register kRuntimeCallArgCountRegister = r3;
// ----------------------------------------------------------------------------
// Static helper functions
@@ -172,9 +175,8 @@ class TurboAssembler : public Assembler {
void PushCommonFrame(Register marker_reg = no_reg);
// Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type, Register base = no_reg,
- int prologue_offset = 0);
- void Prologue(Register base, int prologue_offset = 0);
+ void StubPrologue(StackFrame::Type type);
+ void Prologue();
// Push a standard frame, consisting of lr, fp, constant pool,
// context and JS function
@@ -639,7 +641,6 @@ class TurboAssembler : public Assembler {
void CallStubDelayed(CodeStub* stub);
void LoadConstantPoolPointerRegister();
- void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
void AbortConstantPoolBuilding() {
#ifdef DEBUG
// Avoid DCHECK(!is_linked()) failure in ~Label()
@@ -647,6 +648,8 @@ class TurboAssembler : public Assembler {
#endif
}
+ void ResetSpeculationPoisonRegister();
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -829,10 +832,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
void DebugBreak();
// Frame restart support
void MaybeDropFrames();
@@ -933,6 +932,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// ---------------------------------------------------------------------------
// StatsCounter support
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index a92e5363ea..6c517038bb 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -639,8 +639,7 @@ void PPCDebugger::Debug() {
#undef XSTR
}
-
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -738,11 +737,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
#if V8_TARGET_ARCH_PPC64
@@ -2161,50 +2155,50 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
- case STFSUX: {
- case STFSX:
- int frs = instr->RSValue();
- int ra = instr->RAValue();
- int rb = instr->RBValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- intptr_t rb_val = get_register(rb);
- float frs_val = static_cast<float>(get_double_from_d_register(frs));
- int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+ case STFSUX: V8_FALLTHROUGH;
+ case STFSX: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ float frs_val = static_cast<float>(get_double_from_d_register(frs));
+ int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- // Conversion using double changes sNan to qNan on ia32/x64
- int32_t sval = 0;
- int64_t dval = get_d_register(frs);
- if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
- sval = ((dval & 0xC000000000000000) >> 32) |
- ((dval & 0x07FFFFFFE0000000) >> 29);
- p = &sval;
- } else {
- p = reinterpret_cast<int32_t*>(&frs_val);
- }
-#else
+ // Conversion using double changes sNan to qNan on ia32/x64
+ int32_t sval = 0;
+ int64_t dval = get_d_register(frs);
+ if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
+ sval = ((dval & 0xC000000000000000) >> 32) |
+ ((dval & 0x07FFFFFFE0000000) >> 29);
+ p = &sval;
+ } else {
p = reinterpret_cast<int32_t*>(&frs_val);
+ }
+#else
+ p = reinterpret_cast<int32_t*>(&frs_val);
#endif
- WriteW(ra_val + rb_val, *p, instr);
- if (opcode == STFSUX) {
- DCHECK_NE(ra, 0);
- set_register(ra, ra_val + rb_val);
- }
- break;
+ WriteW(ra_val + rb_val, *p, instr);
+ if (opcode == STFSUX) {
+ DCHECK_NE(ra, 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
}
- case STFDUX: {
- case STFDX:
- int frs = instr->RSValue();
- int ra = instr->RAValue();
- int rb = instr->RBValue();
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- intptr_t rb_val = get_register(rb);
- int64_t frs_val = get_d_register(frs);
- WriteDW(ra_val + rb_val, frs_val);
- if (opcode == STFDUX) {
- DCHECK_NE(ra, 0);
- set_register(ra, ra_val + rb_val);
- }
- break;
+ case STFDUX: V8_FALLTHROUGH;
+ case STFDX: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ intptr_t rb_val = get_register(rb);
+ int64_t frs_val = get_d_register(frs);
+ WriteDW(ra_val + rb_val, frs_val);
+ if (opcode == STFDUX) {
+ DCHECK_NE(ra, 0);
+ set_register(ra, ra_val + rb_val);
+ }
+ break;
}
case POPCNTW: {
int rs = instr->RSValue();
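The rewrite gives the STFSUX/STFSX pair (and STFDUX/STFDX above) a single correctly scoped body, with V8_FALLTHROUGH marking the shared entry instead of the old case-label-inside-a-block arrangement. The shape of that pattern, using the standard attribute V8_FALLTHROUGH expands to:

    #include <cassert>

    enum Op { kStoreUpdate, kStore };

    int Execute(Op op, int base, int offset) {
      int ea = 0;
      switch (op) {
        case kStoreUpdate:
          [[fallthrough]];
        case kStore: {
          ea = base + offset;                 // shared store logic
          if (op == kStoreUpdate) base = ea;  // update form writes back
          break;
        }
      }
      (void)base;
      return ea;
    }

    int main() { assert(Execute(kStore, 8, 4) == 12); }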
@@ -3220,36 +3214,35 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
- case STFSU: {
- case STFS:
- int frs = instr->RSValue();
- int ra = instr->RAValue();
- int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
- intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
- float frs_val = static_cast<float>(get_double_from_d_register(frs));
- int32_t* p;
+ case STFSU: V8_FALLTHROUGH;
+ case STFS: {
+ int frs = instr->RSValue();
+ int ra = instr->RAValue();
+ int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+ intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+ float frs_val = static_cast<float>(get_double_from_d_register(frs));
+ int32_t* p;
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- // Conversion using double changes sNan to qNan on ia32/x64
- int32_t sval = 0;
- int64_t dval = get_d_register(frs);
- if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
- sval = ((dval & 0xC000000000000000) >> 32) |
- ((dval & 0x07FFFFFFE0000000) >> 29);
- p = &sval;
- } else {
- p = reinterpret_cast<int32_t*>(&frs_val);
- }
-#else
+ // Conversion using double changes sNan to qNan on ia32/x64
+ int32_t sval = 0;
+ int64_t dval = get_d_register(frs);
+ if ((dval & 0x7FF0000000000000) == 0x7FF0000000000000) {
+ sval = ((dval & 0xC000000000000000) >> 32) |
+ ((dval & 0x07FFFFFFE0000000) >> 29);
+ p = &sval;
+ } else {
p = reinterpret_cast<int32_t*>(&frs_val);
+ }
+#else
+ p = reinterpret_cast<int32_t*>(&frs_val);
#endif
- WriteW(ra_val + offset, *p, instr);
- if (opcode == STFSU) {
- DCHECK_NE(ra, 0);
- set_register(ra, ra_val + offset);
- }
- break;
+ WriteW(ra_val + offset, *p, instr);
+ if (opcode == STFSU) {
+ DCHECK_NE(ra, 0);
+ set_register(ra, ra_val + offset);
+ }
+ break;
}
-
case STFDU:
case STFD: {
int frs = instr->RSValue();
@@ -3916,7 +3909,7 @@ void Simulator::Trace(Instruction* instr) {
// Executes the current instruction.
void Simulator::ExecuteInstruction(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 544b9d463e..45b350b742 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -190,6 +190,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -328,9 +329,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation
- base::CustomMatcherHashMap* i_cache_;
-
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 8ee4527234..b2b9392319 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -5,6 +5,7 @@
#include "src/profiler/allocation-tracker.h"
#include "src/frames-inl.h"
+#include "src/global-handles.h"
#include "src/objects-inl.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index a84fd4a8fd..cd9e120db2 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -154,4 +154,4 @@ class AllocationTracker {
} // namespace internal
} // namespace v8
-#endif // V8_ALLOCATION_TRACKER_H_
+#endif // V8_PROFILER_ALLOCATION_TRACKER_H_
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index a915ebd511..841ce6000f 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -208,7 +208,7 @@ int CpuProfiler::GetProfilesCount() {
CpuProfile* CpuProfiler::GetProfile(int index) {
- return profiles_->profiles()->at(index);
+ return profiles_->profiles()->at(index).get();
}
@@ -220,7 +220,6 @@ void CpuProfiler::DeleteAllProfiles() {
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
profiles_->RemoveProfile(profile);
- delete profile;
if (profiles_->profiles()->empty() && !is_profiling_) {
// If this was the last profile, clean up all accessory data as well.
ResetProfiles();
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 8f0afdc771..9dbe3ff5bd 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -17,28 +17,22 @@ namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
: ids_(new HeapObjectsMap(heap)),
names_(new StringsStorage(heap)),
- is_tracking_object_moves_(false),
- get_retainer_infos_callback_(nullptr) {}
-
-static void DeleteHeapSnapshot(HeapSnapshot* snapshot_ptr) {
- delete snapshot_ptr;
-}
-
-
-HeapProfiler::~HeapProfiler() {
- std::for_each(snapshots_.begin(), snapshots_.end(), &DeleteHeapSnapshot);
-}
+ is_tracking_object_moves_(false) {}
+HeapProfiler::~HeapProfiler() = default;
void HeapProfiler::DeleteAllSnapshots() {
- std::for_each(snapshots_.begin(), snapshots_.end(), &DeleteHeapSnapshot);
snapshots_.clear();
names_.reset(new StringsStorage(heap()));
}
void HeapProfiler::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.erase(std::find(snapshots_.begin(), snapshots_.end(), snapshot));
+ snapshots_.erase(
+ std::find_if(snapshots_.begin(), snapshots_.end(),
+ [&](const std::unique_ptr<HeapSnapshot>& entry) {
+ return entry.get() == snapshot;
+ }));
}
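With snapshots_ now a vector of unique_ptrs, removal by raw pointer goes through find_if comparing entry.get(), and erase() itself destroys the snapshot, which is why the explicit deletes disappear elsewhere in this patch. The idiom in isolation:

    #include <algorithm>
    #include <cassert>
    #include <memory>
    #include <vector>

    struct Snapshot { int id; };

    // Erase the element owning 's'; the unique_ptr frees it on erase.
    void Remove(std::vector<std::unique_ptr<Snapshot>>* v, Snapshot* s) {
      v->erase(std::find_if(v->begin(), v->end(),
                            [&](const std::unique_ptr<Snapshot>& e) {
                              return e.get() == s;
                            }));
    }

    int main() {
      std::vector<std::unique_ptr<Snapshot>> v;
      v.push_back(std::make_unique<Snapshot>(Snapshot{1}));
      v.push_back(std::make_unique<Snapshot>(Snapshot{2}));
      Remove(&v, v[0].get());
      assert(v.size() == 1 && v[0]->id == 2);
    }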
@@ -75,6 +69,18 @@ v8::HeapProfiler::RetainerInfos HeapProfiler::GetRetainerInfos(
return infos;
}
+void HeapProfiler::SetBuildEmbedderGraphCallback(
+ v8::HeapProfiler::BuildEmbedderGraphCallback callback) {
+ build_embedder_graph_callback_ = callback;
+}
+
+void HeapProfiler::BuildEmbedderGraph(Isolate* isolate,
+ v8::EmbedderGraph* graph) {
+ if (build_embedder_graph_callback_ != nullptr)
+ build_embedder_graph_callback_(reinterpret_cast<v8::Isolate*>(isolate),
+ graph);
+}
+
HeapSnapshot* HeapProfiler::TakeSnapshot(
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
@@ -85,7 +91,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
delete result;
result = nullptr;
} else {
- snapshots_.push_back(result);
+ snapshots_.emplace_back(result);
}
}
ids_->RemoveDeadEntries();
@@ -153,7 +159,7 @@ int HeapProfiler::GetSnapshotsCount() {
}
HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
- return snapshots_.at(index);
+ return snapshots_.at(index).get();
}
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index da6814ddcb..d37a882805 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -8,7 +8,11 @@
#include <memory>
#include <vector>
-#include "src/isolate.h"
+#include "include/v8-profiler.h"
+#include "src/base/platform/mutex.h"
+#include "src/debug/debug-interface.h"
+#include "src/globals.h"
+#include "src/heap/heap.h"
namespace v8 {
namespace internal {
@@ -65,9 +69,15 @@ class HeapProfiler {
void SetGetRetainerInfosCallback(
v8::HeapProfiler::GetRetainerInfosCallback callback);
-
v8::HeapProfiler::RetainerInfos GetRetainerInfos(Isolate* isolate);
+ void SetBuildEmbedderGraphCallback(
+ v8::HeapProfiler::BuildEmbedderGraphCallback callback);
+ void BuildEmbedderGraph(Isolate* isolate, v8::EmbedderGraph* graph);
+ bool HasBuildEmbedderGraphCallback() {
+ return build_embedder_graph_callback_ != nullptr;
+ }
+
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
bool is_tracking_allocations() const { return !!allocation_tracker_; }
@@ -85,14 +95,17 @@ class HeapProfiler {
// Mapping from HeapObject addresses to objects' uids.
std::unique_ptr<HeapObjectsMap> ids_;
- std::vector<HeapSnapshot*> snapshots_;
+ std::vector<std::unique_ptr<HeapSnapshot>> snapshots_;
std::unique_ptr<StringsStorage> names_;
std::vector<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
std::unique_ptr<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
- v8::HeapProfiler::GetRetainerInfosCallback get_retainer_infos_callback_;
+ v8::HeapProfiler::GetRetainerInfosCallback get_retainer_infos_callback_ =
+ nullptr;
+ v8::HeapProfiler::BuildEmbedderGraphCallback build_embedder_graph_callback_ =
+ nullptr;
DISALLOW_COPY_AND_ASSIGN(HeapProfiler);
};
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 40779d9e5f..b1e033c5f5 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -10,6 +10,7 @@
#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
+#include "src/global-handles.h"
#include "src/layout-descriptor.h"
#include "src/objects-body-descriptors.h"
#include "src/objects-inl.h"
@@ -176,7 +177,7 @@ HeapSnapshot::HeapSnapshot(HeapProfiler* profiler)
((kPointerSize == 8) && (sizeof(HeapGraphEdge) == 24)));
STATIC_ASSERT(((kPointerSize == 4) && (sizeof(HeapEntry) == 28)) ||
((kPointerSize == 8) && (sizeof(HeapEntry) == 40)));
- for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
+ for (int i = 0; i < static_cast<int>(Root::kNumberOfRoots); ++i) {
gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
}
}
@@ -184,7 +185,6 @@ HeapSnapshot::HeapSnapshot(HeapProfiler* profiler)
void HeapSnapshot::Delete() {
profiler_->RemoveSnapshot(this);
- delete this;
}
@@ -197,8 +197,8 @@ void HeapSnapshot::AddSyntheticRootEntries() {
AddRootEntry();
AddGcRootsEntry();
SnapshotObjectId id = HeapObjectsMap::kGcRootsFirstSubrootId;
- for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
- AddGcSubrootEntry(tag, id);
+ for (int root = 0; root < static_cast<int>(Root::kNumberOfRoots); root++) {
+ AddGcSubrootEntry(static_cast<Root>(root), id);
id += HeapObjectsMap::kObjectIdStep;
}
DCHECK_EQ(HeapObjectsMap::kFirstAvailableObjectId, id);
@@ -230,13 +230,11 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry() {
return entry;
}
-
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag, SnapshotObjectId id) {
- DCHECK_EQ(gc_subroot_indexes_[tag], HeapEntry::kNoEntry);
- DCHECK(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
- HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
- VisitorSynchronization::kTagNames[tag], id, 0, 0);
- gc_subroot_indexes_[tag] = entry->index();
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(Root root, SnapshotObjectId id) {
+ DCHECK_EQ(gc_subroot_indexes_[static_cast<int>(root)], HeapEntry::kNoEntry);
+ HeapEntry* entry =
+ AddEntry(HeapEntry::kSynthetic, RootVisitor::RootName(root), id, 0, 0);
+ gc_subroot_indexes_[static_cast<int>(root)] = entry->index();
return entry;
}
@@ -307,7 +305,7 @@ const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
HeapObjectsMap::kGcRootsFirstSubrootId +
- VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
+ static_cast<int>(Root::kNumberOfRoots) * HeapObjectsMap::kObjectIdStep;
HeapObjectsMap::HeapObjectsMap(Heap* heap)
: next_id_(kFirstAvailableObjectId), heap_(heap) {
@@ -733,15 +731,15 @@ class SnapshotFiller {
HeapEntry* parent_entry = &snapshot_->entries()[parent];
parent_entry->SetNamedReference(type, reference_name, child_entry);
}
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type type, int parent,
+ const char* description,
HeapEntry* child_entry) {
HeapEntry* parent_entry = &snapshot_->entries()[parent];
int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- names_->GetName(index),
- child_entry);
+ const char* name = description
+ ? names_->GetFormatted("%d / %s", index, description)
+ : names_->GetName(index);
+ parent_entry->SetNamedReference(type, name, child_entry);
}
private:
@@ -857,6 +855,8 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
ExtractCodeReferences(entry, Code::cast(obj));
} else if (obj->IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
+ } else if (obj->IsFeedbackCell()) {
+ ExtractFeedbackCellReferences(entry, FeedbackCell::cast(obj));
} else if (obj->IsWeakCell()) {
ExtractWeakCellReferences(entry, WeakCell::cast(obj));
} else if (obj->IsPropertyCell()) {
@@ -929,11 +929,10 @@ void V8HeapExplorer::ExtractJSObjectReferences(
}
}
SharedFunctionInfo* shared_info = js_fun->shared();
- TagObject(js_fun->feedback_vector_cell(),
- "(function feedback vector cell)");
- SetInternalReference(js_fun, entry, "feedback_vector_cell",
- js_fun->feedback_vector_cell(),
- JSFunction::kFeedbackVectorOffset);
+ TagObject(js_fun->feedback_cell(), "(function feedback cell)");
+ SetInternalReference(js_fun, entry, "feedback_cell",
+ js_fun->feedback_cell(),
+ JSFunction::kFeedbackCellOffset);
TagObject(shared_info, "(shared function info)");
SetInternalReference(js_fun, entry,
"shared", shared_info,
@@ -1148,9 +1147,6 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
"scope_info", shared->scope_info(),
SharedFunctionInfo::kScopeInfoOffset);
SetInternalReference(obj, entry,
- "instance_class_name", shared->instance_class_name(),
- SharedFunctionInfo::kInstanceClassNameOffset);
- SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
const char* construct_stub_name = name ?
@@ -1234,9 +1230,6 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry,
"relocation_info", code->relocation_info(),
Code::kRelocationInfoOffset);
- SetInternalReference(code, entry,
- "handler_table", code->handler_table(),
- Code::kHandlerTableOffset);
TagObject(code->deoptimization_data(), "(code deopt data)");
SetInternalReference(code, entry,
"deoptimization_data", code->deoptimization_data(),
@@ -1251,6 +1244,13 @@ void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
}
+void V8HeapExplorer::ExtractFeedbackCellReferences(
+ int entry, FeedbackCell* feedback_cell) {
+ TagObject(feedback_cell, "(feedback cell)");
+ SetInternalReference(feedback_cell, entry, "value", feedback_cell->value(),
+ FeedbackCell::kValueOffset);
+}
+
void V8HeapExplorer::ExtractWeakCellReferences(int entry, WeakCell* weak_cell) {
TagObject(weak_cell, "(weak cell)");
SetWeakReference(weak_cell, entry, "value", weak_cell->value(),
@@ -1312,23 +1312,9 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(
}
void V8HeapExplorer::ExtractJSPromiseReferences(int entry, JSPromise* promise) {
- SetInternalReference(promise, entry, "result", promise->result(),
- JSPromise::kResultOffset);
- SetInternalReference(promise, entry, "deferred_promise",
- promise->deferred_promise(),
- JSPromise::kDeferredPromiseOffset);
- SetInternalReference(promise, entry, "deferred_on_resolve",
- promise->deferred_on_resolve(),
- JSPromise::kDeferredOnResolveOffset);
- SetInternalReference(promise, entry, "deferred_on_reject",
- promise->deferred_on_reject(),
- JSPromise::kDeferredOnRejectOffset);
- SetInternalReference(promise, entry, "fulfill_reactions",
- promise->fulfill_reactions(),
- JSPromise::kFulfillReactionsOffset);
- SetInternalReference(promise, entry, "reject_reactions",
- promise->reject_reactions(),
- JSPromise::kRejectReactionsOffset);
+ SetInternalReference(promise, entry, "reactions_or_result",
+ promise->reactions_or_result(),
+ JSPromise::kReactionsOrResultOffset);
}
void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
@@ -1347,12 +1333,20 @@ void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
int key_index =
ObjectHashTable::EntryToIndex(i) + ObjectHashTable::kEntryKeyIndex;
int value_index = ObjectHashTable::EntryToValueIndex(i);
- SetWeakReference(table, entry, key_index, table->get(key_index),
+ Object* key = table->get(key_index);
+ Object* value = table->get(value_index);
+ SetWeakReference(table, entry, key_index, key,
table->OffsetOfElementAt(key_index));
- SetInternalReference(table, entry, value_index, table->get(value_index),
+ SetInternalReference(table, entry, value_index, value,
table->OffsetOfElementAt(value_index));
- // TODO(alph): Add a strong link (shortcut?) from key to value per
- // WeakMap the key was added to. See crbug.com/778739
+      HeapEntry* key_entry = GetEntry(key);
+      HeapEntry* value_entry = GetEntry(value);
+      if (key_entry && value_entry) {
+        filler_->SetNamedAutoIndexReference(HeapGraphEdge::kInternal,
+                                            key_entry->index(), "WeakMap",
+                                            value_entry);
+      }
}
break;
}
@@ -1498,73 +1492,30 @@ HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
}
class RootsReferencesExtractor : public RootVisitor {
- private:
- struct IndexTag {
- IndexTag(size_t index, VisitorSynchronization::SyncTag tag)
- : index(index), tag(tag) {}
- size_t index;
- VisitorSynchronization::SyncTag tag;
- };
-
public:
- explicit RootsReferencesExtractor(Heap* heap)
- : collecting_all_references_(false),
- previous_reference_count_(0),
- heap_(heap) {
- }
+ explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
+ : explorer_(explorer), visiting_weak_roots_(false) {}
- void VisitRootPointers(Root root, Object** start, Object** end) override {
- if (collecting_all_references_) {
- for (Object** p = start; p < end; p++) all_references_.push_back(*p);
- } else {
- for (Object** p = start; p < end; p++) strong_references_.push_back(*p);
- }
- }
+ void SetVisitingWeakRoots() { visiting_weak_roots_ = true; }
- void SetCollectingAllReferences() { collecting_all_references_ = true; }
-
- void FillReferences(V8HeapExplorer* explorer) {
- DCHECK_LE(strong_references_.size(), all_references_.size());
- Builtins* builtins = heap_->isolate()->builtins();
- USE(builtins);
- size_t strong_index = 0, all_index = 0, tags_index = 0;
- int builtin_index = 0;
- while (all_index < all_references_.size()) {
- bool is_strong =
- strong_index < strong_references_.size() &&
- strong_references_[strong_index] == all_references_[all_index];
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- !is_strong,
- all_references_[all_index]);
- if (reference_tags_[tags_index].tag ==
- VisitorSynchronization::kBuiltins) {
- DCHECK(all_references_[all_index]->IsCode());
- explorer->TagBuiltinCodeObject(
- Code::cast(all_references_[all_index]),
- builtins->name(builtin_index++));
- }
- ++all_index;
- if (is_strong) ++strong_index;
- if (reference_tags_[tags_index].index == all_index) ++tags_index;
+ void VisitRootPointer(Root root, const char* description,
+ Object** object) override {
+ if (root == Root::kBuiltins) {
+ explorer_->TagBuiltinCodeObject(Code::cast(*object), description);
}
- CHECK_EQ(strong_index, strong_references_.size());
+ explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
+ *object);
}
- void Synchronize(VisitorSynchronization::SyncTag tag) override {
- if (collecting_all_references_ &&
- previous_reference_count_ != all_references_.size()) {
- previous_reference_count_ = all_references_.size();
- reference_tags_.emplace_back(previous_reference_count_, tag);
- }
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
+ for (Object** p = start; p < end; p++)
+ VisitRootPointer(root, description, p);
}
private:
- bool collecting_all_references_;
- std::vector<Object*> strong_references_;
- std::vector<Object*> all_references_;
- size_t previous_reference_count_;
- std::vector<IndexTag> reference_tags_;
- Heap* heap_;
+ V8HeapExplorer* explorer_;
+ bool visiting_weak_roots_;
};
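Editor's note: the rewrite drops the old two-pass bookkeeping (collect strong refs, collect all refs, diff them against Synchronize() tags) in favor of callbacks that carry the Root category and a human-readable description directly. A minimal sketch of a visitor written against the interface as it appears in this diff (V8-internal types Root and Object assumed):

    // Counts root pointers per category; assumes the VisitRootPointers
    // signature shown above (Root, const char* description, Object** range).
    class CountingRootVisitor : public RootVisitor {
     public:
      void VisitRootPointers(Root root, const char* description,
                             Object** start, Object** end) override {
        // |description| arrives with every batch, so no Synchronize()-based
        // tag bookkeeping is needed anymore.
        counts_[static_cast<int>(root)] += static_cast<int>(end - start);
      }
      int count(Root root) const { return counts_[static_cast<int>(root)]; }

     private:
      int counts_[static_cast<int>(Root::kNumberOfRoots)] = {};
    };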
@@ -1574,18 +1525,17 @@ bool V8HeapExplorer::IterateAndExtractReferences(
// Create references to the synthetic roots.
SetRootGcRootsReference();
- for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
- SetGcRootsReference(static_cast<VisitorSynchronization::SyncTag>(tag));
+ for (int root = 0; root < static_cast<int>(Root::kNumberOfRoots); root++) {
+ SetGcRootsReference(static_cast<Root>(root));
}
// Make sure builtin code objects get their builtin tags
// first. Otherwise a particular JSFunction object could set
// its custom name to a generic builtin.
- RootsReferencesExtractor extractor(heap_);
+ RootsReferencesExtractor extractor(this);
heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
- extractor.SetCollectingAllReferences();
- heap_->IterateRoots(&extractor, VISIT_ALL);
- extractor.FillReferences(this);
+ extractor.SetVisitingWeakRoots();
+ heap_->IterateWeakGlobalHandles(&extractor);
// We have to do two passes as sometimes FixedArrays are used
// to weakly hold their items, and it's impossible to distinguish
@@ -1846,39 +1796,31 @@ void V8HeapExplorer::SetRootGcRootsReference() {
void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
DCHECK_NOT_NULL(child_entry);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kShortcut,
- snapshot_->root()->index(),
- child_entry);
+ filler_->SetNamedAutoIndexReference(HeapGraphEdge::kShortcut,
+ snapshot_->root()->index(), nullptr,
+ child_entry);
}
-void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->gc_roots()->index(),
- snapshot_->gc_subroot(tag));
+void V8HeapExplorer::SetGcRootsReference(Root root) {
+ filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ snapshot_->gc_roots()->index(),
+ snapshot_->gc_subroot(root));
}
-void V8HeapExplorer::SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
+void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
+ bool is_weak, Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
const char* name = GetStrongGcSubrootName(child_obj);
+ HeapGraphEdge::Type edge_type =
+ is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kInternal;
if (name != nullptr) {
- DCHECK(!is_weak);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- snapshot_->gc_subroot(tag)->index(), name,
- child_entry);
+ filler_->SetNamedReference(edge_type, snapshot_->gc_subroot(root)->index(),
+ name, child_entry);
} else {
- if (is_weak) {
- filler_->SetNamedAutoIndexReference(HeapGraphEdge::kWeak,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
- } else {
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
- }
+ filler_->SetNamedAutoIndexReference(edge_type,
+ snapshot_->gc_subroot(root)->index(),
+ description, child_entry);
}
// Add a shortcut to JS global object reference at snapshot root.
@@ -1945,7 +1887,8 @@ void V8HeapExplorer::TagFixedArraySubType(const FixedArray* array,
class GlobalObjectsEnumerator : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsNativeContext()) continue;
JSObject* proxy = Context::cast(*p)->global_proxy();
@@ -1983,6 +1926,57 @@ void V8HeapExplorer::TagGlobalObjects() {
}
}
+class EmbedderGraphImpl : public EmbedderGraph {
+ public:
+ struct Edge {
+ Node* from;
+ Node* to;
+ };
+
+ class V8NodeImpl : public Node {
+ public:
+ explicit V8NodeImpl(Object* object) : object_(object) {}
+ Object* GetObject() { return object_; }
+
+ // Node overrides.
+ bool IsEmbedderNode() override { return false; }
+ const char* Name() override {
+ // The name should be retrieved via GetObject().
+ UNREACHABLE();
+ return "";
+ }
+ size_t SizeInBytes() override {
+ // The size should be retrieved via GetObject().
+ UNREACHABLE();
+ return 0;
+ }
+
+ private:
+ Object* object_;
+ };
+
+ Node* V8Node(const v8::Local<v8::Value>& value) final {
+ Handle<Object> object = v8::Utils::OpenHandle(*value);
+ DCHECK(!object.is_null());
+ return AddNode(std::unique_ptr<Node>(new V8NodeImpl(*object)));
+ }
+
+ Node* AddNode(std::unique_ptr<Node> node) final {
+ Node* result = node.get();
+ nodes_.push_back(std::move(node));
+ return result;
+ }
+
+ void AddEdge(Node* from, Node* to) final { edges_.push_back({from, to}); }
+
+ const std::vector<std::unique_ptr<Node>>& nodes() { return nodes_; }
+ const std::vector<Edge>& edges() { return edges_; }
+
+ private:
+ std::vector<std::unique_ptr<Node>> nodes_;
+ std::vector<Edge> edges_;
+};
+
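Editor's note: EmbedderGraphImpl is the internal backing store for the public v8::EmbedderGraph interface that an embedder fills in from its heap-snapshot callback. A hypothetical embedder-side sketch (MyNativeNode and AddNativePair are illustrative names, not V8 API):

    #include <memory>
    #include "v8-profiler.h"

    // A native node with a fixed name and size; Name() and SizeInBytes() are
    // the virtuals an embedder node must provide.
    class MyNativeNode : public v8::EmbedderGraph::Node {
     public:
      const char* Name() override { return "MyNativeThing"; }
      size_t SizeInBytes() override { return 128; }
    };

    // Adds one native node and an edge from it to its JS wrapper object.
    void AddNativePair(v8::EmbedderGraph* graph, v8::Local<v8::Value> wrapper) {
      v8::EmbedderGraph::Node* native = graph->AddNode(
          std::unique_ptr<v8::EmbedderGraph::Node>(new MyNativeNode()));
      graph->AddEdge(native, graph->V8Node(wrapper));
    }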
class GlobalHandlesExtractor : public PersistentHandleVisitor {
public:
explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
@@ -2034,6 +2028,60 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
0);
}
+class EmbedderGraphEntriesAllocator : public HeapEntriesAllocator {
+ public:
+ explicit EmbedderGraphEntriesAllocator(HeapSnapshot* snapshot)
+ : snapshot_(snapshot),
+ names_(snapshot_->profiler()->names()),
+ heap_object_map_(snapshot_->profiler()->heap_object_map()) {}
+ virtual HeapEntry* AllocateEntry(HeapThing ptr);
+
+ private:
+ HeapSnapshot* snapshot_;
+ StringsStorage* names_;
+ HeapObjectsMap* heap_object_map_;
+};
+
+namespace {
+
+const char* EmbedderGraphNodeName(StringsStorage* names,
+ EmbedderGraphImpl::Node* node) {
+ const char* prefix = node->NamePrefix();
+ return prefix ? names->GetFormatted("%s %s", prefix, node->Name())
+ : names->GetCopy(node->Name());
+}
+
+HeapEntry::Type EmbedderGraphNodeType(EmbedderGraphImpl::Node* node) {
+ return HeapEntry::kNative;
+}
+
+// Merges the names of an embedder node and its wrapper node.
+// If the wrapper node name contains a tag suffix (part after '/') then the
+// result is the embedder node name concatenated with the tag suffix.
+// Otherwise, the result is the embedder node name.
+const char* MergeNames(StringsStorage* names, const char* embedder_name,
+ const char* wrapper_name) {
+ for (const char* suffix = wrapper_name; *suffix; suffix++) {
+ if (*suffix == '/') {
+ return names->GetFormatted("%s %s", embedder_name, suffix);
+ }
+ }
+ return embedder_name;
+}
+
+} // anonymous namespace
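Editor's note: a worked example of MergeNames() above: with embedder_name "Div" and wrapper_name "HTMLDivElement / detached", the scan stops at '/' and GetFormatted yields "Div / detached"; with wrapper_name "HTMLDivElement" (no '/'), the embedder name "Div" is returned unchanged.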
+
+HeapEntry* EmbedderGraphEntriesAllocator::AllocateEntry(HeapThing ptr) {
+ EmbedderGraphImpl::Node* node =
+ reinterpret_cast<EmbedderGraphImpl::Node*>(ptr);
+ DCHECK(node->IsEmbedderNode());
+ size_t size = node->SizeInBytes();
+ return snapshot_->AddEntry(
+ EmbedderGraphNodeType(node), EmbedderGraphNodeName(names_, node),
+ static_cast<SnapshotObjectId>(reinterpret_cast<uintptr_t>(node) << 1),
+ static_cast<int>(size), 0);
+}
+
NativeObjectsExplorer::NativeObjectsExplorer(
HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
: isolate_(snapshot->profiler()->heap_object_map()->heap()->isolate()),
@@ -2042,13 +2090,13 @@ NativeObjectsExplorer::NativeObjectsExplorer(
embedder_queried_(false),
objects_by_info_(RetainedInfosMatch),
native_groups_(StringsMatch),
- filler_(nullptr) {
- synthetic_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
- native_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
-}
-
+ synthetic_entries_allocator_(
+ new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic)),
+ native_entries_allocator_(
+ new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative)),
+ embedder_graph_entries_allocator_(
+ new EmbedderGraphEntriesAllocator(snapshot)),
+ filler_(nullptr) {}
NativeObjectsExplorer::~NativeObjectsExplorer() {
for (base::HashMap::Entry* p = objects_by_info_.Start(); p != nullptr;
@@ -2066,8 +2114,6 @@ NativeObjectsExplorer::~NativeObjectsExplorer() {
reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
info->Dispose();
}
- delete synthetic_entries_allocator_;
- delete native_entries_allocator_;
}
@@ -2114,13 +2160,14 @@ void NativeObjectsExplorer::FillEdges() {
*pair.first->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
HeapObject* parent = HeapObject::cast(*parent_object);
int parent_entry =
- filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
+ filler_->FindOrAddEntry(parent, native_entries_allocator_.get())
+ ->index();
DCHECK_NE(parent_entry, HeapEntry::kNoEntry);
Handle<Object> child_object = v8::Utils::OpenHandle(
*pair.second->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
HeapObject* child = HeapObject::cast(*child_object);
HeapEntry* child_entry =
- filler_->FindOrAddEntry(child, native_entries_allocator_);
+ filler_->FindOrAddEntry(child, native_entries_allocator_.get());
filler_->SetNamedReference(HeapGraphEdge::kInternal, parent_entry, "native",
child_entry);
}
@@ -2139,25 +2186,83 @@ std::vector<HeapObject*>* NativeObjectsExplorer::GetVectorMaybeDisposeInfo(
return reinterpret_cast<std::vector<HeapObject*>*>(entry->value);
}
+HeapEntry* NativeObjectsExplorer::EntryForEmbedderGraphNode(
+ EmbedderGraphImpl::Node* node) {
+ EmbedderGraphImpl::Node* wrapper = node->WrapperNode();
+ if (wrapper) {
+ node = wrapper;
+ }
+ if (node->IsEmbedderNode()) {
+ return filler_->FindOrAddEntry(node,
+ embedder_graph_entries_allocator_.get());
+ } else {
+ EmbedderGraphImpl::V8NodeImpl* v8_node =
+ static_cast<EmbedderGraphImpl::V8NodeImpl*>(node);
+ Object* object = v8_node->GetObject();
+ if (object->IsSmi()) return nullptr;
+ HeapEntry* entry = filler_->FindEntry(HeapObject::cast(object));
+ return entry;
+ }
+}
bool NativeObjectsExplorer::IterateAndExtractReferences(
SnapshotFiller* filler) {
filler_ = filler;
- FillRetainedObjects();
- FillEdges();
- if (EstimateObjectsCount() > 0) {
- for (base::HashMap::Entry* p = objects_by_info_.Start(); p != nullptr;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- SetNativeRootReference(info);
- std::vector<HeapObject*>* objects =
- reinterpret_cast<std::vector<HeapObject*>*>(p->value);
- for (HeapObject* object : *objects) {
- SetWrapperNativeReferences(object, info);
+
+ if (FLAG_heap_profiler_use_embedder_graph &&
+ snapshot_->profiler()->HasBuildEmbedderGraphCallback()) {
+ v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
+ DisallowHeapAllocation no_allocation;
+ EmbedderGraphImpl graph;
+ snapshot_->profiler()->BuildEmbedderGraph(isolate_, &graph);
+ for (const auto& node : graph.nodes()) {
+ if (node->IsRootNode()) {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement, snapshot_->root()->index(),
+ EntryForEmbedderGraphNode(node.get()));
+ }
+ // Adjust the name and the type of the V8 wrapper node.
+ auto wrapper = node->WrapperNode();
+ if (wrapper) {
+ HeapEntry* wrapper_entry = EntryForEmbedderGraphNode(wrapper);
+ wrapper_entry->set_name(
+ MergeNames(names_, EmbedderGraphNodeName(names_, node.get()),
+ wrapper_entry->name()));
+ wrapper_entry->set_type(EmbedderGraphNodeType(node.get()));
+ }
+ }
+ // Fill edges of the graph.
+ for (const auto& edge : graph.edges()) {
+ HeapEntry* from = EntryForEmbedderGraphNode(edge.from);
+      // The |from| and |to| can be nullptr if the corresponding node is a V8
+      // node pointing to a Smi.

+ if (!from) continue;
+ // Adding an entry for |edge.to| can invalidate the |from| entry because
+ // it is an address in std::vector. Use index instead of pointer.
+ int from_index = from->index();
+ HeapEntry* to = EntryForEmbedderGraphNode(edge.to);
+ if (to) {
+ filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ from_index, to);
+ }
+ }
+ } else {
+ FillRetainedObjects();
+ FillEdges();
+ if (EstimateObjectsCount() > 0) {
+ for (base::HashMap::Entry* p = objects_by_info_.Start(); p != nullptr;
+ p = objects_by_info_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+ SetNativeRootReference(info);
+ std::vector<HeapObject*>* objects =
+ reinterpret_cast<std::vector<HeapObject*>*>(p->value);
+ for (HeapObject* object : *objects) {
+ SetWrapperNativeReferences(object, info);
+ }
}
+ SetRootNativeRootsReference();
}
- SetRootNativeRootsReference();
}
filler_ = nullptr;
return true;
@@ -2210,19 +2315,17 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
void NativeObjectsExplorer::SetNativeRootReference(
v8::RetainedObjectInfo* info) {
HeapEntry* child_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
+ filler_->FindOrAddEntry(info, native_entries_allocator_.get());
DCHECK_NOT_NULL(child_entry);
NativeGroupRetainedObjectInfo* group_info =
FindOrAddGroupInfo(info->GetGroupLabel());
HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
+ filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_.get());
// |FindOrAddEntry| can move and resize the entries backing store. Reload
// potentially-stale pointer.
child_entry = filler_->FindEntry(info);
filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kInternal,
- group_entry->index(),
- child_entry);
+ HeapGraphEdge::kInternal, group_entry->index(), nullptr, child_entry);
}
@@ -2231,7 +2334,7 @@ void NativeObjectsExplorer::SetWrapperNativeReferences(
HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
DCHECK_NOT_NULL(wrapper_entry);
HeapEntry* info_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
+ filler_->FindOrAddEntry(info, native_entries_allocator_.get());
DCHECK_NOT_NULL(info_entry);
filler_->SetNamedReference(HeapGraphEdge::kInternal,
wrapper_entry->index(),
@@ -2249,7 +2352,7 @@ void NativeObjectsExplorer::SetRootNativeRootsReference() {
NativeGroupRetainedObjectInfo* group_info =
static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, native_entries_allocator_);
+ filler_->FindOrAddEntry(group_info, native_entries_allocator_.get());
DCHECK_NOT_NULL(group_entry);
filler_->SetIndexedAutoIndexReference(
HeapGraphEdge::kElement,
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 2dacd5a9fe..5c7d88e0ca 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -113,6 +113,7 @@ class HeapEntry BASE_EMBEDDED {
HeapSnapshot* snapshot() { return snapshot_; }
Type type() const { return static_cast<Type>(type_); }
+ void set_type(Type type) { type_ = type; }
const char* name() const { return name_; }
void set_name(const char* name) { name_ = name; }
SnapshotObjectId id() const { return id_; }
@@ -165,8 +166,8 @@ class HeapSnapshot {
HeapProfiler* profiler() { return profiler_; }
HeapEntry* root() { return &entries_[root_index_]; }
HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
- HeapEntry* gc_subroot(int index) {
- return &entries_[gc_subroot_indexes_[index]];
+ HeapEntry* gc_subroot(Root root) {
+ return &entries_[gc_subroot_indexes_[static_cast<int>(root)]];
}
std::vector<HeapEntry>& entries() { return entries_; }
std::deque<HeapGraphEdge>& edges() { return edges_; }
@@ -191,12 +192,12 @@ class HeapSnapshot {
private:
HeapEntry* AddRootEntry();
HeapEntry* AddGcRootsEntry();
- HeapEntry* AddGcSubrootEntry(int tag, SnapshotObjectId id);
+ HeapEntry* AddGcSubrootEntry(Root root, SnapshotObjectId id);
HeapProfiler* profiler_;
int root_index_;
int gc_roots_index_;
- int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
+ int gc_subroot_indexes_[static_cast<int>(Root::kNumberOfRoots)];
std::vector<HeapEntry> entries_;
std::deque<HeapGraphEdge> edges_;
std::deque<HeapGraphEdge*> children_;
@@ -384,6 +385,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
void ExtractCodeReferences(int entry, Code* code);
void ExtractCellReferences(int entry, Cell* cell);
+ void ExtractFeedbackCellReferences(int entry, FeedbackCell* feedback_cell);
void ExtractWeakCellReferences(int entry, WeakCell* weak_cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
@@ -445,9 +447,9 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void SetUserGlobalReference(Object* user_global);
void SetRootGcRootsReference();
- void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
- void SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
+ void SetGcRootsReference(Root root);
+ void SetGcSubrootReference(Root root, const char* description, bool is_weak,
+ Object* child);
const char* GetStrongGcSubrootName(Object* object);
void TagObject(Object* obj, const char* tag);
void TagFixedArraySubType(const FixedArray* array,
@@ -514,6 +516,8 @@ class NativeObjectsExplorer {
NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
+ HeapEntry* EntryForEmbedderGraphNode(EmbedderGraph::Node* node);
+
Isolate* isolate_;
HeapSnapshot* snapshot_;
StringsStorage* names_;
@@ -522,8 +526,9 @@ class NativeObjectsExplorer {
// RetainedObjectInfo* -> std::vector<HeapObject*>*
base::CustomMatcherHashMap objects_by_info_;
base::CustomMatcherHashMap native_groups_;
- HeapEntriesAllocator* synthetic_entries_allocator_;
- HeapEntriesAllocator* native_entries_allocator_;
+ std::unique_ptr<HeapEntriesAllocator> synthetic_entries_allocator_;
+ std::unique_ptr<HeapEntriesAllocator> native_entries_allocator_;
+ std::unique_ptr<HeapEntriesAllocator> embedder_graph_entries_allocator_;
// Used during references extraction.
SnapshotFiller* filler_;
v8::HeapProfiler::RetainerEdges edges_;
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 5a7017ad49..970d462937 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -13,7 +13,8 @@ namespace internal {
CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix, const char* resource_name,
int line_number, int column_number,
- JITLineInfoTable* line_info, Address instruction_start)
+ std::unique_ptr<JITLineInfoTable> line_info,
+ Address instruction_start)
: bit_field_(TagField::encode(tag) |
BuiltinIdField::encode(Builtins::builtin_count)),
name_prefix_(name_prefix),
@@ -26,7 +27,7 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
bailout_reason_(kEmptyBailoutReason),
deopt_reason_(kNoDeoptReason),
deopt_id_(kNoDeoptimizationId),
- line_info_(line_info),
+ line_info_(std::move(line_info)),
instruction_start_(instruction_start) {}
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry,
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index bb6ede6d95..9786741b94 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -85,16 +85,6 @@ CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
CodeEntry::kUnresolvedFunctionName);
}
-CodeEntry::~CodeEntry() {
- delete line_info_;
- for (auto location : inline_locations_) {
- for (auto entry : location.second) {
- delete entry;
- }
- }
-}
-
-
uint32_t CodeEntry::GetHash() const {
uint32_t hash = ComputeIntegerHash(tag());
if (script_id_ != v8::UnboundScript::kNoScriptId) {
@@ -137,12 +127,13 @@ int CodeEntry::GetSourceLine(int pc_offset) const {
return v8::CpuProfileNode::kNoLineNumberInfo;
}
-void CodeEntry::AddInlineStack(int pc_offset,
- std::vector<CodeEntry*> inline_stack) {
+void CodeEntry::AddInlineStack(
+ int pc_offset, std::vector<std::unique_ptr<CodeEntry>> inline_stack) {
inline_locations_.insert(std::make_pair(pc_offset, std::move(inline_stack)));
}
-const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
+const std::vector<std::unique_ptr<CodeEntry>>* CodeEntry::GetInlineStack(
+ int pc_offset) const {
auto it = inline_locations_.find(pc_offset);
return it != inline_locations_.end() ? &it->second : nullptr;
}
@@ -528,9 +519,9 @@ void CodeMap::MoveCode(Address from, Address to) {
}
void CodeMap::Print() {
- for (auto it = code_map_.begin(); it != code_map_.end(); ++it) {
- base::OS::Print("%p %5d %s\n", static_cast<void*>(it->first),
- it->second.size, it->second.entry->name());
+ for (const auto& pair : code_map_) {
+ base::OS::Print("%p %5d %s\n", static_cast<void*>(pair.first),
+ pair.second.size, pair.second.entry->name());
}
}
@@ -539,12 +530,6 @@ CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
profiler_(nullptr),
current_profiles_semaphore_(1) {}
-CpuProfilesCollection::~CpuProfilesCollection() {
- for (CpuProfile* profile : finished_profiles_) delete profile;
- for (CpuProfile* profile : current_profiles_) delete profile;
-}
-
-
bool CpuProfilesCollection::StartProfiling(const char* title,
bool record_samples) {
current_profiles_semaphore_.Wait();
@@ -552,7 +537,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
current_profiles_semaphore_.Signal();
return false;
}
- for (CpuProfile* profile : current_profiles_) {
+ for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
if (strcmp(profile->title(), title) == 0) {
// Ignore attempts to start profile with the same title...
current_profiles_semaphore_.Signal();
@@ -560,7 +545,8 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
return true;
}
}
- current_profiles_.push_back(new CpuProfile(profiler_, title, record_samples));
+ current_profiles_.emplace_back(
+ new CpuProfile(profiler_, title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -570,19 +556,22 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
CpuProfile* profile = nullptr;
current_profiles_semaphore_.Wait();
- for (size_t i = current_profiles_.size(); i != 0; --i) {
- CpuProfile* current_profile = current_profiles_[i - 1];
- if (title_len == 0 || strcmp(current_profile->title(), title) == 0) {
- profile = current_profile;
- current_profiles_.erase(current_profiles_.begin() + i - 1);
- break;
- }
+
+ auto it =
+ std::find_if(current_profiles_.rbegin(), current_profiles_.rend(),
+ [&](const std::unique_ptr<CpuProfile>& p) {
+ return title_len == 0 || strcmp(p->title(), title) == 0;
+ });
+
+ if (it != current_profiles_.rend()) {
+ (*it)->FinishProfile();
+ profile = it->get();
+ finished_profiles_.push_back(std::move(*it));
+ // Convert reverse iterator to matching forward iterator.
+ current_profiles_.erase(--(it.base()));
}
- current_profiles_semaphore_.Signal();
- if (!profile) return nullptr;
- profile->FinishProfile();
- finished_profiles_.push_back(profile);
+ current_profiles_semaphore_.Signal();
return profile;
}
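Editor's note: StopProfiling now searches newest-first with std::find_if over reverse iterators; erasing through a reverse iterator needs a conversion, because rit.base() points one element past the one rit denotes. A standalone illustration of the same idiom (what the code above spells as --(it.base())):

    #include <algorithm>
    #include <cassert>
    #include <iterator>
    #include <vector>

    int main() {
      std::vector<int> v = {1, 2, 3, 2};
      auto rit = std::find(v.rbegin(), v.rend(), 2);  // finds the last 2
      assert(rit != v.rend());
      // std::next(rit).base() is the forward iterator for rit's element,
      // equivalent to --(rit.base()).
      v.erase(std::next(rit).base());
      assert((v == std::vector<int>{1, 2, 3}));
      return 0;
    }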
@@ -599,7 +588,10 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
// Called from VM thread for a completed profile.
auto pos =
- std::find(finished_profiles_.begin(), finished_profiles_.end(), profile);
+ std::find_if(finished_profiles_.begin(), finished_profiles_.end(),
+ [&](const std::unique_ptr<CpuProfile>& finished_profile) {
+ return finished_profile.get() == profile;
+ });
DCHECK(pos != finished_profiles_.end());
finished_profiles_.erase(pos);
}
@@ -611,7 +603,7 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
- for (CpuProfile* profile : current_profiles_) {
+ for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
profile->AddPath(timestamp, path, src_line, update_stats);
}
current_profiles_semaphore_.Signal();
@@ -684,11 +676,13 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Find out if the entry has an inlining stack associated.
int pc_offset =
static_cast<int>(stack_pos - entry->instruction_start());
- const std::vector<CodeEntry*>* inline_stack =
+ const std::vector<std::unique_ptr<CodeEntry>>* inline_stack =
entry->GetInlineStack(pc_offset);
if (inline_stack) {
- entries.insert(entries.end(), inline_stack->rbegin(),
- inline_stack->rend());
+ std::transform(
+ inline_stack->rbegin(), inline_stack->rend(),
+ std::back_inserter(entries),
+ [](const std::unique_ptr<CodeEntry>& ptr) { return ptr.get(); });
}
// Skip unresolved frames (e.g. internal frame) and get source line of
// the first JS caller.
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 819800ae6b..5abb955a46 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -8,6 +8,7 @@
#include <map>
#include <vector>
+#include "include/v8-profiler.h"
#include "src/allocation.h"
#include "src/base/hashmap.h"
#include "src/log.h"
@@ -47,9 +48,8 @@ class CodeEntry {
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
- JITLineInfoTable* line_info = nullptr,
+ std::unique_ptr<JITLineInfoTable> line_info = nullptr,
Address instruction_start = nullptr);
- ~CodeEntry();
const char* name_prefix() const { return name_prefix_; }
bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
@@ -57,7 +57,7 @@ class CodeEntry {
const char* resource_name() const { return resource_name_; }
int line_number() const { return line_number_; }
int column_number() const { return column_number_; }
- const JITLineInfoTable* line_info() const { return line_info_; }
+ const JITLineInfoTable* line_info() const { return line_info_.get(); }
int script_id() const { return script_id_; }
void set_script_id(int script_id) { script_id_ = script_id; }
int position() const { return position_; }
@@ -91,8 +91,10 @@ class CodeEntry {
int GetSourceLine(int pc_offset) const;
- void AddInlineStack(int pc_offset, std::vector<CodeEntry*> inline_stack);
- const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
+ void AddInlineStack(int pc_offset,
+ std::vector<std::unique_ptr<CodeEntry>> inline_stack);
+ const std::vector<std::unique_ptr<CodeEntry>>* GetInlineStack(
+ int pc_offset) const;
void AddDeoptInlinedFrames(int deopt_id, std::vector<CpuProfileDeoptFrame>);
bool HasDeoptInlinedFramesFor(int deopt_id) const;
@@ -160,10 +162,10 @@ class CodeEntry {
const char* bailout_reason_;
const char* deopt_reason_;
int deopt_id_;
- JITLineInfoTable* line_info_;
+ std::unique_ptr<JITLineInfoTable> line_info_;
Address instruction_start_;
// Should be an unordered_map, but it doesn't currently work on Win & MacOS.
- std::map<int, std::vector<CodeEntry*>> inline_locations_;
+ std::map<int, std::vector<std::unique_ptr<CodeEntry>>> inline_locations_;
std::map<int, std::vector<CpuProfileDeoptFrame>> deopt_inlined_frames_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
@@ -337,12 +339,13 @@ class CodeMap {
class CpuProfilesCollection {
public:
explicit CpuProfilesCollection(Isolate* isolate);
- ~CpuProfilesCollection();
void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
bool StartProfiling(const char* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
- std::vector<CpuProfile*>* profiles() { return &finished_profiles_; }
+ std::vector<std::unique_ptr<CpuProfile>>* profiles() {
+ return &finished_profiles_;
+ }
const char* GetName(Name* name) { return resource_names_.GetName(name); }
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
@@ -357,11 +360,11 @@ class CpuProfilesCollection {
private:
StringsStorage resource_names_;
- std::vector<CpuProfile*> finished_profiles_;
+ std::vector<std::unique_ptr<CpuProfile>> finished_profiles_;
CpuProfiler* profiler_;
// Accessed by VM thread and profile generator thread.
- std::vector<CpuProfile*> current_profiles_;
+ std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
base::Semaphore current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index bd2f158e60..cec71d70e0 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -5,10 +5,12 @@
#include "src/profiler/profiler-listener.h"
#include "src/deoptimizer.h"
+#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/source-position-table.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -81,10 +83,10 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = abstract_code->address();
- JITLineInfoTable* line_table = nullptr;
+ std::unique_ptr<JITLineInfoTable> line_table;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
- line_table = new JITLineInfoTable();
+ line_table.reset(new JITLineInfoTable());
int offset = abstract_code->IsCode() ? Code::kHeaderSize
: BytecodeArray::kHeaderSize;
for (SourcePositionTableIterator it(abstract_code->source_position_table());
@@ -101,8 +103,8 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
rec->entry = NewCodeEntry(
tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
- GetName(InferScriptName(script_name, shared)), line, column, line_table,
- abstract_code->instruction_start());
+ GetName(InferScriptName(script_name, shared)), line, column,
+ std::move(line_table), abstract_code->instruction_start());
RecordInliningInfo(rec->entry, abstract_code);
RecordDeoptInlinedFrames(rec->entry, abstract_code);
rec->entry->FillFunctionInfo(shared);
@@ -110,6 +112,24 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
DispatchCodeEvent(evt_rec);
}
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ wasm::WasmCode* code,
+ wasm::WasmName name) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->instructions().start();
+ // TODO(herhut): Instead of sanitizing here, make sure all wasm functions
+ // have names.
+ const char* name_ptr =
+ name.start() == nullptr ? "<anonymous>" : GetFunctionName(name.start());
+ rec->entry = NewCodeEntry(
+ tag, name_ptr, CodeEntry::kEmptyNamePrefix, CodeEntry::kEmptyResourceName,
+ CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+ nullptr, code->instructions().start());
+ rec->size = code->instructions().length();
+ DispatchCodeEvent(evt_rec);
+}
+
void ProfilerListener::CodeMoveEvent(AbstractCode* from, Address to) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
@@ -164,6 +184,20 @@ void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
DispatchCodeEvent(evt_rec);
}
+void ProfilerListener::InstructionStreamCreateEvent(
+ CodeEventListener::LogEventsAndTags tag, const InstructionStream* stream,
+ const char* description) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = stream->bytes();
+ rec->entry = NewCodeEntry(
+ tag, description, CodeEntry::kEmptyNamePrefix,
+ CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, nullptr, stream->bytes());
+ rec->size = static_cast<unsigned>(stream->byte_length());
+ DispatchCodeEvent(evt_rec);
+}
+
void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -199,7 +233,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
DCHECK_EQ(Translation::BEGIN, opcode);
it.Skip(Translation::NumberOfOperandsFor(opcode));
int depth = 0;
- std::vector<CodeEntry*> inline_stack;
+ std::vector<std::unique_ptr<CodeEntry>> inline_stack;
while (it.HasNext() &&
Translation::BEGIN !=
(opcode = static_cast<Translation::Opcode>(it.Next()))) {
@@ -227,7 +261,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
code->instruction_start());
inline_entry->FillFunctionInfo(shared_info);
- inline_stack.push_back(inline_entry);
+ inline_stack.emplace_back(inline_entry);
}
if (!inline_stack.empty()) {
entry->AddInlineStack(pc_offset, std::move(inline_stack));
@@ -276,10 +310,11 @@ void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
CodeEntry* ProfilerListener::NewCodeEntry(
CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix, const char* resource_name, int line_number,
- int column_number, JITLineInfoTable* line_info, Address instruction_start) {
+ int column_number, std::unique_ptr<JITLineInfoTable> line_info,
+ Address instruction_start) {
std::unique_ptr<CodeEntry> code_entry = base::make_unique<CodeEntry>(
tag, name, name_prefix, resource_name, line_number, column_number,
- line_info, instruction_start);
+ std::move(line_info), instruction_start);
CodeEntry* raw_code_entry = code_entry.get();
code_entries_.push_back(std::move(code_entry));
return raw_code_entry;
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index c111bf81c4..ca2c213a93 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -37,6 +37,9 @@ class ProfilerListener : public CodeEventListener {
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, SharedFunctionInfo* shared,
Name* script_name, int line, int column) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ wasm::WasmCode* code, wasm::WasmName name) override;
+
void CodeMovingGCEvent() override {}
void CodeMoveEvent(AbstractCode* from, Address to) override;
void CodeDisableOptEvent(AbstractCode* code,
@@ -45,6 +48,9 @@ class ProfilerListener : public CodeEventListener {
int fp_to_sp_delta) override;
void GetterCallbackEvent(Name* name, Address entry_point) override;
void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void InstructionStreamCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ const InstructionStream* stream,
+ const char* description) override;
void SetterCallbackEvent(Name* name, Address entry_point) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
@@ -54,7 +60,7 @@ class ProfilerListener : public CodeEventListener {
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
- JITLineInfoTable* line_info = nullptr,
+ std::unique_ptr<JITLineInfoTable> line_info = nullptr,
Address instruction_start = nullptr);
void AddObserver(CodeEventObserver* observer);
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index fef21550ec..31c885fef0 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -76,11 +76,7 @@ SamplingHeapProfiler::~SamplingHeapProfiler() {
heap_->RemoveAllocationObserversFromAllSpaces(other_spaces_observer_.get(),
new_space_observer_.get());
- for (auto sample : samples_) {
- delete sample;
- }
- std::set<Sample*> empty;
- samples_.swap(empty);
+ samples_.clear();
}
@@ -101,7 +97,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
AllocationNode* node = AddStack();
node->allocations_[size]++;
Sample* sample = new Sample(size, node, loc, this);
- samples_.insert(sample);
+ samples_.emplace(sample);
sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
}
@@ -123,8 +119,14 @@ void SamplingHeapProfiler::OnWeakCallback(
node = parent;
}
}
- sample->profiler->samples_.erase(sample);
- delete sample;
+ auto it = std::find_if(sample->profiler->samples_.begin(),
+ sample->profiler->samples_.end(),
+ [&sample](const std::unique_ptr<Sample>& s) {
+ return s.get() == sample;
+ });
+
+ sample->profiler->samples_.erase(it);
+ // sample is deleted because its unique ptr was erased from samples_.
}
SamplingHeapProfiler::AllocationNode*
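Editor's note: erasing from a std::set<std::unique_ptr<Sample>> by raw pointer forces the linear find_if above. A sketch of an alternative that keeps O(log n) lookup via a C++14 transparent comparator (hypothetical; not what this code does):

    #include <memory>
    #include <set>

    // Orders unique_ptrs by the address they own and, via is_transparent,
    // lets find()/erase() accept a raw pointer directly.
    struct OwnerLess {
      using is_transparent = void;
      template <class T>
      bool operator()(const std::unique_ptr<T>& a,
                      const std::unique_ptr<T>& b) const {
        return a.get() < b.get();
      }
      template <class T>
      bool operator()(const std::unique_ptr<T>& a, const T* b) const {
        return a.get() < b;
      }
      template <class T>
      bool operator()(const T* a, const std::unique_ptr<T>& b) const {
        return a < b.get();
      }
    };
    // Usage: std::set<std::unique_ptr<Sample>, OwnerLess> samples_;
    //        samples_.erase(samples_.find(raw_sample_ptr));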
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.h b/deps/v8/src/profiler/sampling-heap-profiler.h
index 3a347dd54e..46fa405279 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.h
+++ b/deps/v8/src/profiler/sampling-heap-profiler.h
@@ -146,7 +146,7 @@ class SamplingHeapProfiler {
std::unique_ptr<SamplingAllocationObserver> other_spaces_observer_;
StringsStorage* const names_;
AllocationNode profile_root_;
- std::set<Sample*> samples_;
+ std::set<std::unique_ptr<Sample>> samples_;
const int stack_depth_;
const uint64_t rate_;
v8::HeapProfiler::SamplingFlags flags_;
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 2e8ad779fd..9ea7770b4b 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -80,7 +80,7 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
const char* StringsStorage::GetName(Name* name) {
if (name->IsString()) {
String* str = String::cast(name);
- int length = Min(kMaxNameSize, str->length());
+ int length = Min(FLAG_heap_snapshot_string_limit, str->length());
int actual_length = 0;
std::unique_ptr<char[]> data = str->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index d73a9dd208..834b5a3335 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -31,8 +31,6 @@ class StringsStorage {
const char* GetFunctionName(const char* name);
private:
- static const int kMaxNameSize = 1024;
-
static bool StringsMatch(void* key1, void* key2);
const char* AddOrDisposeString(char* str, int len);
base::CustomMatcherHashMap::Entry* GetEntry(const char* str, int len);
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 44bf9af3d1..a6b8b26d00 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -204,7 +204,12 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
#endif
DCHECK(regs->sp);
- if (regs->pc && IsNoFrameRegion(static_cast<i::Address>(regs->pc))) {
+ // Check whether we interrupted setup/teardown of a stack frame in JS code.
+ // Avoid this check for C++ code, as that would trigger false positives.
+ if (regs->pc &&
+ isolate->heap()->memory_allocator()->code_range()->contains(
+ static_cast<i::Address>(regs->pc)) &&
+ IsNoFrameRegion(static_cast<i::Address>(regs->pc))) {
// The frame is not setup, so it'd be hard to iterate the stack. Bailout.
return false;
}
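Editor's note: IsNoFrameRegion() inspects the instruction bytes around pc to detect frame setup/teardown, a heuristic that only makes sense for V8-generated code; gating it on pc lying inside the isolate's code range keeps arbitrary C++ return addresses from tripping false positives.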
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
index e654f2be9d..ccd1fa42a2 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.h
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROFILER_TRACING_CPU_PROFILER_H
-#define V8_PROFILER_TRACING_CPU_PROFILER_H
+#ifndef V8_PROFILER_TRACING_CPU_PROFILER_H_
+#define V8_PROFILER_TRACING_CPU_PROFILER_H_
#include "include/v8-platform.h"
#include "include/v8-profiler.h"
@@ -43,4 +43,4 @@ class TracingCpuProfilerImpl final
} // namespace internal
} // namespace v8
-#endif // V8_PROFILER_TRACING_CPU_PROFILER_H
+#endif // V8_PROFILER_TRACING_CPU_PROFILER_H_
diff --git a/deps/v8/src/profiler/unbound-queue.h b/deps/v8/src/profiler/unbound-queue.h
index c53b35a8ed..062f1ce609 100644
--- a/deps/v8/src/profiler/unbound-queue.h
+++ b/deps/v8/src/profiler/unbound-queue.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROFILER_UNBOUND_QUEUE_
-#define V8_PROFILER_UNBOUND_QUEUE_
+#ifndef V8_PROFILER_UNBOUND_QUEUE_H_
+#define V8_PROFILER_UNBOUND_QUEUE_H_
#include "src/allocation.h"
#include "src/base/atomicops.h"
@@ -45,4 +45,4 @@ class UnboundQueue BASE_EMBEDDED {
} // namespace internal
} // namespace v8
-#endif // V8_PROFILER_UNBOUND_QUEUE_
+#endif // V8_PROFILER_UNBOUND_QUEUE_H_
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 5c744a1bd7..eccaeb006f 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -7,7 +7,11 @@
#include <iosfwd>
-#include "src/factory.h"
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/objects/name.h"
+#include "src/property-details.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 5f77ff4021..d366349640 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -41,7 +41,7 @@ namespace internal {
* - x29/fp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - x16/x17 : IP registers, used by assembler. Very volatile.
- * - csp : Points to tip of C stack.
+ * - sp : Points to tip of C stack.
*
* - x0-x7 : Used as a cache to store 32 bit capture registers. These
* registers need to be retained every time a call to C code
@@ -57,7 +57,7 @@ namespace internal {
* the code)
*
* - fp[96] isolate Address of the current isolate.
- * ^^^ csp when called ^^^
+ * ^^^ sp when called ^^^
* - fp[88] lr Return from the RegExp code.
* - fp[80] r29 Old frame pointer (CalleeSaved).
* - fp[0..72] r19-r28 Backup of CalleeSaved registers.
@@ -77,7 +77,7 @@ namespace internal {
* - ... num_saved_registers_ registers.
* - ...
* - register N + num_registers - 1
- * ^^^^^^^^^ csp ^^^^^^^^^
+ * ^^^^^^^^^ sp ^^^^^^^^^
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
@@ -704,9 +704,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// x6: Address stack_base
// x7: int direct_call
- // The stack pointer should be csp on entry.
- // csp[8]: address of the current isolate
- // csp[0]: secondary link/return address used by native call
+ // sp[8]: address of the current isolate
+ // sp[0]: secondary link/return address used by native call
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
@@ -719,12 +718,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
DCHECK_EQ(11, kCalleeSaved.Count());
registers_to_retain.Combine(lr);
- DCHECK(csp.Is(__ StackPointer()));
__ PushCPURegList(registers_to_retain);
__ PushCPURegList(argument_registers);
// Set frame pointer in place.
- __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
+ __ Add(frame_pointer(), sp, argument_registers.Count() * kPointerSize);
// Initialize callee-saved registers.
__ Mov(start_offset(), w1);
@@ -755,7 +753,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
ExternalReference::address_of_stack_limit(isolate());
__ Mov(x10, stack_limit);
__ Ldr(x10, MemOperand(x10));
- __ Subs(x10, csp, x10);
+ __ Subs(x10, sp, x10);
// Handle it if the stack pointer is already below the stack limit.
__ B(ls, &stack_limit_hit);
@@ -1015,9 +1013,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Bind(&return_w0);
// Set stack pointer back to first register to retain
- DCHECK(csp.Is(__ StackPointer()));
- __ Mov(csp, fp);
- __ AssertStackConsistency();
+ __ Mov(sp, fp);
// Restore registers.
__ PopCPURegList(registers_to_retain);
@@ -1036,7 +1032,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// The cached registers need to be retained.
__ PushCPURegList(cached_registers);
CallCheckStackGuardState(x10);
- // Returning from the regexp code restores the stack (csp <- fp)
+ // Returning from the regexp code restores the stack (sp <- fp)
// so we don't need to drop the link register from it before exiting.
__ Cbnz(w0, &return_w0);
// Reset the cached registers.
@@ -1059,7 +1055,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, 3);
// If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- // Returning from the regexp code restores the stack (csp <- fp)
+ // Returning from the regexp code restores the stack (sp <- fp)
// so we don't need to drop the link register from it before exiting.
__ Cbz(w0, &exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -1366,14 +1362,13 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
int align_mask = (alignment / kXRegSize) - 1;
int xreg_to_claim = (3 + align_mask) & ~align_mask;
- DCHECK(csp.Is(__ StackPointer()));
__ Claim(xreg_to_claim);
// CheckStackGuardState needs the end and start addresses of the input string.
__ Poke(input_end(), 2 * kPointerSize);
- __ Add(x5, csp, 2 * kPointerSize);
+ __ Add(x5, sp, 2 * kPointerSize);
__ Poke(input_start(), kPointerSize);
- __ Add(x4, csp, kPointerSize);
+ __ Add(x4, sp, kPointerSize);
__ Mov(w3, start_offset());
// RegExp code frame pointer.
@@ -1384,7 +1379,7 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
// We need to pass a pointer to the return address as first argument.
// The DirectCEntry stub will place the return address on the stack before
// calling so the stack pointer will point to it.
- __ Mov(x0, csp);
+ __ Mov(x0, sp);
ExternalReference check_stack_guard_state =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1396,7 +1391,6 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
__ Peek(input_start(), kPointerSize);
__ Peek(input_end(), 2 * kPointerSize);
- DCHECK(csp.Is(__ StackPointer()));
__ Drop(xreg_to_claim);
// Reload the Code pointer.
@@ -1445,8 +1439,7 @@ void RegExpMacroAssemblerARM64::CheckPreemption() {
ExternalReference::address_of_stack_limit(isolate());
__ Mov(x10, stack_limit);
__ Ldr(x10, MemOperand(x10));
- DCHECK(csp.Is(__ StackPointer()));
- __ Cmp(csp, x10);
+ __ Cmp(sp, x10);
CallIf(&check_preempt_label_, ls);
}
@@ -1557,14 +1550,12 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
- DCHECK(csp.Is(__ StackPointer()));
__ Pop(lr, xzr);
__ Add(lr, lr, Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerARM64::SaveLinkRegister() {
- DCHECK(csp.Is(__ StackPointer()));
__ Sub(lr, lr, Operand(masm_->CodeObject()));
__ Push(xzr, lr);
}
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index 7ba028020b..2c1b890c4f 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -519,6 +519,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
break;
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE)
+ V8_FALLTHROUGH;
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
bool unicode =
(insn & BYTECODE_MASK) == BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE;
@@ -537,6 +538,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
break;
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD)
+ V8_FALLTHROUGH;
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
bool unicode = (insn & BYTECODE_MASK) ==
BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD;
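Editor's note: the V8_FALLTHROUGH annotations here and below replace bare "// Fall through." comments so compilers running -Wimplicit-fallthrough can verify every case. A common shape for such a macro, shown purely for illustration (V8's actual definition lives in its configuration headers and may differ):

    #if defined(__clang__)
    #define V8_FALLTHROUGH [[clang::fallthrough]]
    #elif defined(__GNUC__) && __GNUC__ >= 7
    #define V8_FALLTHROUGH __attribute__((fallthrough))
    #else
    #define V8_FALLTHROUGH
    #endif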
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index a26a1d77ce..b90b0a51a6 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -1732,7 +1732,7 @@ static inline bool EmitAtomLetter(Isolate* isolate,
}
case 4:
macro_assembler->CheckCharacter(chars[3], &ok);
- // Fall through!
+ V8_FALLTHROUGH;
case 3:
macro_assembler->CheckCharacter(chars[0], &ok);
macro_assembler->CheckCharacter(chars[1], &ok);
@@ -2768,16 +2768,13 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
Vector<const uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
uint16_t c = quarks[j];
- if (c <= String::kMaxOneByteCharCode) continue;
- if (!IgnoreCase(elm.atom()->flags())) return set_replacement(nullptr);
- // Here, we need to check for characters whose upper and lower cases
- // are outside the Latin-1 range.
- uint16_t converted = unibrow::Latin1::ConvertNonLatin1ToLatin1(c);
- // Character is outside Latin-1 completely
- if (converted == 0) return set_replacement(nullptr);
- // Convert quark to Latin-1 in place.
- uint16_t* copy = const_cast<uint16_t*>(quarks.start());
- copy[j] = converted;
+ if (elm.atom()->ignore_case()) {
+ c = unibrow::Latin1::TryConvertToLatin1(c);
+ }
+ if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
+ // Replace quark in case we converted to Latin-1.
+ uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.start());
+ writable_quarks[j] = c;
}
} else {
DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
@@ -3209,10 +3206,17 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
if (first_element_checked && i == 0 && j == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
EmitCharacterFunction* emit_function = nullptr;
+ uc16 quark = quarks[j];
+ if (elm.atom()->ignore_case()) {
+ // Everywhere else we assume that a non-Latin-1 character cannot match
+      // a Latin-1 character. Avoid the cases where this assumption is
+ // invalid by using the Latin1 equivalent instead.
+ quark = unibrow::Latin1::TryConvertToLatin1(quark);
+ }
switch (pass) {
case NON_LATIN1_MATCH:
DCHECK(one_byte);
- if (quarks[j] > String::kMaxOneByteCharCode) {
+ if (quark > String::kMaxOneByteCharCode) {
assembler->GoTo(backtrack);
return;
}
@@ -3232,8 +3236,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
if (emit_function != nullptr) {
bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
bool bound_checked =
- emit_function(isolate, compiler, quarks[j], backtrack,
- cp_offset + j, bounds_check, preloaded);
+ emit_function(isolate, compiler, quark, backtrack, cp_offset + j,
+ bounds_check, preloaded);
if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
}
}
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
index 3fce7dd688..4ce9d7f91d 100644
--- a/deps/v8/src/regexp/mips/OWNERS
+++ b/deps/v8/src/regexp/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
\ No newline at end of file
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/src/regexp/mips64/OWNERS
+++ b/deps/v8/src/regexp/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com
\ No newline at end of file
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index c0023f409b..37c1d3fbb6 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#ifndef V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
+#define V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
#include "src/macro-assembler.h"
#include "src/mips64/assembler-mips64.h"
@@ -228,4 +228,4 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#endif // V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index a7724c5d42..72ed5b8d69 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -399,8 +399,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
break;
}
+ V8_FALLTHROUGH;
}
- // Fall through.
case '0': {
Advance();
if (unicode() && Next() >= '0' && Next() <= '9') {
@@ -493,7 +493,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ParseNamedBackReference(builder, state CHECK_FAILED);
break;
}
- // Fall through.
+ V8_FALLTHROUGH;
default:
Advance();
// With /u, no identity escapes except for syntax characters
@@ -511,14 +511,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
int dummy;
bool parsed = ParseIntervalQuantifier(&dummy, &dummy CHECK_FAILED);
if (parsed) return ReportError(CStrVector("Nothing to repeat"));
- // Fall through.
+ V8_FALLTHROUGH;
}
case '}':
case ']':
if (unicode()) {
return ReportError(CStrVector("Lone quantifier brackets"));
}
- // Fall through.
+ V8_FALLTHROUGH;
default:
builder->AddUnicodeCharacter(current());
Advance();
@@ -684,7 +684,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
Advance();
break;
}
- // Fall through.
+ V8_FALLTHROUGH;
default:
ReportError(CStrVector("Invalid group"));
return nullptr;
@@ -1515,7 +1515,7 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
Advance();
return 0;
}
- // Fall through.
+ V8_FALLTHROUGH;
case '1':
case '2':
case '3':
@@ -1986,8 +1986,14 @@ bool RegExpBuilder::AddQuantifierToAtom(
} else if (terms_.length() > 0) {
DCHECK(last_added_ == ADD_ATOM);
atom = terms_.RemoveLast();
- // With /u, lookarounds are not quantifiable.
- if (unicode() && atom->IsLookaround()) return false;
+ if (atom->IsLookaround()) {
+ // With /u, lookarounds are not quantifiable.
+ if (unicode()) return false;
+ // Lookbehinds are not quantifiable.
+ if (atom->AsLookaround()->type() == RegExpLookaround::LOOKBEHIND) {
+ return false;
+ }
+ }
if (atom->max_match() == 0) {
// Guaranteed to only match an empty string.
LAST(ADD_TERM);
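Editor's note: with the extra check, a directly quantified lookbehind such as /(?<=a)*b/ is now rejected at parse time, while quantified lookaheads remain legal outside /u mode for web compatibility.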
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 94603cd7c9..40ba5ece25 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
-#define V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+#ifndef V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+#define V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
#include "src/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -210,4 +210,4 @@ const RegList kRegExpCalleeSaved =
} // namespace internal
} // namespace v8
-#endif // V8_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
+#endif // V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 6ecc5519f3..6b472a0b4e 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -64,29 +64,6 @@ STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
Simd128Register::kNumRegisters);
-static int get_num_allocatable_general_registers() {
- return
-#if V8_TARGET_ARCH_IA32
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_X64
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_ARM
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_ARM64
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_MIPS
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_MIPS64
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_PPC
- kMaxAllocatableGeneralRegisterCount;
-#elif V8_TARGET_ARCH_S390
- kMaxAllocatableGeneralRegisterCount;
-#else
-#error Unsupported target architecture.
-#endif
-}
-
static int get_num_allocatable_double_registers() {
return
#if V8_TARGET_ARCH_IA32
@@ -127,7 +104,7 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
ArchDefaultRegisterConfiguration()
: RegisterConfiguration(
Register::kNumRegisters, DoubleRegister::kNumRegisters,
- get_num_allocatable_general_registers(),
+ kMaxAllocatableGeneralRegisterCount,
get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
get_allocatable_double_codes(),
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
@@ -145,6 +122,66 @@ static base::LazyInstance<ArchDefaultRegisterConfiguration,
RegisterConfigurationInitializer>::type
kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+// Allocatable registers with the masking register removed.
+class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
+ public:
+ ArchDefaultPoisoningRegisterConfiguration()
+ : RegisterConfiguration(
+ Register::kNumRegisters, DoubleRegister::kNumRegisters,
+ kMaxAllocatableGeneralRegisterCount - 1,
+ get_num_allocatable_double_registers(),
+ InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
+ kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
+ InitializeGeneralRegisterNames(), kFloatRegisterNames,
+ kDoubleRegisterNames, kSimd128RegisterNames) {}
+
+ private:
+ static char const* const* InitializeGeneralRegisterNames() {
+ int filtered_index = 0;
+ for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
+ if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
+ general_register_names_[filtered_index] = kGeneralRegisterNames[i];
+ filtered_index++;
+ }
+ }
+ DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
+ return general_register_names_;
+ }
+
+ static const int* InitializeGeneralRegisterCodes() {
+ int filtered_index = 0;
+ for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
+ if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
+ allocatable_general_codes_[filtered_index] =
+ kAllocatableGeneralCodes[i];
+ filtered_index++;
+ }
+ }
+ DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
+ return allocatable_general_codes_;
+ }
+
+ static const char*
+ general_register_names_[kMaxAllocatableGeneralRegisterCount - 1];
+ static int
+ allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
+};
+
+const char* ArchDefaultPoisoningRegisterConfiguration::general_register_names_
+ [kMaxAllocatableGeneralRegisterCount - 1];
+int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
+ [kMaxAllocatableGeneralRegisterCount - 1];
+
+struct PoisoningRegisterConfigurationInitializer {
+ static void Construct(void* config) {
+ new (config) ArchDefaultPoisoningRegisterConfiguration();
+ }
+};
+
+static base::LazyInstance<ArchDefaultPoisoningRegisterConfiguration,
+ PoisoningRegisterConfigurationInitializer>::type
+ kDefaultPoisoningRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers that the architecture supports, which results in generated
// assembly using fewer registers. Currently, it's only used by the
// RecordWrite code stub.
@@ -193,6 +230,10 @@ const RegisterConfiguration* RegisterConfiguration::Default() {
return &kDefaultRegisterConfiguration.Get();
}
+const RegisterConfiguration* RegisterConfiguration::Poisoning() {
+ return &kDefaultPoisoningRegisterConfiguration.Get();
+}
+
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
int num = NumRegs(registers);
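The new poisoning configuration above is just the default allocatable set with kSpeculationPoisonRegister filtered out; the two InitializeGeneralRegister* helpers do nothing more than copy every code except the poison one. The filtering step in isolation, as a self-contained sketch with invented register codes:

    #include <array>
    #include <cassert>
    #include <cstdio>

    // Hypothetical allocatable codes and poison register; the real values are
    // per-architecture constants in V8.
    constexpr int kAllocatable[] = {0, 1, 2, 3, 5, 7};
    constexpr int kPoisonCode = 5;
    constexpr int kCount = sizeof(kAllocatable) / sizeof(kAllocatable[0]);

    int main() {
      std::array<int, kCount - 1> filtered{};
      int out = 0;
      for (int code : kAllocatable) {
        if (code != kPoisonCode) filtered[out++] = code;  // drop the poison register
      }
      assert(out == kCount - 1);  // mirrors the DCHECK_EQ in the diff
      for (int i = 0; i < out; ++i) std::printf("%d ", filtered[i]);
      std::printf("\n");
      return 0;
    }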
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index 59aeab8742..1299baac69 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
-#define V8_COMPILER_REGISTER_CONFIGURATION_H_
+#ifndef V8_REGISTER_CONFIGURATION_H_
+#define V8_REGISTER_CONFIGURATION_H_
#include "src/base/macros.h"
#include "src/globals.h"
@@ -31,6 +31,9 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
// Default RegisterConfigurations for the target architecture.
static const RegisterConfiguration* Default();
+ // Register configuration with the masking register reserved.
+ static const RegisterConfiguration* Poisoning();
+
static const RegisterConfiguration* RestrictGeneralRegisters(
RegList registers);
@@ -165,4 +168,4 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_REGISTER_CONFIGURATION_H_
+#endif // V8_REGISTER_CONFIGURATION_H_
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index f07c842bae..648606a274 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -149,7 +149,8 @@ Object* PrepareElementsForSort(Handle<JSObject> object, uint32_t limit) {
JSObject::ValidateElements(*object);
} else if (object->HasFixedTypedArrayElements()) {
// Typed arrays cannot have holes or undefined elements.
- return Smi::FromInt(FixedArrayBase::cast(object->elements())->length());
+ int array_length = FixedArrayBase::cast(object->elements())->length();
+ return Smi::FromInt(Min(limit, static_cast<uint32_t>(array_length)));
} else if (!object->HasDoubleElements()) {
JSObject::EnsureWritableFastElements(object);
}
@@ -390,7 +391,7 @@ RUNTIME_FUNCTION(Runtime_TrySliceSimpleNonFastElements) {
// implementation.
if (receiver->IsJSArray()) {
// This "fastish" path must make sure the destination array is a JSArray.
- if (!isolate->IsArraySpeciesLookupChainIntact() ||
+ if (!isolate->IsSpeciesLookupChainIntact() ||
!JSArray::cast(*receiver)->HasArrayPrototype(isolate)) {
return Smi::FromInt(0);
}
@@ -532,17 +533,15 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
return *array;
}
-
-// GrowArrayElements returns a sentinel Smi if the object was normalized.
+// GrowArrayElements returns a sentinel Smi if the object was normalized or if
+// the key is negative.
RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(int, key, Int32, args[1]);
- if (key < 0) {
- return object->elements();
- }
+ if (key < 0) return Smi::kZero;
uint32_t capacity = static_cast<uint32_t>(object->elements()->length());
uint32_t index = static_cast<uint32_t>(key);
@@ -553,7 +552,6 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
}
}
- // On success, return the fixed array elements.
return object->elements();
}
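Two fixes land in this file: PrepareElementsForSort now clamps its typed-array answer to the caller's limit instead of always reporting the full length, and GrowArrayElements signals a negative key with Smi::kZero (a sentinel the caller already checks for) rather than returning the elements pointer. The clamp in isolation, as plain C++ with illustrative names:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Mirrors Min(limit, static_cast<uint32_t>(array_length)) from the diff:
    // a typed array never needs more elements prepared than the sort limit.
    uint32_t SortableLength(uint32_t limit, int array_length) {
      return std::min(limit, static_cast<uint32_t>(array_length));
    }

    int main() {
      std::printf("%u\n", SortableLength(3, 10));   // 3, not 10
      std::printf("%u\n", SortableLength(50, 10));  // 10
      return 0;
    }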
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 68a7b413b5..9849c694dc 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -249,30 +249,6 @@ inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
V(Uint32, uint32, UINT32, uint32_t, 4) \
V(Int32, int32, INT32, int32_t, 4)
-RUNTIME_FUNCTION(Runtime_ThrowNotIntegerSharedTypedArrayError) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kNotIntegerSharedTypedArray, value));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowNotInt32SharedTypedArrayError) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotInt32SharedTypedArray, value));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidAtomicAccessIndex));
-}
-
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index 47f644f619..ce0d8990a1 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -75,6 +75,13 @@ RUNTIME_FUNCTION(Runtime_BigIntToNumber) {
return *BigInt::ToNumber(x);
}
+RUNTIME_FUNCTION(Runtime_ToBigInt) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, x));
+}
+
RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
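The new Runtime_ToBigInt follows the standard runtime-function shape: open a HandleScope, assert the argument count, extract typed arguments, and return a tagged result or failure. A mock of that skeleton outside V8, with stand-in macros rather than the real ones from src/runtime/runtime-utils.h:

    #include <cassert>
    #include <cstdio>

    struct Object { int value; };
    struct Isolate {};

    // Stand-ins only: V8's real macros thread through Arguments and handles.
    #define RUNTIME_FUNCTION(Name) \
      Object Name(int args_length, const Object* args, Isolate* isolate)
    #define CONVERT_ARG_CHECKED(name, index) Object name = args[index]

    RUNTIME_FUNCTION(Runtime_ToBigIntMock) {
      (void)isolate;             // unused in this mock
      assert(args_length == 1);  // mirrors DCHECK_EQ(1, args.length())
      CONVERT_ARG_CHECKED(x, 0);
      return Object{x.value};    // placeholder for BigInt::FromObject
    }

    int main() {
      Object arg{42};
      std::printf("%d\n", Runtime_ToBigIntMock(1, &arg, nullptr).value);
      return 0;
    }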
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 44e947aafe..efe4f455b1 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -11,24 +11,22 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_TheHole) {
+RUNTIME_FUNCTION(Runtime_IsJSMapIterator) {
SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- return isolate->heap()->the_hole_value();
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(args[0]->IsJSMapIterator());
}
-RUNTIME_FUNCTION(Runtime_GetExistingHash) {
+RUNTIME_FUNCTION(Runtime_IsJSSetIterator) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- return object->GetHash();
+ return isolate->heap()->ToBoolean(args[0]->IsJSSetIterator());
}
-RUNTIME_FUNCTION(Runtime_GenericHash) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- return object->GetOrCreateHash(isolate);
+RUNTIME_FUNCTION(Runtime_TheHole) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->the_hole_value();
}
RUNTIME_FUNCTION(Runtime_SetGrow) {
@@ -101,15 +99,6 @@ RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
return *JSWeakCollection::GetEntries(holder, max_entries);
}
-RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- JSWeakCollection::Initialize(weak_collection, isolate);
- return *weak_collection;
-}
-
-
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 96292ad1c5..d149af652b 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -21,15 +21,6 @@ RUNTIME_FUNCTION(Runtime_IsDate) {
return isolate->heap()->ToBoolean(obj->IsJSDate());
}
-
-RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError(MessageTemplate::kNotDateObject));
-}
-
-
RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index d6e028b41e..daef53280e 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -43,7 +43,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
- isolate->debug()->Break(it.frame());
+ isolate->debug()->Break(it.frame(), handle(it.frame()->function()));
// Return the handler from the original bytecode array.
DCHECK(it.frame()->is_interpreted());
@@ -53,21 +53,25 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
BytecodeArray* bytecode_array = shared->bytecode_array();
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
Bytecode bytecode = Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
- if (bytecode == Bytecode::kReturn) {
- // If we are returning, reset the bytecode array on the interpreted stack
- // frame to the non-debug variant so that the interpreter entry trampoline
- // sees the return bytecode rather than the DebugBreak.
+ if (Bytecodes::Returns(bytecode)) {
+ // If we are returning (or suspending), reset the bytecode array on the
+ // interpreted stack frame to the non-debug variant so that the interpreter
+ // entry trampoline sees the return/suspend bytecode rather than the
+ // DebugBreak.
interpreted_frame->PatchBytecodeArray(bytecode_array);
}
// We do not have to deal with operand scale here. If the bytecode at the
// break is prefixed by operand scaling, we would have patched over the
// scaling prefix. We now simply dispatch to the handler for the prefix.
+ // We need to deserialize now to ensure we don't hit the debug break again
+ // after deserializing.
OperandScale operand_scale = OperandScale::kSingle;
- Code* code = isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(
- bytecode, operand_scale);
+ isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(bytecode,
+ operand_scale);
- return MakePair(isolate->debug()->return_value(), code);
+ return MakePair(isolate->debug()->return_value(),
+ Smi::FromInt(static_cast<uint8_t>(bytecode)));
}
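The break handler now hands the original bytecode back as a Smi instead of a code object, and the check widened from a single kReturn comparison to Bytecodes::Returns(bytecode), which also covers the suspend bytecodes used by generators. The widened predicate, sketched standalone with invented bytecode values:

    #include <cstdio>

    enum class Bytecode { kLdar, kReturn, kSuspendGenerator };

    // In the spirit of Bytecodes::Returns(): anything that leaves the frame,
    // whether a real return or a generator suspension, counts.
    bool Returns(Bytecode b) {
      return b == Bytecode::kReturn || b == Bytecode::kSuspendGenerator;
    }

    int main() {
      std::printf("%d %d\n", Returns(Bytecode::kSuspendGenerator),
                  Returns(Bytecode::kLdar));
      return 0;
    }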
@@ -81,27 +85,6 @@ RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
}
-// Adds a JavaScript function as a debug event listener.
-// args[0]: debug event listener function to set or null or undefined for
-// clearing the event listener function
-// args[1]: object supplied during callback
-RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CHECK(args[0]->IsJSFunction() || args[0]->IsNullOrUndefined(isolate));
- CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
- if (callback->IsJSFunction()) {
- JavaScriptDebugDelegate* delegate = new JavaScriptDebugDelegate(
- isolate, Handle<JSFunction>::cast(callback), data);
- isolate->debug()->SetDebugDelegate(delegate, true);
- } else {
- isolate->debug()->SetDebugDelegate(nullptr, false);
- }
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -261,7 +244,10 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
result->set(1, *status_str);
- Handle<Object> value_obj(promise->result(), isolate);
+ Handle<Object> value_obj(promise->status() == Promise::kPending
+ ? isolate->heap()->undefined_value()
+ : promise->result(),
+ isolate);
Handle<String> promise_value =
factory->NewStringFromAsciiChecked("[[PromiseValue]]");
result->set(2, *promise_value);
@@ -855,8 +841,7 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
// local).
if (frame->is_wasm_interpreter_entry()) {
Handle<WasmDebugInfo> debug_info(
- WasmInterpreterEntryFrame::cast(frame)->wasm_instance()->debug_info(),
- isolate);
+ WasmInterpreterEntryFrame::cast(frame)->debug_info(), isolate);
return *WasmDebugInfo::GetScopeDetails(debug_info, frame->fp(),
inlined_frame_index);
}
@@ -1036,36 +1021,6 @@ RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
}
-RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
-
-#ifdef DEBUG
- // Print the scopes for the top frame.
- JavaScriptFrameIterator it(isolate);
- if (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- FrameInspector frame_inspector(frame, 0, isolate);
- for (ScopeIterator si(isolate, &frame_inspector); !si.Done(); si.Next()) {
- si.DebugPrint();
- }
- }
-#endif
- return isolate->heap()->undefined_value();
-}
-
-
-// Sets the disable break state
-// args[0]: disable break state
-RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_BOOLEAN_ARG_CHECKED(active, 0);
- isolate->debug()->set_break_points_active(active);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -1084,71 +1039,6 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
}
-// Set a break point in a function.
-// args[0]: function
-// args[1]: number: break source position (within the function source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CHECK(isolate->debug()->is_active());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- CHECK(source_position >= function->shared()->start_position() &&
- source_position <= function->shared()->end_position());
- CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2);
-
- // Set break point.
- CHECK(isolate->debug()->SetBreakPoint(function, break_point_object_arg,
- &source_position));
-
- return Smi::FromInt(source_position);
-}
-
-// Changes the state of a break point in a script and returns source position
-// where break point was set. NOTE: Regarding performance see the NOTE for
-// GetScriptFromScriptData.
-// args[0]: script to set break point in
-// args[1]: number: break source position (within the script source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CHECK(isolate->debug()->is_active());
- CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- CHECK_GE(source_position, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2);
-
- // Get the script from the script wrapper.
- CHECK(wrapper->value()->IsScript());
- Handle<Script> script(Script::cast(wrapper->value()));
-
- // Set break point.
- if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
- &source_position)) {
- return isolate->heap()->undefined_value();
- }
-
- return Smi::FromInt(source_position);
-}
-
-
-// Clear a break point
-// args[0]: number: break point object
-RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CHECK(isolate->debug()->is_active());
- CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0);
-
- // Clear break point.
- isolate->debug()->ClearBreakPoint(break_point_object_arg);
-
- return isolate->heap()->undefined_value();
-}
-
-
// Change the state of break on exceptions.
// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
// args[1]: Boolean indicating on/off.
@@ -1572,46 +1462,6 @@ int ScriptLinePosition(Handle<Script> script, int line) {
} // namespace
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- return Smi::FromInt(ScriptLinePosition(script_handle, line));
-}
-
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptLineEndPosition) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- if (script_handle->type() == Script::TYPE_WASM) {
- // Return zero for now; this function will disappear soon anyway.
- return Smi::FromInt(0);
- }
-
- Script::InitLineEnds(script_handle);
-
- FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
- const int line_count = line_ends_array->length();
-
- if (line < 0 || line >= line_count) {
- return Smi::FromInt(-1);
- } else {
- return Smi::cast(line_ends_array->get(line));
- }
-}
-
static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
Script::OffsetFlag offset_flag,
Isolate* isolate) {
@@ -1774,56 +1624,26 @@ RUNTIME_FUNCTION(Runtime_ScriptPositionInfo2) {
return *GetJSPositionInfo(script, position, offset_flag, isolate);
}
-// Returns the given line as a string, or null if line is out of bounds.
-// The parameter line is expected to include the script's line offset.
-// TODO(5530): Remove once uses in debug.js are gone.
-RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSValue, script, 0);
- CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
-
- if (script_handle->type() == Script::TYPE_WASM) {
- // Return null for now; this function will disappear soon anyway.
- return isolate->heap()->null_value();
- }
-
- Script::InitLineEnds(script_handle);
-
- FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
- const int line_count = line_ends_array->length();
-
- line -= script_handle->line_offset();
- if (line < 0 || line_count <= line) {
- return isolate->heap()->null_value();
- }
-
- const int start =
- (line == 0) ? 0 : Smi::ToInt(line_ends_array->get(line - 1)) + 1;
- const int end = Smi::ToInt(line_ends_array->get(line));
-
- Handle<String> source =
- handle(String::cast(script_handle->source()), isolate);
- Handle<String> str = isolate->factory()->NewSubString(source, start, end);
-
- return *str;
-}
-
// On function call, depending on circumstances, prepare for stepping in,
// or perform a side effect check.
RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- if (isolate->debug()->last_step_action() >= StepIn) {
- isolate->debug()->PrepareStepIn(fun);
+ if (isolate->debug()->needs_check_on_function_call()) {
+ // Ensure that the callee will also perform the debug check on function call.
+ Deoptimizer::DeoptimizeFunction(*fun);
+ if (isolate->debug()->last_step_action() >= StepIn) {
+ isolate->debug()->PrepareStepIn(fun);
+ }
+ if (isolate->needs_side_effect_check() &&
+ !isolate->debug()->PerformSideEffectCheck(fun)) {
+ return isolate->heap()->exception();
+ }
}
- if (isolate->needs_side_effect_check() &&
- !isolate->debug()->PerformSideEffectCheck(fun)) {
- return isolate->heap()->exception();
+ if (fun->shared()->HasDebugInfo() &&
+ fun->shared()->GetDebugInfo()->BreakAtEntry()) {
+ isolate->debug()->Break(nullptr, fun);
}
return isolate->heap()->undefined_value();
}
@@ -1836,15 +1656,6 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInSuspendedGenerator) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugRecordGenerator) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
- CHECK(isolate->debug()->last_step_action() >= StepNext);
- isolate->debug()->RecordGenerator(generator);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
@@ -1876,26 +1687,11 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionPromiseCreated) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugPromiseReject) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, rejected_promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
- isolate->debug()->OnPromiseReject(rejected_promise, value);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DebugIsActive) {
SealHandleScope shs(isolate);
return Smi::FromInt(isolate->debug()->is_active());
}
-RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
- UNIMPLEMENTED();
- return nullptr;
-}
-
namespace {
Handle<JSObject> MakeRangeObject(Isolate* isolate, const CoverageBlock& range) {
Factory* factory = isolate->factory();
diff --git a/deps/v8/src/runtime/runtime-error.cc b/deps/v8/src/runtime/runtime-error.cc
index 6ded550d04..7cd98f223b 100644
--- a/deps/v8/src/runtime/runtime-error.cc
+++ b/deps/v8/src/runtime/runtime-error.cc
@@ -20,5 +20,11 @@ RUNTIME_FUNCTION(Runtime_ErrorToString) {
RETURN_RESULT_OR_FAILURE(isolate, ErrorUtils::ToString(isolate, recv));
}
+RUNTIME_FUNCTION(Runtime_IsJSError) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(args[0]->IsJSError());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index e9433d2041..a9eddef644 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -88,17 +88,6 @@ RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
return fun->native_context()->debug_context_id();
}
-RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- CONVERT_SMI_ARG_CHECKED(length, 1);
- fun->shared()->set_length(length);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -156,10 +145,10 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
Handle<Context> context(source->context());
target->set_context(*context);
- // Make sure we get a fresh copy of the literal vector to avoid cross
- // context contamination, and that the literal vector makes it's way into
+ // Make sure we get a fresh copy of the feedback vector to avoid cross
+ // context contamination, and that the feedback vector makes its way into
// the target_shared optimized code map.
- JSFunction::EnsureLiterals(target);
+ JSFunction::EnsureFeedbackVector(target);
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
isolate->logger()->LogExistingFunction(
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 9323d236bc..a7d14b839e 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -11,6 +11,12 @@
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_IsJSGeneratorObject) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(args[0]->IsJSGeneratorObject());
+}
+
RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -30,6 +36,9 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
generator->set_receiver(*receiver);
generator->set_register_file(*register_file);
generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
+ if (generator->IsJSAsyncGeneratorObject()) {
+ Handle<JSAsyncGeneratorObject>::cast(generator)->set_is_awaiting(0);
+ }
return *generator;
}
@@ -55,13 +64,31 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
return generator->receiver();
}
-RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
+RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
}
-RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
+RUNTIME_FUNCTION(Runtime_AsyncFunctionAwaitCaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncFunctionAwaitUncaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncGeneratorAwaitCaught) {
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering
+ UNREACHABLE();
+}
+
+RUNTIME_FUNCTION(Runtime_AsyncGeneratorAwaitUncaught) {
// Runtime call is implemented in InterpreterIntrinsics and lowered in
// JSIntrinsicLowering
UNREACHABLE();
@@ -126,12 +153,11 @@ RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
SharedFunctionInfo* shared = generator->function()->shared();
DCHECK(shared->HasBytecodeArray());
- HandlerTable* handler_table =
- HandlerTable::cast(shared->bytecode_array()->handler_table());
+ HandlerTable handler_table(shared->bytecode_array());
int pc = Smi::cast(generator->input_or_debug_pos())->value();
HandlerTable::CatchPrediction catch_prediction = HandlerTable::ASYNC_AWAIT;
- handler_table->LookupRange(pc, nullptr, &catch_prediction);
+ handler_table.LookupRange(pc, nullptr, &catch_prediction);
return isolate->heap()->ToBoolean(catch_prediction == HandlerTable::CAUGHT);
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index f9e9375543..a24ded7e21 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -30,6 +30,12 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_IsScriptWrapper) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(args[0]->IsScriptWrapper());
+}
+
RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -206,30 +212,6 @@ RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
return *isolate->factory()->NewSyntaxError(message_template, arg0);
}
-RUNTIME_FUNCTION(Runtime_ThrowCannotConvertToPrimitive) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCannotConvertToPrimitive));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowIncompatibleMethodReceiver) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, arg1, 1);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, arg0, arg1));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowInvalidHint) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, hint, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kInvalidHint, hint));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
HandleScope scope(isolate);
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
@@ -258,18 +240,6 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolIteratorInvalid) {
isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
}
-RUNTIME_FUNCTION(Runtime_ThrowNonCallableInInstanceOfCheck) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNonCallableInInstanceOfCheck));
-}
-
-RUNTIME_FUNCTION(Runtime_ThrowNonObjectInInstanceOfCheck) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -278,13 +248,6 @@ RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
isolate, NewTypeError(MessageTemplate::kNotConstructor, object));
}
-RUNTIME_FUNCTION(Runtime_ThrowGeneratorRunning) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kGeneratorRunning));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -454,14 +417,6 @@ RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(id, callsite));
}
-RUNTIME_FUNCTION(Runtime_ThrowCalledOnNullOrUndefined) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, name));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -486,14 +441,6 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructorReturnedNonObject) {
NewTypeError(MessageTemplate::kDerivedConstructorReturnedNonObject));
}
-RUNTIME_FUNCTION(Runtime_ThrowUndefinedOrNullToObject) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject, name));
-}
-
// ES6 section 7.3.17 CreateListFromArrayLike (obj)
RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
HandleScope scope(isolate);
@@ -526,10 +473,6 @@ RUNTIME_FUNCTION(Runtime_DeserializeLazy) {
DCHECK(Builtins::IsLazy(builtin_id));
DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
- if (FLAG_trace_lazy_deserialization) {
- PrintF("Lazy-deserializing builtin %s\n", Builtins::name(builtin_id));
- }
-
Code* code = Snapshot::DeserializeBuiltin(isolate, builtin_id);
DCHECK_EQ(builtin_id, code->builtin_index());
DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
@@ -638,17 +581,21 @@ RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
}
+ Handle<Object> next;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, next,
+ Object::GetProperty(sync_iterator, isolate->factory()->next_string()));
+
return *isolate->factory()->NewJSAsyncFromSyncIterator(
- Handle<JSReceiver>::cast(sync_iterator));
+ Handle<JSReceiver>::cast(sync_iterator), next);
}
-RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
+RUNTIME_FUNCTION(Runtime_CreateTemplateObject) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(TemplateObjectDescription, description, 0);
- return *TemplateObjectDescription::GetTemplateObject(
- description, isolate->native_context());
+ return *TemplateObjectDescription::CreateTemplateObject(description);
}
RUNTIME_FUNCTION(Runtime_ReportMessage) {
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index b65a2327a3..836bf4d5f6 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -41,21 +41,6 @@ RUNTIME_FUNCTION(Runtime_InterpreterDeserializeLazy) {
bytecode, operand_scale);
}
-RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
- CONVERT_SMI_ARG_CHECKED(index, 2);
- CONVERT_SMI_ARG_CHECKED(pretenured_flag, 3);
- Handle<Context> context(isolate->context(), isolate);
- FeedbackSlot slot = FeedbackVector::ToSlot(index);
- Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
- return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, vector_cell,
- static_cast<PretenureFlag>(pretenured_flag));
-}
-
#ifdef V8_TRACE_IGNITION
namespace {
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index aaa6034e80..a0e0db8cd0 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -15,6 +15,7 @@
#include "src/api.h"
#include "src/arguments.h"
#include "src/factory.h"
+#include "src/global-handles.h"
#include "src/intl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
@@ -626,7 +627,8 @@ RUNTIME_FUNCTION(Runtime_PluralRulesSelect) {
icu::UnicodeString result = plural_rules->select(rounded);
return *isolate->factory()
->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ reinterpret_cast<const uint16_t*>(
+ icu::toUCharPtr(result.getBuffer())),
result.length()))
.ToHandleChecked();
}
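The icu::toUCharPtr wrapper is needed because UnicodeString::getBuffer() returns const char16_t* in modern ICU while older call sites traffic in UChar*; the helper bridges the two regardless of how UChar is typedef'd, after which the uint16_t cast is safe. A minimal standalone use, assuming ICU 59 or newer (link with -licuuc):

    #include <cstdint>
    #include <cstdio>
    #include <unicode/char16ptr.h>
    #include <unicode/unistr.h>

    int main() {
      icu::UnicodeString s = icu::UnicodeString::fromUTF8("few");
      // toUCharPtr converts char16_t* to UChar* whatever UChar is defined as.
      const uint16_t* raw =
          reinterpret_cast<const uint16_t*>(icu::toUCharPtr(s.getBuffer()));
      std::printf("first unit: U+%04X, length: %d\n", raw[0], s.length());
      return 0;
    }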
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index a9fb48f887..a758050306 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -37,24 +37,6 @@ RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
return *Module::GetModuleNamespace(module, module_request);
}
-RUNTIME_FUNCTION(Runtime_LoadModuleVariable) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(index, 0);
- Handle<Module> module(isolate->context()->module());
- return *Module::LoadVariable(module, index);
-}
-
-RUNTIME_FUNCTION(Runtime_StoreModuleVariable) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(index, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- Handle<Module> module(isolate->context()->module());
- Module::StoreVariable(module, index, value);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_GetImportMetaObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 057ead9407..90dddab211 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -34,6 +34,14 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
MaybeHandle<Object> result = Object::GetProperty(&it);
if (is_found_out) *is_found_out = it.IsFound();
+
+ if (!it.IsFound() && key->IsSymbol() &&
+ Symbol::cast(*key)->is_private_field()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldAccess, key, object),
+ Object);
+ }
return result;
}
@@ -390,6 +398,14 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
LookupIterator::PropertyOrElement(isolate, object, key, &success);
if (!success) return MaybeHandle<Object>();
+ if (!it.IsFound() && key->IsSymbol() &&
+ Symbol::cast(*key)->is_private_field()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldAccess, key, object),
+ Object);
+ }
+
MAYBE_RETURN_NULL(Object::SetProperty(&it, value, language_mode,
Object::MAY_BE_STORE_FROM_KEYED));
return value;
@@ -439,6 +455,61 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
return *object;
}
+RUNTIME_FUNCTION(Runtime_ObjectValues) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> values;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, values,
+ JSReceiver::GetOwnValues(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ true));
+ return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectValuesSkipFastPath) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ JSReceiver::GetOwnValues(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ false));
+ return *isolate->factory()->NewJSArrayWithElements(value);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectEntries) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ true));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, PropertyFilter::ENUMERABLE_STRINGS,
+ false));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
@@ -687,26 +758,6 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
}
-RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
- CHECK_EQ(index->value() & 1, 1);
- FieldIndex field_index =
- FieldIndex::ForLoadByFieldIndex(object->map(), index->value());
- if (field_index.is_inobject()) {
- CHECK(field_index.property_index() <
- object->map()->GetInObjectProperties());
- } else {
- CHECK(field_index.outobject_array_index() <
- object->property_dictionary()->length());
- }
- return *JSObject::FastPropertyAt(object, Representation::Double(),
- field_index);
-}
-
-
RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -724,13 +775,6 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
}
-RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy());
-}
-
static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
return obj->IsNullOrUndefined(isolate) || obj->IsCallable();
}
@@ -770,10 +814,11 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 4);
CONVERT_SMI_ARG_CHECKED(index, 5);
- StoreDataPropertyInLiteralICNexus nexus(vector, vector->ToSlot(index));
+ FeedbackNexus nexus(vector, FeedbackVector::ToSlot(index));
if (nexus.ic_state() == UNINITIALIZED) {
if (name->IsUniqueName()) {
- nexus.ConfigureMonomorphic(name, handle(object->map()));
+ nexus.ConfigureMonomorphic(name, handle(object->map()),
+ Handle<Code>::null());
} else {
nexus.ConfigureMegamorphic(PROPERTY);
}
@@ -833,31 +878,12 @@ RUNTIME_FUNCTION(Runtime_CollectTypeProfile) {
}
DCHECK(vector->metadata()->HasTypeProfileSlot());
- CollectTypeProfileNexus nexus(vector, vector->GetTypeProfileSlot());
+ FeedbackNexus nexus(vector, vector->GetTypeProfileSlot());
nexus.Collect(type, position->value());
return isolate->heap()->undefined_value();
}
-// Return property without being observable by accessors or interceptors.
-RUNTIME_FUNCTION(Runtime_GetDataProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- return *JSReceiver::GetDataProperty(object, name);
-}
-
-RUNTIME_FUNCTION(Runtime_GetConstructorName) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-
- CHECK(!object->IsNullOrUndefined(isolate));
- Handle<JSReceiver> recv = Object::ToObject(isolate, object).ToHandleChecked();
- return *JSReceiver::GetConstructorName(recv);
-}
-
RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -1175,9 +1201,13 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
RUNTIME_FUNCTION(Runtime_IterableToListCanBeElided) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- if (!obj->IsJSObject()) return isolate->heap()->ToBoolean(false);
+ // If an iterator symbol is added to the Number prototype, we could see a Smi.
+ if (obj->IsSmi()) return isolate->heap()->ToBoolean(false);
+ if (!HeapObject::cast(*obj)->IsJSObject()) {
+ return isolate->heap()->ToBoolean(false);
+ }
// While iteration alone may not have observable side-effects, calling
// toNumber on an object will. Make sure the arg is not an array of objects.
@@ -1203,5 +1233,27 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
return *desc.ToPropertyDescriptorObject(isolate);
}
+RUNTIME_FUNCTION(Runtime_AddPrivateField) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, o, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Symbol, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ DCHECK(key->is_private_field());
+
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, o, key, LookupIterator::OWN);
+
+ if (it.IsFound()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kVarRedeclaration, key));
+ }
+
+ CHECK(Object::AddDataProperty(&it, value, NONE, kDontThrow,
+ Object::MAY_BE_STORE_FROM_KEYED)
+ .FromJust());
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
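Taken together, the private-field changes in this file enforce three rules: reading a missing private field throws a TypeError instead of yielding undefined, writing a missing one throws the same way, and Runtime_AddPrivateField refuses to redefine a field that already exists. The same contract sketched over a plain map, with every name here illustrative rather than V8 API:

    #include <cstdio>
    #include <map>
    #include <stdexcept>
    #include <string>

    class PrivateFields {
     public:
      void Add(const std::string& key, int value) {
        // Mirrors Runtime_AddPrivateField: redefinition is an error.
        if (fields_.count(key)) throw std::runtime_error("redeclaration of " + key);
        fields_[key] = value;
      }
      int Get(const std::string& key) const {
        // Mirrors GetObjectProperty: a missing private field throws rather
        // than silently producing undefined.
        auto it = fields_.find(key);
        if (it == fields_.end()) throw std::runtime_error("invalid access to " + key);
        return it->second;
      }

     private:
      std::map<std::string, int> fields_;
    };

    int main() {
      PrivateFields obj;
      obj.Add("#x", 42);
      std::printf("%d\n", obj.Get("#x"));
      try { obj.Get("#y"); } catch (const std::exception& e) { std::puts(e.what()); }
      return 0;
    }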
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index 42a7e21b82..d01d115892 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -9,33 +9,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_Multiply) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::Multiply(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_Divide) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::Divide(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_Modulus) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::Modulus(isolate, lhs, rhs));
-}
-
-
RUNTIME_FUNCTION(Runtime_Add) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -45,69 +18,6 @@ RUNTIME_FUNCTION(Runtime_Add) {
}
-RUNTIME_FUNCTION(Runtime_Subtract) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::Subtract(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ShiftLeft) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ShiftLeft(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ShiftRight) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::ShiftRight(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ShiftRightLogical) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate,
- Object::ShiftRightLogical(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_BitwiseAnd) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseAnd(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_BitwiseOr) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseOr(isolate, lhs, rhs));
-}
-
-
-RUNTIME_FUNCTION(Runtime_BitwiseXor) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseXor(isolate, lhs, rhs));
-}
-
RUNTIME_FUNCTION(Runtime_Equal) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -184,14 +94,5 @@ RUNTIME_FUNCTION(Runtime_GreaterThanOrEqual) {
return isolate->heap()->ToBoolean(result.FromJust());
}
-RUNTIME_FUNCTION(Runtime_InstanceOf) {
- HandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
- RETURN_RESULT_OR_FAILURE(isolate,
- Object::InstanceOf(isolate, object, callable));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 2c28cd3c98..2d3a4fda50 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -1,8 +1,10 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
#include "src/runtime/runtime-utils.h"
+#include "src/api.h"
#include "src/arguments.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -12,27 +14,6 @@
namespace v8 {
namespace internal {
-namespace {
-
-void PromiseRejectEvent(Isolate* isolate, Handle<JSPromise> promise,
- Handle<Object> rejected_promise, Handle<Object> value,
- bool debug_event) {
- isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
-
- if (isolate->debug()->is_active() && debug_event) {
- isolate->debug()->OnPromiseReject(rejected_promise, value);
- }
-
- // Report only if we don't actually have a handler.
- if (!promise->has_handler()) {
- isolate->ReportPromiseReject(promise, value,
- v8::kPromiseRejectWithNoHandler);
- }
-}
-
-} // namespace
-
RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
@@ -41,21 +22,19 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
Handle<Object> rejected_promise = promise;
if (isolate->debug()->is_active()) {
- // If the Promise.reject call is caught, then this will return
- // undefined, which will be interpreted by PromiseRejectEvent
- // as being a caught exception event.
+ // If the Promise.reject() call is caught, then this will return
+ // undefined, which we interpret as being a caught exception event.
rejected_promise = isolate->GetPromiseOnStackOnThrow();
}
- PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
- return isolate->heap()->undefined_value();
-}
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+ isolate->debug()->OnPromiseReject(rejected_promise, value);
-RUNTIME_FUNCTION(Runtime_ReportPromiseReject) {
- DCHECK_EQ(2, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- isolate->ReportPromiseReject(promise, value, v8::kPromiseRejectWithNoHandler);
+ // Report only if we don't actually have a handler.
+ if (!promise->has_handler()) {
+ isolate->ReportPromiseReject(promise, value,
+ v8::kPromiseRejectWithNoHandler);
+ }
return isolate->heap()->undefined_value();
}
@@ -73,7 +52,9 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ Handle<CallableTask> microtask =
+ isolate->factory()->NewCallableTask(function, isolate->native_context());
isolate->EnqueueMicrotask(microtask);
return isolate->heap()->undefined_value();
}
@@ -85,6 +66,17 @@ RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_RunMicrotaskCallback) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, microtask_callback, 0);
+ CONVERT_ARG_CHECKED(Object, microtask_data, 1);
+ MicrotaskCallback callback = ToCData<MicrotaskCallback>(microtask_callback);
+ void* data = ToCData<void*>(microtask_data);
+ callback(data);
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_PromiseStatus) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -118,23 +110,17 @@ RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_PromiseHookResolve) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- if (promise->IsJSPromise()) {
- isolate->RunPromiseHook(PromiseHookType::kBefore,
- Handle<JSPromise>::cast(promise),
- isolate->factory()->undefined_value());
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, payload, 0);
+ Handle<JSPromise> promise;
+ if (JSPromise::From(payload).ToHandle(&promise)) {
+ if (isolate->debug()->is_active()) isolate->PushPromise(promise);
+ if (promise->IsJSPromise()) {
+ isolate->RunPromiseHook(PromiseHookType::kBefore, promise,
+ isolate->factory()->undefined_value());
+ }
}
return isolate->heap()->undefined_value();
}
@@ -142,14 +128,37 @@ RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
RUNTIME_FUNCTION(Runtime_PromiseHookAfter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- if (promise->IsJSPromise()) {
- isolate->RunPromiseHook(PromiseHookType::kAfter,
- Handle<JSPromise>::cast(promise),
- isolate->factory()->undefined_value());
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, payload, 0);
+ Handle<JSPromise> promise;
+ if (JSPromise::From(payload).ToHandle(&promise)) {
+ if (isolate->debug()->is_active()) isolate->PopPromise();
+ if (promise->IsJSPromise()) {
+ isolate->RunPromiseHook(PromiseHookType::kAfter, promise,
+ isolate->factory()->undefined_value());
+ }
}
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_RejectPromise) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Oddball, debug_event, 2);
+ return *JSPromise::Reject(promise, reason, debug_event->BooleanValue());
+}
+
+RUNTIME_FUNCTION(Runtime_ResolvePromise) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, resolution, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSPromise::Resolve(promise, resolution));
+ return *result;
+}
+
} // namespace internal
} // namespace v8
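Runtime_RunMicrotaskCallback is the trampoline that lets embedders push plain C callbacks through the microtask queue: ToCData unwraps the function pointer and its void* payload from tagged heap values, and the callback is invoked directly, with no JavaScript in between. The calling convention in isolation; the typedef matches v8::MicrotaskCallback from include/v8.h, while the queue here is a stand-in for V8's heap-allocated one:

    #include <cstdio>
    #include <queue>
    #include <utility>

    typedef void (*MicrotaskCallback)(void* data);

    static std::queue<std::pair<MicrotaskCallback, void*>> g_microtasks;

    void EnqueueMicrotask(MicrotaskCallback cb, void* data) {
      g_microtasks.push({cb, data});
    }

    void RunMicrotasks() {
      while (!g_microtasks.empty()) {
        auto [cb, data] = g_microtasks.front();
        g_microtasks.pop();
        cb(data);  // plain C call, no JS frames involved
      }
    }

    int main() {
      int counter = 0;
      EnqueueMicrotask([](void* p) { ++*static_cast<int*>(p); }, &counter);
      RunMicrotasks();
      std::printf("counter=%d\n", counter);  // counter=1
      return 0;
    }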
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index d0afcd2636..920f37cf98 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -1920,14 +1920,6 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
}
-RUNTIME_FUNCTION(Runtime_RegExpExecReThrow) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- Object* exception = isolate->pending_exception();
- isolate->clear_pending_exception();
- return isolate->ReThrow(exception);
-}
-
RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 76f291f90f..3d2d7940a4 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -123,7 +123,7 @@ Object* DeclareGlobal(
// named interceptor or the interceptor is not masking.
if (!global->HasNamedInterceptor() ||
global->GetNamedInterceptor()->non_masking()) {
- LoadGlobalICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
nexus.ConfigurePropertyCellMode(it.GetPropertyCell());
}
}
@@ -141,7 +141,8 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 4, {
Handle<String> name(String::cast(declarations->get(i)), isolate);
FeedbackSlot slot(Smi::ToInt(declarations->get(i + 1)));
- Handle<Object> possibly_literal_slot(declarations->get(i + 2), isolate);
+ Handle<Object> possibly_feedback_cell_slot(declarations->get(i + 2),
+ isolate);
Handle<Object> initial_value(declarations->get(i + 3), isolate);
bool is_var = initial_value->IsUndefined(isolate);
@@ -150,16 +151,18 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
Handle<Object> value;
if (is_function) {
- DCHECK(possibly_literal_slot->IsSmi());
+ DCHECK(possibly_feedback_cell_slot->IsSmi());
// Copy the function and update its context. Use it as value.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(initial_value);
- FeedbackSlot literals_slot(Smi::ToInt(*possibly_literal_slot));
- Handle<Cell> literals(Cell::cast(feedback_vector->Get(literals_slot)),
- isolate);
+ FeedbackSlot feedback_cells_slot(
+ Smi::ToInt(*possibly_feedback_cell_slot));
+ Handle<FeedbackCell> feedback_cell(
+ FeedbackCell::cast(feedback_vector->Get(feedback_cells_slot)),
+ isolate);
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, literals, TENURED);
+ shared, context, feedback_cell, TENURED);
value = function;
} else {
value = isolate->factory()->undefined_value();
@@ -635,34 +638,27 @@ RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
- CONVERT_SMI_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackCell, feedback_cell, 1);
Handle<Context> context(isolate->context(), isolate);
- FeedbackSlot slot = FeedbackVector::ToSlot(index);
- Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, vector_cell, NOT_TENURED);
+ shared, context, feedback_cell, NOT_TENURED);
return *function;
}
-
RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
- CONVERT_SMI_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(FeedbackCell, feedback_cell, 1);
Handle<Context> context(isolate->context(), isolate);
- FeedbackSlot slot = FeedbackVector::ToSlot(index);
- Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
// The caller ensures that we pretenure closures that are assigned
// directly to properties.
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, context, vector_cell, TENURED);
+ shared, context, feedback_cell, TENURED);
return *function;
}
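Both NewClosure variants now take the FeedbackCell directly instead of a (vector, slot) pair, moving the slot lookup to the caller. A small standalone sketch of the before/after calling convention, under stand-in types (FeedbackCell, FeedbackVector, Closure are illustrative, not V8's):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct FeedbackCell { int value; };

    struct FeedbackVector {
      std::vector<FeedbackCell> cells;
      const FeedbackCell& Get(size_t slot) const { return cells.at(slot); }
    };

    struct Closure { const FeedbackCell* cell; };

    // Old shape: the runtime function did the slot lookup itself.
    Closure NewClosureOld(const FeedbackVector& vector, size_t slot) {
      return Closure{&vector.Get(slot)};
    }

    // New shape: the caller has already resolved the cell.
    Closure NewClosureNew(const FeedbackCell& cell) { return Closure{&cell}; }

    int main() {
      FeedbackVector v{{{1}, {2}, {3}}};
      assert(NewClosureOld(v, 1).cell == NewClosureNew(v.Get(1)).cell);
    }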
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 8f6b887f62..6f203b3d01 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -216,35 +216,16 @@ RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
isolate->factory()->undefined_value());
}
-RUNTIME_FUNCTION(Runtime_SubString) {
+RUNTIME_FUNCTION(Runtime_StringSubstring) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
-
CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- int start, end;
- // We have a fast integer-only case here to avoid a conversion to double in
- // the common case where from and to are Smis.
- if (args[1]->IsSmi() && args[2]->IsSmi()) {
- CONVERT_SMI_ARG_CHECKED(from_number, 1);
- CONVERT_SMI_ARG_CHECKED(to_number, 2);
- start = from_number;
- end = to_number;
- } else if (args[1]->IsNumber() && args[2]->IsNumber()) {
- CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
- CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
- start = FastD2IChecked(from_number);
- end = FastD2IChecked(to_number);
- } else {
- return isolate->ThrowIllegalOperation();
- }
- // The following condition is intentionally robust because the SubString
- // builtin delegates here and we test this in
- // cctest/test-strings/RobustSubStringStub.
- if (end < start || start < 0 || end > string->length()) {
- return isolate->ThrowIllegalOperation();
- }
+ CONVERT_INT32_ARG_CHECKED(start, 1);
+ CONVERT_INT32_ARG_CHECKED(end, 2);
+ DCHECK_LE(0, start);
+ DCHECK_LE(start, end);
+ DCHECK_LE(end, string->length());
isolate->counters()->sub_string_runtime()->Increment();
-
return *isolate->factory()->NewSubString(string, start, end);
}
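Note the contract change in StringSubstring: the range checks that used to throw are now DCHECKs, so callers must guarantee 0 <= start <= end <= length before entering the runtime. A hedged standalone sketch of that split, with the clamping pushed into a hypothetical caller (std::string stands in for V8's String):

    #include <algorithm>
    #include <cassert>
    #include <iostream>
    #include <string>

    // Runtime side: preconditions are debug-checked, never thrown.
    std::string StringSubstring(const std::string& s, int start, int end) {
      assert(0 <= start && start <= end &&
             end <= static_cast<int>(s.length()));
      return s.substr(start, end - start);
    }

    // Caller side: normalize arbitrary user input before hitting the runtime.
    std::string SubstringBuiltin(const std::string& s, int from, int to) {
      int len = static_cast<int>(s.length());
      int start = std::clamp(from, 0, len);
      int end = std::clamp(to, start, len);
      return StringSubstring(s, start, end);
    }

    int main() { std::cout << SubstringBuiltin("hello", -2, 99) << "\n"; }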
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 2eaef63bbf..488aa756c6 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -12,28 +12,25 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_CreateSymbol) {
+RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- CHECK(name->IsString() || name->IsUndefined(isolate));
- Handle<Symbol> symbol = isolate->factory()->NewSymbol();
- if (name->IsString()) symbol->set_name(*name);
+ DCHECK_GE(1, args.length());
+ Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
+ if (args.length() == 1) {
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
+ CHECK(name->IsString() || name->IsUndefined(isolate));
+ if (name->IsString()) symbol->set_name(*name);
+ }
return *symbol;
}
-
-RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
+RUNTIME_FUNCTION(Runtime_CreatePrivateFieldSymbol) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- CHECK(name->IsString() || name->IsUndefined(isolate));
- Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
- if (name->IsString()) symbol->set_name(*name);
+ DCHECK_EQ(0, args.length());
+ Handle<Symbol> symbol = isolate->factory()->NewPrivateFieldSymbol();
return *symbol;
}
-
RUNTIME_FUNCTION(Runtime_SymbolDescription) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
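CreatePrivateSymbol now accepts an optional name (hence the -1 /* <= 1 */ arity it gets in runtime.h further below), and the new CreatePrivateFieldSymbol takes none. A standalone sketch of the optional-name shape, with stand-in types:

    #include <iostream>
    #include <optional>
    #include <string>

    struct Symbol {
      bool is_private = true;
      std::optional<std::string> name;
    };

    Symbol CreatePrivateSymbol(std::optional<std::string> name = std::nullopt) {
      Symbol s;
      s.name = std::move(name);  // name stays unset when no argument was passed
      return s;
    }

    Symbol CreatePrivateFieldSymbol() { return Symbol{}; }  // always nameless

    int main() {
      std::cout << CreatePrivateSymbol("desc").name.value_or("<none>") << "\n";
      std::cout << CreatePrivateSymbol().name.value_or("<none>") << "\n";
      std::cout << CreatePrivateFieldSymbol().name.value_or("<none>") << "\n";
    }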
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 01e2b198a6..6b2f3467fc 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -175,22 +175,6 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
isolate->concurrent_recompilation_enabled());
}
-RUNTIME_FUNCTION(Runtime_TypeProfile) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (function->has_feedback_vector()) {
- FeedbackVector* vector = function->feedback_vector();
- if (vector->metadata()->HasTypeProfileSlot()) {
- FeedbackSlot slot = vector->GetTypeProfileSlot();
- CollectTypeProfileNexus nexus(vector, slot);
- return nexus.GetTypeProfile();
- }
- }
- return *isolate->factory()->NewJSObject(isolate->object_function());
-}
-
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
@@ -252,8 +236,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
: "non-concurrent");
}
- // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
- JSFunction::EnsureLiterals(function);
+ JSFunction::EnsureFeedbackVector(function);
function->MarkForOptimization(concurrency_mode);
@@ -470,121 +453,6 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
- // This only supports the case where the function being exported
- // calls an intermediate function, and the intermediate function
- // calls exactly one imported function
- HandleScope scope(isolate);
- CHECK_EQ(args.length(), 2);
- // It takes two parameters, the first one is the JSFunction,
- // The second one is the type
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- // If type is 0, it means that it is supposed to be a direct call into a wasm
- // function.
- // If type is 1, it means that it is supposed to have wrappers.
- CONVERT_ARG_HANDLE_CHECKED(Smi, type, 1);
- Handle<Code> export_code = handle(function->code());
- CHECK(export_code->kind() == Code::JS_TO_WASM_FUNCTION);
- int const mask =
- RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
- : RelocInfo::CODE_TARGET);
- // check the type of the $export_fct
- wasm::WasmCode* export_fct = nullptr;
- Handle<Code> export_fct_handle;
- wasm::WasmCode* intermediate_fct = nullptr;
- Handle<Code> intermediate_fct_handle;
-
- int count = 0;
- for (RelocIterator it(*export_code, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = FLAG_wasm_jit_to_native
- ? rinfo->js_to_wasm_address()
- : rinfo->target_address();
- if (FLAG_wasm_jit_to_native) {
- wasm::WasmCode* target =
- isolate->wasm_engine()->code_manager()->LookupCode(target_address);
- if (target->kind() == wasm::WasmCode::kFunction) {
- ++count;
- export_fct = target;
- }
- } else {
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION) {
- ++count;
- export_fct_handle = handle(target);
- }
- }
- }
- CHECK_EQ(count, 1);
- // check the type of the intermediate_fct
- count = 0;
- if (FLAG_wasm_jit_to_native) {
- for (RelocIterator it(export_fct->instructions(), export_fct->reloc_info(),
- export_fct->constant_pool(),
- RelocInfo::ModeMask(RelocInfo::WASM_CALL));
- !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- wasm::WasmCode* target =
- isolate->wasm_engine()->code_manager()->LookupCode(target_address);
- if (target->kind() == wasm::WasmCode::kFunction) {
- ++count;
- intermediate_fct = target;
- }
- }
- } else {
- count = 0;
- for (RelocIterator it(*export_fct_handle, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION) {
- ++count;
- intermediate_fct_handle = handle(target);
- }
- }
- }
- CHECK_EQ(count, 1);
- // Check the type of the imported exported function, it should be also a wasm
- // function in our case.
- CHECK(type->value() == 0 || type->value() == 1);
-
- count = 0;
- if (FLAG_wasm_jit_to_native) {
- wasm::WasmCode::Kind target_kind = type->value() == 0
- ? wasm::WasmCode::kWasmToWasmWrapper
- : wasm::WasmCode::kWasmToJsWrapper;
- for (RelocIterator it(intermediate_fct->instructions(),
- intermediate_fct->reloc_info(),
- intermediate_fct->constant_pool(),
- RelocInfo::ModeMask(RelocInfo::WASM_CALL));
- !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- wasm::WasmCode* target =
- isolate->wasm_engine()->code_manager()->LookupCode(target_address);
- if (target->kind() == target_kind) {
- ++count;
- }
- }
- } else {
- Code::Kind target_kind = type->value() == 0 ? Code::WASM_TO_WASM_FUNCTION
- : Code::WASM_TO_JS_FUNCTION;
- count = 0;
- for (RelocIterator it(*intermediate_fct_handle, mask); !it.done();
- it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == target_kind) {
- ++count;
- }
- }
- }
- CHECK_LE(count, 1);
- return isolate->heap()->ToBoolean(count == 1);
-}
-
RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
HandleScope scope(isolate);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -757,6 +625,18 @@ RUNTIME_FUNCTION(Runtime_SetFlags) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, arg, 0);
+ if (arg->IsTrue(isolate)) {
+ isolate->set_force_slow_path(true);
+ } else {
+ DCHECK(arg->IsFalse(isolate));
+ isolate->set_force_slow_path(false);
+ }
+ return isolate->heap()->undefined_value();
+}
RUNTIME_FUNCTION(Runtime_Abort) {
SealHandleScope shs(isolate);
@@ -774,6 +654,10 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ if (FLAG_disable_abortjs) {
+ base::OS::PrintError("[disabled] abort: %s\n", message->ToCString().get());
+ return nullptr;
+ }
base::OS::PrintError("abort: %s\n", message->ToCString().get());
isolate->PrintStack(stderr);
base::OS::Abort();
@@ -845,31 +729,6 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
return obj; // return TOS
}
-RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
- HandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, exception_obj, 0);
-
- Factory* factory = isolate->factory();
- Handle<JSMessageObject> message_obj =
- isolate->CreateMessage(exception_obj, nullptr);
-
- Handle<JSObject> message = factory->NewJSObject(isolate->object_function());
-
- Handle<String> key;
- Handle<Object> value;
-
- key = factory->NewStringFromAsciiChecked("start_pos");
- value = handle(Smi::FromInt(message_obj->start_position()), isolate);
- JSObject::SetProperty(message, key, value, LanguageMode::kStrict).Assert();
-
- key = factory->NewStringFromAsciiChecked("end_pos");
- value = handle(Smi::FromInt(message_obj->end_position()), isolate);
- JSObject::SetProperty(message, key, value, LanguageMode::kStrict).Assert();
-
- return *message;
-}
-
RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -964,7 +823,6 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(HoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FixedTypedArrayElements)
// Properties test sitting with elements tests - not fooling anyone.
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
@@ -985,7 +843,7 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
RUNTIME_FUNCTION(Runtime_SpeciesProtector) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
+ return isolate->heap()->ToBoolean(isolate->IsSpeciesLookupChainIntact());
}
// Take a compiled wasm module, serialize it and copy the buffer into an array
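The new SetForceSlowPath intrinsic accepts exactly true or false; anything else trips the DCHECK rather than being coerced. A standalone sketch of that strict-boolean contract (Isolate and Arg are stand-ins, not V8 types):

    #include <cassert>
    #include <variant>

    struct Isolate { bool force_slow_path = false; };

    using Arg = std::variant<bool, int>;  // stand-in for an arbitrary heap value

    void SetForceSlowPath(Isolate& isolate, const Arg& arg) {
      const bool* b = std::get_if<bool>(&arg);
      assert(b != nullptr);  // mirrors DCHECK(arg->IsTrue() || arg->IsFalse())
      isolate.force_slow_path = *b;
    }

    int main() {
      Isolate iso;
      SetForceSlowPath(iso, true);
      assert(iso.force_slow_path);
      SetForceSlowPath(iso, false);
      assert(!iso.force_slow_path);
    }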
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 85fb2d2173..f8fd3cc622 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -14,14 +14,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
- return holder->byte_length();
-}
-
-
RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -56,7 +48,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, source, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 2);
size_t length;
@@ -66,19 +58,12 @@ RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
return accessor->CopyElements(source, target, length);
}
-#define BUFFER_VIEW_GETTER(Type, getter, accessor) \
- RUNTIME_FUNCTION(Runtime_##Type##Get##getter) { \
- HandleScope scope(isolate); \
- DCHECK_EQ(1, args.length()); \
- CONVERT_ARG_HANDLE_CHECKED(JS##Type, holder, 0); \
- return holder->accessor(); \
- }
-
-BUFFER_VIEW_GETTER(ArrayBufferView, ByteLength, byte_length)
-BUFFER_VIEW_GETTER(ArrayBufferView, ByteOffset, byte_offset)
-BUFFER_VIEW_GETTER(TypedArray, Length, length)
-
-#undef BUFFER_VIEW_GETTER
+RUNTIME_FUNCTION(Runtime_TypedArrayGetLength) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ return holder->length();
+}
RUNTIME_FUNCTION(Runtime_ArrayBufferViewWasNeutered) {
HandleScope scope(isolate);
@@ -162,58 +147,6 @@ RUNTIME_FUNCTION(Runtime_IsTypedArray) {
return isolate->heap()->ToBoolean(args[0]->IsJSTypedArray());
}
-RUNTIME_FUNCTION(Runtime_IsSharedTypedArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(
- args[0]->IsJSTypedArray() &&
- JSTypedArray::cast(args[0])->GetBuffer()->is_shared());
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- if (!args[0]->IsJSTypedArray()) {
- return isolate->heap()->false_value();
- }
-
- Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
- return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
- obj->type() != kExternalFloat32Array &&
- obj->type() != kExternalFloat64Array &&
- obj->type() != kExternalUint8ClampedArray);
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- if (!args[0]->IsJSTypedArray()) {
- return isolate->heap()->false_value();
- }
-
- Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
- return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
- obj->type() == kExternalInt32Array);
-}
-
-RUNTIME_FUNCTION(Runtime_TypedArraySpeciesCreateByLength) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 2);
- Handle<JSTypedArray> exemplar = args.at<JSTypedArray>(0);
- Handle<Object> length = args.at(1);
- int argc = 1;
- ScopedVector<Handle<Object>> argv(argc);
- argv[0] = length;
- Handle<JSTypedArray> result_array;
- // TODO(tebbi): Pass correct method name.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_array,
- JSTypedArray::SpeciesCreate(isolate, exemplar, argc, argv.start(), ""));
- return *result_array;
-}
-
// 22.2.3.23 %TypedArray%.prototype.set ( overloaded [ , offset ] )
RUNTIME_FUNCTION(Runtime_TypedArraySet) {
HandleScope scope(isolate);
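With the BUFFER_VIEW_GETTER macro gone, TypedArrayGetLength is the only getter left on the runtime path in this file. A trivial standalone sketch of what the macro used to stamp out, reduced to the surviving case (JSTypedArray is a stand-in):

    #include <cstddef>
    #include <iostream>

    struct JSTypedArray {
      size_t length_;
      size_t length() const { return length_; }
    };

    // What each macro expansion amounted to: unwrap the holder, return one field.
    size_t Runtime_TypedArrayGetLength(const JSTypedArray& holder) {
      return holder.length();
    }

    int main() {
      JSTypedArray ta{16};
      std::cout << Runtime_TypedArrayGetLength(ta) << "\n";  // prints 16
    }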
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index d05f4984c6..2bfd280803 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -44,7 +44,6 @@ namespace internal {
F(GetArrayKeys, 2, 1) \
F(TrySliceSimpleNonFastElements, 3, 1) \
F(NewArray, -1 /* >= 3 */, 1) \
- F(FunctionBind, -1, 1) \
F(NormalizeElements, 1, 1) \
F(GrowArrayElements, 2, 1) \
F(HasComplexElements, 1, 1) \
@@ -56,9 +55,6 @@ namespace internal {
F(SpreadIterablePrepare, 1, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
- F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
- F(ThrowNotInt32SharedTypedArrayError, 1, 1) \
- F(ThrowInvalidAtomicAccessIndexError, 0, 1) \
F(AtomicsExchange, 3, 1) \
F(AtomicsCompareExchange, 4, 1) \
F(AtomicsAdd, 3, 1) \
@@ -78,7 +74,8 @@ namespace internal {
F(BigIntEqualToString, 2, 1) \
F(BigIntToBoolean, 1, 1) \
F(BigIntToNumber, 1, 1) \
- F(BigIntUnaryOp, 2, 1)
+ F(BigIntUnaryOp, 2, 1) \
+ F(ToBigInt, 1, 1)
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(ThrowUnsupportedSuperError, 0, 1) \
@@ -99,8 +96,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
F(TheHole, 0, 1) \
- F(GenericHash, 1, 1) \
- F(GetExistingHash, 1, 1) \
F(SetGrow, 1, 1) \
F(SetShrink, 1, 1) \
F(SetIteratorClone, 1, 1) \
@@ -108,7 +103,6 @@ namespace internal {
F(MapGrow, 1, 1) \
F(MapIteratorClone, 1, 1) \
F(GetWeakMapEntries, 2, 1) \
- F(WeakCollectionInitialize, 1, 1) \
F(WeakCollectionDelete, 3, 1) \
F(WeakCollectionSet, 4, 1) \
F(GetWeakSetValues, 2, 1) \
@@ -130,12 +124,10 @@ namespace internal {
#define FOR_EACH_INTRINSIC_DATE(F) \
F(IsDate, 1, 1) \
- F(DateCurrentTime, 0, 1) \
- F(ThrowNotDateError, 0, 1)
+ F(DateCurrentTime, 0, 1)
#define FOR_EACH_INTRINSIC_DEBUG(F) \
F(HandleDebuggerStatement, 0, 1) \
- F(SetDebugEventListener, 2, 1) \
F(ScheduleBreak, 0, 1) \
F(DebugGetInternalProperties, 1, 1) \
F(DebugGetPropertyDetails, 2, 1) \
@@ -153,12 +145,7 @@ namespace internal {
F(GetGeneratorScopeCount, 1, 1) \
F(GetGeneratorScopeDetails, 2, 1) \
F(SetScopeVariableValue, 6, 1) \
- F(DebugPrintScopes, 0, 1) \
- F(SetBreakPointsActive, 1, 1) \
F(GetBreakLocations, 1, 1) \
- F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 3, 1) \
- F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
F(PrepareStep, 2, 1) \
@@ -177,22 +164,16 @@ namespace internal {
F(GetHeapUsage, 0, 1) \
F(GetScript, 1, 1) \
F(ScriptLineCount, 1, 1) \
- F(ScriptLineStartPosition, 2, 1) \
- F(ScriptLineEndPosition, 2, 1) \
F(ScriptLocationFromLine, 4, 1) \
F(ScriptLocationFromLine2, 4, 1) \
F(ScriptPositionInfo, 3, 1) \
F(ScriptPositionInfo2, 3, 1) \
- F(ScriptSourceLine, 2, 1) \
F(DebugOnFunctionCall, 1, 1) \
F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
- F(DebugRecordGenerator, 1, 1) \
F(DebugPushPromise, 1, 1) \
F(DebugPopPromise, 0, 1) \
- F(DebugPromiseReject, 2, 1) \
F(DebugAsyncFunctionPromiseCreated, 1, 1) \
F(DebugIsActive, 0, 1) \
- F(DebugBreakInOptimizedCode, 0, 1) \
F(DebugCollectCoverage, 0, 1) \
F(DebugTogglePreciseCoverage, 1, 1) \
F(DebugToggleBlockCoverage, 1, 1) \
@@ -222,8 +203,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE_FEEDBACK(F) \
- F(InterpreterDeserializeLazy, 2, 1) \
- F(InterpreterNewClosure, 4, 1)
+ F(InterpreterDeserializeLazy, 2, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(FunctionGetName, 1, 1) \
@@ -232,7 +212,6 @@ namespace internal {
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetContextData, 1, 1) \
- F(FunctionSetLength, 2, 1) \
F(FunctionIsAPIFunction, 1, 1) \
F(SetCode, 2, 1) \
F(SetNativeFlag, 1, 1) \
@@ -246,11 +225,14 @@ namespace internal {
F(GeneratorClose, 1, 1) \
F(GeneratorGetFunction, 1, 1) \
F(GeneratorGetReceiver, 1, 1) \
- F(GeneratorGetContext, 1, 1) \
F(GeneratorGetInputOrDebugPos, 1, 1) \
+ F(AsyncFunctionAwaitCaught, 3, 1) \
+ F(AsyncFunctionAwaitUncaught, 3, 1) \
F(AsyncGeneratorResolve, 3, 1) \
F(AsyncGeneratorReject, 2, 1) \
F(AsyncGeneratorYield, 3, 1) \
+ F(AsyncGeneratorAwaitCaught, 2, 1) \
+ F(AsyncGeneratorAwaitUncaught, 2, 1) \
F(GeneratorGetContinuation, 1, 1) \
F(GeneratorGetSourcePosition, 1, 1) \
F(GeneratorGetResumeMode, 1, 1) \
@@ -311,35 +293,28 @@ namespace internal {
F(PromoteScheduledException, 0, 1) \
F(ReThrow, 1, 1) \
F(RunMicrotasks, 0, 1) \
+ F(RunMicrotaskCallback, 2, 1) \
F(StackGuard, 0, 1) \
F(Throw, 1, 1) \
F(ThrowApplyNonFunction, 1, 1) \
- F(ThrowCannotConvertToPrimitive, 0, 1) \
F(ThrowCalledNonCallable, 1, 1) \
- F(ThrowCalledOnNullOrUndefined, 1, 1) \
F(ThrowConstructedNonConstructable, 1, 1) \
F(ThrowConstructorReturnedNonObject, 0, 1) \
- F(ThrowGeneratorRunning, 0, 1) \
- F(ThrowIncompatibleMethodReceiver, 2, 1) \
- F(ThrowInvalidHint, 1, 1) \
F(ThrowInvalidStringLength, 0, 1) \
F(ThrowInvalidTypedArrayAlignment, 2, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
F(ThrowThrowMethodMissing, 0, 1) \
F(ThrowSymbolIteratorInvalid, 0, 1) \
- F(ThrowNonCallableInInstanceOfCheck, 0, 1) \
- F(ThrowNonObjectInInstanceOfCheck, 0, 1) \
F(ThrowNotConstructor, 1, 1) \
F(ThrowRangeError, -1 /* >= 1 */, 1) \
F(ThrowReferenceError, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(ThrowUndefinedOrNullToObject, 1, 1) \
F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1) \
F(AllowDynamicFunction, 1, 1) \
- F(GetTemplateObject, 1, 1) \
+ F(CreateTemplateObject, 1, 1) \
F(ReportMessage, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
@@ -366,9 +341,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_MODULE(F) \
F(DynamicImportCall, 2, 1) \
F(GetImportMetaObject, 0, 1) \
- F(GetModuleNamespace, 1, 1) \
- F(LoadModuleVariable, 1, 1) \
- F(StoreModuleVariable, 2, 1)
+ F(GetModuleNamespace, 1, 1)
#define FOR_EACH_INTRINSIC_NUMBERS(F) \
F(IsValidSmi, 1, 1) \
@@ -391,6 +364,10 @@ namespace internal {
F(ObjectCreate, 2, 1) \
F(InternalSetPrototype, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ F(ObjectValues, 1, 1) \
+ F(ObjectValuesSkipFastPath, 1, 1) \
+ F(ObjectEntries, 1, 1) \
+ F(ObjectEntriesSkipFastPath, 1, 1) \
F(GetProperty, 2, 1) \
F(KeyedGetProperty, 2, 1) \
F(AddNamedProperty, 4, 1) \
@@ -406,14 +383,10 @@ namespace internal {
F(AllocateHeapNumber, 0, 1) \
F(NewObject, 2, 1) \
F(CompleteInobjectSlackTrackingForMap, 1, 1) \
- F(LoadMutableDouble, 2, 1) \
F(TryMigrateInstance, 1, 1) \
- F(IsJSGlobalProxy, 1, 1) \
F(DefineAccessorPropertyUnchecked, 5, 1) \
F(DefineDataPropertyInLiteral, 6, 1) \
F(CollectTypeProfile, 3, 1) \
- F(GetDataProperty, 2, 1) \
- F(GetConstructorName, 1, 1) \
F(HasFastPackedElements, 1, 1) \
F(ValueOf, 1, 1) \
F(IsJSReceiver, 1, 1) \
@@ -437,21 +410,12 @@ namespace internal {
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
F(CreateDataProperty, 3, 1) \
+ F(AddPrivateField, 3, 1) \
F(IterableToListCanBeElided, 1, 1) \
F(GetOwnPropertyDescriptor, 2, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
- F(Multiply, 2, 1) \
- F(Divide, 2, 1) \
- F(Modulus, 2, 1) \
F(Add, 2, 1) \
- F(Subtract, 2, 1) \
- F(ShiftLeft, 2, 1) \
- F(ShiftRight, 2, 1) \
- F(ShiftRightLogical, 2, 1) \
- F(BitwiseAnd, 2, 1) \
- F(BitwiseOr, 2, 1) \
- F(BitwiseXor, 2, 1) \
F(Equal, 2, 1) \
F(NotEqual, 2, 1) \
F(StrictEqual, 2, 1) \
@@ -459,13 +423,11 @@ namespace internal {
F(LessThan, 2, 1) \
F(GreaterThan, 2, 1) \
F(LessThanOrEqual, 2, 1) \
- F(GreaterThanOrEqual, 2, 1) \
- F(InstanceOf, 2, 1)
+ F(GreaterThanOrEqual, 2, 1)
#define FOR_EACH_INTRINSIC_PROMISE(F) \
F(EnqueueMicrotask, 1, 1) \
F(PromiseHookInit, 2, 1) \
- F(PromiseHookResolve, 1, 1) \
F(PromiseHookBefore, 1, 1) \
F(PromiseHookAfter, 1, 1) \
F(PromiseMarkAsHandled, 1, 1) \
@@ -473,7 +435,8 @@ namespace internal {
F(PromiseRevokeReject, 1, 1) \
F(PromiseResult, 1, 1) \
F(PromiseStatus, 1, 1) \
- F(ReportPromiseReject, 2, 1)
+ F(RejectPromise, 3, 1) \
+ F(ResolvePromise, 2, 1)
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
@@ -488,7 +451,6 @@ namespace internal {
F(IsRegExp, 1, 1) \
F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
- F(RegExpExecReThrow, 0, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
F(RegExpInternalReplace, 3, 1) \
F(RegExpReplace, 3, 1) \
@@ -507,8 +469,8 @@ namespace internal {
F(NewRestParameter, 1, 1) \
F(NewSloppyArguments, 3, 1) \
F(NewArgumentsElements, 3, 1) \
- F(NewClosure, 3, 1) \
- F(NewClosure_Tenured, 3, 1) \
+ F(NewClosure, 2, 1) \
+ F(NewClosure_Tenured, 2, 1) \
F(NewScriptContext, 2, 1) \
F(NewFunctionContext, 2, 1) \
F(PushModuleContext, 3, 1) \
@@ -530,7 +492,7 @@ namespace internal {
F(StringIndexOf, 3, 1) \
F(StringIndexOfUnchecked, 3, 1) \
F(StringLastIndexOf, 2, 1) \
- F(SubString, 3, 1) \
+ F(StringSubstring, 3, 1) \
F(StringAdd, 2, 1) \
F(InternalizeString, 1, 1) \
F(StringCharCodeAt, 2, 1) \
@@ -548,106 +510,103 @@ namespace internal {
F(StringCharFromCode, 1, 1) \
F(StringMaxLength, 0, 1)
-#define FOR_EACH_INTRINSIC_SYMBOL(F) \
- F(CreateSymbol, 1, 1) \
- F(CreatePrivateSymbol, 1, 1) \
- F(SymbolDescription, 1, 1) \
- F(SymbolDescriptiveString, 1, 1) \
+#define FOR_EACH_INTRINSIC_SYMBOL(F) \
+ F(CreatePrivateSymbol, -1 /* <= 1 */, 1) \
+ F(CreatePrivateFieldSymbol, 0, 1) \
+ F(SymbolDescription, 1, 1) \
+ F(SymbolDescriptiveString, 1, 1) \
F(SymbolIsPrivate, 1, 1)
#define FOR_EACH_INTRINSIC_TEST(F) \
- F(ConstructDouble, 2, 1) \
+ F(Abort, 1, 1) \
+ F(AbortJS, 1, 1) \
+ F(ClearFunctionFeedback, 1, 1) \
+ F(CompleteInobjectSlackTracking, 1, 1) \
F(ConstructConsString, 2, 1) \
+ F(ConstructDouble, 2, 1) \
+ F(DebugPrint, 1, 1) \
+ F(DebugTrace, 0, 1) \
+ F(DebugTrackRetainingPath, -1, 1) \
F(DeoptimizeFunction, 1, 1) \
F(DeoptimizeNow, 0, 1) \
- F(RunningInSimulator, 0, 1) \
- F(IsConcurrentRecompilationSupported, 0, 1) \
- F(OptimizeFunctionOnNextCall, -1, 1) \
- F(TypeProfile, 1, 1) \
- F(OptimizeOsr, -1, 1) \
- F(NeverOptimizeFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
+ F(DeserializeWasmModule, 2, 1) \
+ F(DisallowCodegenFromStrings, 1, 1) \
+ F(DisallowWasmCodegen, 1, 1) \
+ F(DisassembleFunction, 1, 1) \
+ F(FreezeWasmLazyCompilation, 1, 1) \
+ F(GetCallable, 0, 1) \
F(GetDeoptCount, 1, 1) \
+ F(GetOptimizationStatus, -1, 1) \
F(GetUndetectable, 0, 1) \
- F(GetCallable, 0, 1) \
- F(ClearFunctionFeedback, 1, 1) \
- F(CheckWasmWrapperElision, 2, 1) \
- F(NotifyContextDisposed, 0, 1) \
- F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(DebugPrint, 1, 1) \
- F(DebugTrace, 0, 1) \
- F(DebugTrackRetainingPath, -1, 1) \
- F(PrintWithNameForAssert, 2, 1) \
- F(GetExceptionDetails, 1, 1) \
+ F(GetWasmRecoveredTrapCount, 0, 1) \
F(GlobalPrint, 1, 1) \
- F(SystemBreak, 0, 1) \
- F(SetFlags, 1, 1) \
- F(Abort, 1, 1) \
- F(AbortJS, 1, 1) \
- F(NativeScriptsCount, 0, 1) \
- F(DisassembleFunction, 1, 1) \
- F(TraceEnter, 0, 1) \
- F(TraceExit, 1, 1) \
- F(HaveSameMap, 2, 1) \
- F(InNewSpace, 1, 1) \
- F(HasFastElements, 1, 1) \
- F(HasSmiElements, 1, 1) \
- F(HasObjectElements, 1, 1) \
- F(HasSmiOrObjectElements, 1, 1) \
- F(HasDoubleElements, 1, 1) \
- F(HasHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
- F(HasSloppyArgumentsElements, 1, 1) \
- F(HasFixedTypedArrayElements, 1, 1) \
+ F(HasDoubleElements, 1, 1) \
+ F(HasFastElements, 1, 1) \
F(HasFastProperties, 1, 1) \
- F(HasFixedUint8Elements, 1, 1) \
+ F(HasFixedBigInt64Elements, 1, 1) \
+ F(HasFixedBigUint64Elements, 1, 1) \
+ F(HasFixedFloat32Elements, 1, 1) \
+ F(HasFixedFloat64Elements, 1, 1) \
+ F(HasFixedInt16Elements, 1, 1) \
+ F(HasFixedInt32Elements, 1, 1) \
F(HasFixedInt8Elements, 1, 1) \
F(HasFixedUint16Elements, 1, 1) \
- F(HasFixedInt16Elements, 1, 1) \
F(HasFixedUint32Elements, 1, 1) \
- F(HasFixedInt32Elements, 1, 1) \
- F(HasFixedFloat32Elements, 1, 1) \
- F(HasFixedFloat64Elements, 1, 1) \
F(HasFixedUint8ClampedElements, 1, 1) \
- F(SpeciesProtector, 0, 1) \
- F(SerializeWasmModule, 1, 1) \
- F(DeserializeWasmModule, 2, 1) \
+ F(HasFixedUint8Elements, 1, 1) \
+ F(HasHoleyElements, 1, 1) \
+ F(IsJSError, 1, 1) \
+ F(IsJSGeneratorObject, 1, 1) \
+ F(IsJSMapIterator, 1, 1) \
+ F(IsScriptWrapper, 1, 1) \
+ F(IsJSSetIterator, 1, 1) \
+ F(HasObjectElements, 1, 1) \
+ F(HasSloppyArgumentsElements, 1, 1) \
+ F(HasSmiElements, 1, 1) \
+ F(HasSmiOrObjectElements, 1, 1) \
+ F(HaveSameMap, 2, 1) \
+ F(HeapObjectVerify, 1, 1) \
+ F(InNewSpace, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(IsLiftoffFunction, 1, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
- F(GetWasmRecoveredTrapCount, 0, 1) \
- F(DisallowCodegenFromStrings, 1, 1) \
- F(DisallowWasmCodegen, 1, 1) \
+ F(NativeScriptsCount, 0, 1) \
+ F(NeverOptimizeFunction, 1, 1) \
+ F(NotifyContextDisposed, 0, 1) \
+ F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(OptimizeOsr, -1, 1) \
+ F(PrintWithNameForAssert, 2, 1) \
+ F(RedirectToWasmInterpreter, 2, 1) \
+ F(RunningInSimulator, 0, 1) \
+ F(SerializeWasmModule, 1, 1) \
+ F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
+ F(SetFlags, 1, 1) \
+ F(SetForceSlowPath, 1, 1) \
+ F(SetWasmCompileControls, 2, 1) \
+ F(SetWasmInstantiateControls, 0, 1) \
+ F(SpeciesProtector, 0, 1) \
+ F(SystemBreak, 0, 1) \
+ F(TraceEnter, 0, 1) \
+ F(TraceExit, 1, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
F(ValidateWasmInstancesChain, 2, 1) \
F(ValidateWasmModuleState, 1, 1) \
F(ValidateWasmOrphanedInstance, 1, 1) \
- F(SetWasmCompileControls, 2, 1) \
- F(SetWasmInstantiateControls, 0, 1) \
- F(HeapObjectVerify, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
- F(RedirectToWasmInterpreter, 2, 1) \
- F(WasmTraceMemory, 1, 1) \
- F(CompleteInobjectSlackTracking, 1, 1) \
- F(IsLiftoffFunction, 1, 1) \
- F(FreezeWasmLazyCompilation, 1, 1)
+ F(WasmTraceMemory, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- F(ArrayBufferGetByteLength, 1, 1) \
F(ArrayBufferNeuter, 1, 1) \
F(TypedArrayCopyElements, 3, 1) \
- F(ArrayBufferViewGetByteLength, 1, 1) \
- F(ArrayBufferViewGetByteOffset, 1, 1) \
F(ArrayBufferViewWasNeutered, 1, 1) \
F(TypedArrayGetLength, 1, 1) \
F(TypedArrayGetBuffer, 1, 1) \
F(TypedArraySortFast, 1, 1) \
F(TypedArraySet, 2, 1) \
- F(IsTypedArray, 1, 1) \
- F(IsSharedTypedArray, 1, 1) \
- F(IsSharedIntegerTypedArray, 1, 1) \
- F(IsSharedInteger32TypedArray, 1, 1) \
- F(TypedArraySpeciesCreateByLength, 2, 1)
+ F(IsTypedArray, 1, 1)
#define FOR_EACH_INTRINSIC_WASM(F) \
F(WasmGrowMemory, 1, 1) \
@@ -683,8 +642,7 @@ namespace internal {
F(StoreGlobalIC_Miss, 4, 1) \
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
- F(StorePropertyWithInterceptor, 5, 1) \
- F(Unreachable, 0, 1)
+ F(StorePropertyWithInterceptor, 5, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
FOR_EACH_INTRINSIC_IC(F) \
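runtime.h drives everything through X-macro tables: each F(Name, nargs, result_size) line is expanded once per use site, so the arity edits in this patch (e.g. NewClosure going from 3 to 2) are single-line table changes. A standalone sketch of the expansion trick, simplified from V8's actual macros and using entries taken from this diff:

    #include <iostream>

    #define FOR_EACH_DEMO_INTRINSIC(F) \
      F(RejectPromise, 3, 1)           \
      F(ResolvePromise, 2, 1)          \
      F(StringSubstring, 3, 1)

    struct Entry { const char* name; int nargs; int result_size; };

    // One expansion of the table builds the registry; another could declare
    // the functions themselves.
    #define DEMO_ENTRY(Name, nargs, size) {#Name, nargs, size},
    const Entry kEntries[] = {FOR_EACH_DEMO_INTRINSIC(DEMO_ENTRY)};
    #undef DEMO_ENTRY

    int main() {
      for (const Entry& e : kEntries)
        std::cout << e.name << " takes " << e.nargs << " arg(s)\n";
    }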
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index 6323730b99..eac58186d5 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -46,7 +46,7 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
@@ -70,8 +70,8 @@ void RelocInfo::apply(intptr_t delta) {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, constant_pool_);
- Assembler::set_target_address_at(nullptr, pc_, constant_pool_,
- target + delta, SKIP_ICACHE_FLUSH);
+ Assembler::set_target_address_at(pc_, constant_pool_, target + delta,
+ SKIP_ICACHE_FLUSH);
}
}
@@ -159,7 +159,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@@ -179,15 +179,15 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
@@ -197,15 +197,15 @@ void RelocInfo::WipeOut(Isolate* isolate) {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr,
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr,
SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
+ Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@@ -221,7 +221,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
// Operand constructors
-Operand::Operand(Register rm) : rm_(rm), rmode_(kRelocInfo_NONEPTR) {}
+Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
int32_t Assembler::emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode) {
@@ -281,26 +281,24 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload,
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload,
code ? code->constant_pool() : nullptr, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
- set_target_address_at(isolate, pc, nullptr, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(pc, nullptr, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
}
// This code assumes the FIXED_SEQUENCE of IIHF/IILF
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
-
// Check for instructions generated by Asm::mov()
Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
SixByteInstr instr_1 =
@@ -315,7 +313,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 6);
+ Assembler::FlushICache(pc, 6);
}
patched = true;
} else {
@@ -344,7 +342,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Instruction::SetInstructionBits<SixByteInstr>(
reinterpret_cast<byte*>(pc + instr1_length), instr_2);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 12);
+ Assembler::FlushICache(pc, 12);
}
patched = true;
}
@@ -358,7 +356,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, 6);
+ Assembler::FlushICache(pc, 6);
}
patched = true;
}
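The dominant change in this file is mechanical: set_target_address_at and FlushICache lose their Isolate* parameter, since cache flushing no longer needs per-isolate state. A standalone sketch of the resulting patch-then-flush shape (no real cache maintenance here; types are stand-ins):

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };

    void FlushICache(void* start, size_t size) {
      // Real code would issue the architecture's cache-maintenance sequence.
      std::cout << "flush " << size << " bytes at " << start << "\n";
    }

    void set_target_address_at(uint8_t* pc, uintptr_t target,
                               ICacheFlushMode mode = FLUSH_ICACHE_IF_NEEDED) {
      std::memcpy(pc, &target, sizeof(target));  // patch the address literal
      if (mode != SKIP_ICACHE_FLUSH) FlushICache(pc, sizeof(target));
    }

    int main() {
      uint8_t code[sizeof(uintptr_t)] = {};
      set_target_address_at(code, 0xdeadbeef);                     // flushes
      set_target_address_at(code, 0xfeedface, SKIP_ICACHE_FLUSH);  // silent
    }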
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 166da1c451..56870fd7c0 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -279,22 +279,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_)));
}
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
- flush_mode);
+ Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
- ICacheFlushMode flush_mode) {
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -336,7 +334,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
- set_target_address_at(nullptr, pc, static_cast<Address>(nullptr),
+ set_target_address_at(pc, static_cast<Address>(nullptr),
reinterpret_cast<Address>(object.location()),
SKIP_ICACHE_FLUSH);
break;
@@ -2224,8 +2222,7 @@ void Assembler::EmitRelocations() {
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, nullptr));
- set_target_address_at(nullptr, pc, nullptr, buffer_ + pos,
- SKIP_ICACHE_FLUSH);
+ set_target_address_at(pc, nullptr, buffer_ + pos, SKIP_ICACHE_FLUSH);
}
reloc_info_writer.Write(&rinfo);
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 4a5945de87..b0cc5b8cc4 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -345,12 +345,6 @@ C_REGISTERS(DECLARE_C_REGISTER)
// -----------------------------------------------------------------------------
// Machine instruction Operands
-#if V8_TARGET_ARCH_S390X
-constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
-#else
-constexpr RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
-#endif
-
// Class Operand represents a shifter operand in data processing instructions
// defining immediate numbers and masks
typedef uint8_t Length;
@@ -369,7 +363,7 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = kRelocInfo_NONEPTR)
+ RelocInfo::Mode rmode = RelocInfo::NONE)
: rmode_(rmode)) {
value_.immediate = immediate;
}
@@ -379,7 +373,7 @@ class Operand BASE_EMBEDDED {
value_.immediate = reinterpret_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- INLINE(explicit Operand(Smi* value) : rmode_(kRelocInfo_NONEPTR)) {
+ INLINE(explicit Operand(Smi* value) : rmode_(RelocInfo::NONE)) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@@ -555,7 +549,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
@@ -570,12 +564,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the IIHF/IILF instruction pair.
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 783b995c72..91396bb597 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -435,6 +435,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
+ // Reset the masking register.
+ if (FLAG_branch_load_poisoning) {
+ __ ResetSpeculationPoisonRegister();
+ }
+
// Compute the handler entry address and jump to it.
__ mov(r3, Operand(pending_handler_entrypoint_address));
__ LoadP(r3, MemOperand(r3));
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index df02570783..ecec9cb408 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -33,10 +33,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
- !RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index de4db00cf1..66d77d1250 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -288,6 +288,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
return 8;
}
+ break;
case 'm': {
return FormatMask(instr, format);
}
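The added break above fixes a switch fallthrough: the case body only returned on one path, so control could drop into the following 'm' case. A standalone sketch of the failure mode:

    #include <iostream>

    int FormatOption(char format, bool have_offset) {
      switch (format) {
        case 'i':
          if (have_offset) return 8;
          break;  // without this, control falls through into case 'm'
        case 'm':
          return 2;
      }
      return 0;
    }

    int main() {
      std::cout << FormatOption('i', false) << "\n";  // 0 with break; 2 without
    }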
diff --git a/deps/v8/src/s390/frame-constants-s390.h b/deps/v8/src/s390/frame-constants-s390.h
index 3c2a4c89d3..54638f56bc 100644
--- a/deps/v8/src/s390/frame-constants-s390.h
+++ b/deps/v8/src/s390/frame-constants-s390.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_S390_FRAMES_S390_H_
-#define V8_S390_FRAMES_S390_H_
+#ifndef V8_S390_FRAME_CONSTANTS_S390_H_
+#define V8_S390_FRAME_CONSTANTS_S390_H_
namespace v8 {
namespace internal {
@@ -45,4 +45,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_S390_FRAMES_S390_H_
+#endif // V8_S390_FRAME_CONSTANTS_S390_H_
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 3cb4f2e375..eae0739361 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -66,12 +66,6 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r4, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index fe24884378..50db39c6b5 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -15,6 +15,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@@ -1049,7 +1050,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Clear top frame.
mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
- StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0);
+ StoreP(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);
// Restore current context from top and clear it in debug mode.
mov(ip,
@@ -1215,13 +1216,29 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ mov(r6, Operand(debug_is_active));
+ tm(MemOperand(r6), Operand::Zero());
+ bne(&skip_hook);
+
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  mov(r6, Operand(debug_hook_active));
- LoadB(r6, MemOperand(r6));
- CmpP(r6, Operand::Zero());
+ tm(MemOperand(r6), Operand::Zero());
+ beq(&call_hook);
+
+ LoadP(r6, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(r6, &skip_hook);
+ LoadP(r6, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
+ SmiUntag(r0, r6);
+ tmll(r0, Operand(DebugInfo::kBreakAtEntry));
beq(&skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1279,7 +1296,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- Register code = ip;
+ Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
AddP(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -1332,14 +1349,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(r3, no_reg, expected, actual, flag);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag) {
- Move(r3, function);
- InvokeFunction(r3, expected, actual, flag);
-}
-
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@@ -1529,6 +1538,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ intptr_t bytes_address = reinterpret_cast<intptr_t>(stream->bytes());
+ mov(kOffHeapTrampolineRegister, Operand(bytes_address));
+ Jump(kOffHeapTrampolineRegister);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0 && is_int8(value));
@@ -1947,7 +1962,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
value = src.immediate();
}
- if (src.rmode() != kRelocInfo_NONEPTR) {
+ if (src.rmode() != RelocInfo::NONE) {
// some form of relocation needed
RecordRelocInfo(src.rmode(), value);
}
@@ -3166,7 +3181,7 @@ void TurboAssembler::CmpP(Register src1, Register src2) {
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
void TurboAssembler::Cmp32(Register dst, const Operand& opnd) {
- if (opnd.rmode() == kRelocInfo_NONEPTR) {
+ if (opnd.rmode() == RelocInfo::NONE) {
intptr_t value = opnd.immediate();
if (is_int16(value))
chi(dst, opnd);
@@ -3183,7 +3198,7 @@ void TurboAssembler::Cmp32(Register dst, const Operand& opnd) {
// This helper will set up proper relocation entries if required.
void TurboAssembler::CmpP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
- if (opnd.rmode() == kRelocInfo_NONEPTR) {
+ if (opnd.rmode() == RelocInfo::NONE) {
cgfi(dst, opnd);
} else {
mov(r0, opnd); // Need to generate 64-bit relocation
@@ -3470,7 +3485,7 @@ void TurboAssembler::StoreP(Register src, const MemOperand& mem,
void TurboAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
- DCHECK_EQ(opnd.rmode(), kRelocInfo_NONEPTR);
+ DCHECK_EQ(opnd.rmode(), RelocInfo::NONE);
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
@@ -4269,6 +4284,10 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
}
#endif
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ mov(kSpeculationPoisonRegister, Operand(-1));
+}
+
} // namespace internal
} // namespace v8
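ResetSpeculationPoisonRegister loads -1 because the poison value is consumed as an AND mask over speculatively loaded data: all-ones passes values through untouched, all-zeros neuters a misspeculated load. A standalone sketch of that masking idea (not V8's generated code):

    #include <cassert>
    #include <cstdint>

    uint64_t ApplyPoison(uint64_t loaded_value, uint64_t poison_mask) {
      return loaded_value & poison_mask;  // branchless neutralization
    }

    int main() {
      const uint64_t kResetMask = ~uint64_t{0};  // mov(poison, Operand(-1))
      assert(ApplyPoison(0x1234, kResetMask) == 0x1234);  // pass-through
      assert(ApplyPoison(0x1234, 0) == 0);  // misspeculation: value zeroed
    }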
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index fcc62f21a9..1c3ea3fc54 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -14,20 +14,23 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = r2;
-const Register kReturnRegister1 = r3;
-const Register kReturnRegister2 = r4;
-const Register kJSFunctionRegister = r3;
-const Register kContextRegister = r13;
-const Register kAllocateSizeRegister = r3;
-const Register kInterpreterAccumulatorRegister = r2;
-const Register kInterpreterBytecodeOffsetRegister = r6;
-const Register kInterpreterBytecodeArrayRegister = r7;
-const Register kInterpreterDispatchTableRegister = r8;
-const Register kJavaScriptCallArgCountRegister = r2;
-const Register kJavaScriptCallNewTargetRegister = r5;
-const Register kRuntimeCallFunctionRegister = r3;
-const Register kRuntimeCallArgCountRegister = r2;
+constexpr Register kReturnRegister0 = r2;
+constexpr Register kReturnRegister1 = r3;
+constexpr Register kReturnRegister2 = r4;
+constexpr Register kJSFunctionRegister = r3;
+constexpr Register kContextRegister = r13;
+constexpr Register kAllocateSizeRegister = r3;
+constexpr Register kSpeculationPoisonRegister = r9;
+constexpr Register kInterpreterAccumulatorRegister = r2;
+constexpr Register kInterpreterBytecodeOffsetRegister = r6;
+constexpr Register kInterpreterBytecodeArrayRegister = r7;
+constexpr Register kInterpreterDispatchTableRegister = r8;
+constexpr Register kJavaScriptCallArgCountRegister = r2;
+constexpr Register kJavaScriptCallNewTargetRegister = r5;
+constexpr Register kJavaScriptCallCodeStartRegister = r4;
+constexpr Register kOffHeapTrampolineRegister = ip;
+constexpr Register kRuntimeCallFunctionRegister = r3;
+constexpr Register kRuntimeCallArgCountRegister = r2;
// ----------------------------------------------------------------------------
// Static helper functions
@@ -1001,6 +1004,8 @@ class TurboAssembler : public Assembler {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
+ void ResetSpeculationPoisonRegister();
+
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1082,6 +1087,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
CompareRoot(with, index);
@@ -1139,10 +1147,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// Frame restart support
void MaybeDropFrames();
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index f6754bdd4b..73ca0d5a8a 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -640,7 +640,7 @@ void S390Debugger::Debug() {
#undef XSTR
}
-static bool ICacheMatch(void* one, void* two) {
+bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@@ -1488,11 +1488,6 @@ void Simulator::EvalTableInit() {
} // NOLINT
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == nullptr) {
- i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
static base::OnceType once = V8_ONCE_INIT;
base::CallOnce(&once, &Simulator::EvalTableInit);
// Set up simulator support first. Some of this information is needed to
@@ -2332,7 +2327,7 @@ void Simulator::ExecuteInstruction(Instruction* instr, bool auto_incr_pc) {
icount_++;
if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
+ CheckICache(i_cache(), instr);
}
pc_modified_ = false;
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index 1ff8020e6a..7c4eb74b6c 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -187,6 +187,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
+ static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@@ -425,9 +426,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
- // Icache simulation
- base::CustomMatcherHashMap* i_cache_;
-
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
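
The ICacheMatch predicate promoted to a static member above compares page-base keys and asserts that both are page-aligned. A minimal standalone sketch of that invariant, with an illustrative 4 KiB page size standing in for CachePage's constants:

#include <cassert>
#include <cstdint>

constexpr std::uintptr_t kPageSize = 4096;  // illustrative; V8 uses CachePage's page size
constexpr std::uintptr_t kPageMask = kPageSize - 1;

bool ICacheMatch(void* one, void* two) {
  // Keys in the cache are page base addresses, so their low bits are zero
  // and pointer identity is a sufficient equality test.
  assert((reinterpret_cast<std::uintptr_t>(one) & kPageMask) == 0);
  assert((reinterpret_cast<std::uintptr_t>(two) & kPageMask) == 0);
  return one == two;
}
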
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 83031a2f36..176693d2aa 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -51,7 +51,7 @@ SafepointTable::SafepointTable(Address instruction_start,
}
SafepointTable::SafepointTable(Code* code)
- : SafepointTable(code->instruction_start(), code->safepoint_table_offset(),
+ : SafepointTable(code->InstructionStart(), code->safepoint_table_offset(),
code->stack_slots(), true) {}
unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
diff --git a/deps/v8/src/simulator-base.cc b/deps/v8/src/simulator-base.cc
index 72a5daefce..f075ad72ac 100644
--- a/deps/v8/src/simulator-base.cc
+++ b/deps/v8/src/simulator-base.cc
@@ -20,9 +20,21 @@ base::Mutex* SimulatorBase::redirection_mutex_ = nullptr;
Redirection* SimulatorBase::redirection_ = nullptr;
// static
+base::Mutex* SimulatorBase::i_cache_mutex_ = nullptr;
+
+// static
+base::CustomMatcherHashMap* SimulatorBase::i_cache_ = nullptr;
+
+// static
void SimulatorBase::InitializeOncePerProcess() {
DCHECK_NULL(redirection_mutex_);
redirection_mutex_ = new base::Mutex();
+
+ DCHECK_NULL(i_cache_mutex_);
+ i_cache_mutex_ = new base::Mutex();
+
+ DCHECK_NULL(i_cache_);
+ i_cache_ = new base::CustomMatcherHashMap(&Simulator::ICacheMatch);
}
// static
@@ -32,40 +44,40 @@ void SimulatorBase::GlobalTearDown() {
Redirection::DeleteChain(redirection_);
redirection_ = nullptr;
-}
-// static
-void SimulatorBase::Initialize(Isolate* isolate) {
- ExternalReference::set_redirector(isolate, &RedirectExternalReference);
-}
+ delete i_cache_mutex_;
+ i_cache_mutex_ = nullptr;
-// static
-void SimulatorBase::TearDown(base::CustomMatcherHashMap* i_cache) {
- if (i_cache != nullptr) {
- for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
- entry = i_cache->Next(entry)) {
+ if (i_cache_ != nullptr) {
+ for (base::HashMap::Entry* entry = i_cache_->Start(); entry != nullptr;
+ entry = i_cache_->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
- delete i_cache;
}
+ delete i_cache_;
+ i_cache_ = nullptr;
+}
+
+// static
+void SimulatorBase::Initialize(Isolate* isolate) {
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
}
// static
-void* SimulatorBase::RedirectExternalReference(Isolate* isolate,
- void* external_function,
+void* SimulatorBase::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
base::LockGuard<base::Mutex> lock_guard(Simulator::redirection_mutex());
- Redirection* redirection = Redirection::Get(isolate, external_function, type);
+ Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_instruction();
}
-Redirection::Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type)
+Redirection::Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function), type_(type), next_(nullptr) {
next_ = Simulator::redirection();
+ base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
Simulator::SetRedirectInstruction(
reinterpret_cast<Instruction*>(address_of_instruction()));
- Simulator::FlushICache(isolate->simulator_i_cache(),
+ Simulator::FlushICache(Simulator::i_cache(),
reinterpret_cast<void*>(&instruction_),
sizeof(instruction_));
Simulator::set_redirection(this);
@@ -77,7 +89,7 @@ Redirection::Redirection(Isolate* isolate, void* external_function,
}
// static
-Redirection* Redirection::Get(Isolate* isolate, void* external_function,
+Redirection* Redirection::Get(void* external_function,
ExternalReference::Type type) {
Redirection* current = Simulator::redirection();
for (; current != nullptr; current = current->next_) {
@@ -86,7 +98,7 @@ Redirection* Redirection::Get(Isolate* isolate, void* external_function,
return current;
}
}
- return new Redirection(isolate, external_function, type);
+ return new Redirection(external_function, type);
}
} // namespace internal
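
Taken together, these changes move the simulator's instruction cache from a per-Isolate member to a single process-wide table created in InitializeOncePerProcess, destroyed in GlobalTearDown, and guarded by its own mutex. A hedged sketch of that lifecycle pattern using standard-library types (GlobalCache and its members are hypothetical, not V8 names):

#include <mutex>
#include <unordered_map>

class GlobalCache {
 public:
  // Assumes single-threaded process startup, as V8's init path does.
  static void InitializeOncePerProcess() {
    mutex_ = new std::mutex();
    map_ = new std::unordered_map<void*, int>();
  }
  static void GlobalTearDown() {
    delete map_;
    map_ = nullptr;
    delete mutex_;
    mutex_ = nullptr;
  }
  static void Insert(void* key, int value) {
    std::lock_guard<std::mutex> guard(*mutex_);  // all mutation is serialized
    (*map_)[key] = value;
  }

 private:
  static std::mutex* mutex_;
  static std::unordered_map<void*, int>* map_;
};

std::mutex* GlobalCache::mutex_ = nullptr;
std::unordered_map<void*, int>* GlobalCache::map_ = nullptr;
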
diff --git a/deps/v8/src/simulator-base.h b/deps/v8/src/simulator-base.h
index 84c1f2fd5b..47a6b1a52c 100644
--- a/deps/v8/src/simulator-base.h
+++ b/deps/v8/src/simulator-base.h
@@ -24,26 +24,28 @@ class SimulatorBase {
static void InitializeOncePerProcess();
static void GlobalTearDown();
- // Call on isolate initialization and teardown.
+ // Call on isolate initialization.
static void Initialize(Isolate* isolate);
- static void TearDown(base::CustomMatcherHashMap* i_cache);
static base::Mutex* redirection_mutex() { return redirection_mutex_; }
static Redirection* redirection() { return redirection_; }
static void set_redirection(Redirection* r) { redirection_ = r; }
+ static base::Mutex* i_cache_mutex() { return i_cache_mutex_; }
+ static base::CustomMatcherHashMap* i_cache() { return i_cache_; }
+
protected:
template <typename Return, typename SimT, typename CallImpl, typename... Args>
static Return VariadicCall(SimT* sim, CallImpl call, byte* entry,
Args... args) {
// Convert all arguments to intptr_t. Fails if any argument is not integral
// or pointer.
- std::array<intptr_t, sizeof...(args)> args_arr{ConvertArg(args)...};
+ std::array<intptr_t, sizeof...(args)> args_arr{{ConvertArg(args)...}};
intptr_t ret = (sim->*call)(entry, args_arr.size(), args_arr.data());
return ConvertReturn<Return>(ret);
}
- // Convert back integral return types.
+ // Convert back integral return types. This is always a narrowing conversion.
template <typename T>
static typename std::enable_if<std::is_integral<T>::value, T>::type
ConvertReturn(intptr_t ret) {
@@ -64,14 +66,16 @@ class SimulatorBase {
intptr_t ret) {}
private:
- // Runtime call support. Uses the isolate in a thread-safe way.
- static void* RedirectExternalReference(Isolate* isolate,
- void* external_function,
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
ExternalReference::Type type);
static base::Mutex* redirection_mutex_;
static Redirection* redirection_;
+ static base::Mutex* i_cache_mutex_;
+ static base::CustomMatcherHashMap* i_cache_;
+
// Helper methods to convert arbitrary integer or pointer arguments to the
// needed generic argument type intptr_t.
@@ -80,7 +84,16 @@ class SimulatorBase {
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ConvertArg(T arg) {
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
+#if V8_TARGET_ARCH_MIPS64
+ // The MIPS64 calling convention is to sign extend all values, even unsigned
+ // ones.
+ using signed_t = typename std::make_signed<T>::type;
+ return static_cast<intptr_t>(static_cast<signed_t>(arg));
+#else
+ // Standard C++ conversion: Sign-extend signed values, zero-extend unsigned
+ // values.
return static_cast<intptr_t>(arg);
+#endif
}
// Convert pointer-typed argument to intptr_t.
@@ -108,8 +121,7 @@ class SimulatorBase {
// - V8_TARGET_ARCH_S390: svc (Supervisor Call)
class Redirection {
public:
- Redirection(Isolate* isolate, void* external_function,
- ExternalReference::Type type);
+ Redirection(void* external_function, ExternalReference::Type type);
Address address_of_instruction() {
#if ABI_USES_FUNCTION_DESCRIPTORS
@@ -122,7 +134,7 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(Isolate* isolate, void* external_function,
+ static Redirection* Get(void* external_function,
ExternalReference::Type type);
static Redirection* FromInstruction(Instruction* instruction) {
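
The MIPS64 branch of ConvertArg above routes unsigned arguments through the signed type of the same width, so the widening cast sign-extends as the n64 calling convention requires. A self-contained sketch of the difference on a 64-bit host (ConvertArgMips64 is a stand-in name):

#include <cstdint>
#include <cstdio>
#include <type_traits>

template <typename T>
std::intptr_t ConvertArgMips64(T arg) {
  static_assert(std::is_integral<T>::value, "integral arguments only");
  using signed_t = typename std::make_signed<T>::type;
  return static_cast<std::intptr_t>(static_cast<signed_t>(arg));
}

int main() {
  std::uint32_t u = 0x80000000u;
  // Sign-extends: prints -2147483648 on a 64-bit host.
  std::printf("%lld\n", static_cast<long long>(ConvertArgMips64(u)));
  // A plain cast zero-extends instead: prints 2147483648.
  std::printf("%lld\n", static_cast<long long>(static_cast<std::intptr_t>(u)));
  return 0;
}
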
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
index 53a0f30612..e8e086fca3 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.cc
+++ b/deps/v8/src/snapshot/builtin-deserializer.cc
@@ -109,14 +109,35 @@ void BuiltinDeserializer::DeserializeEagerBuiltinsAndHandlers() {
Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
allocator()->ReserveAndInitializeBuiltinsTableForBuiltin(builtin_id);
DisallowHeapAllocation no_gc;
- return DeserializeBuiltinRaw(builtin_id);
+ Code* code = DeserializeBuiltinRaw(builtin_id);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_builtin_code) {
+ DCHECK(isolate()->builtins()->is_initialized());
+ OFStream os(stdout);
+ code->Disassemble(Builtins::name(builtin_id), os);
+ os << std::flush;
+ }
+#endif // ENABLE_DISASSEMBLER
+
+ return code;
}
Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
OperandScale operand_scale) {
allocator()->ReserveForHandler(bytecode, operand_scale);
DisallowHeapAllocation no_gc;
- return DeserializeHandlerRaw(bytecode, operand_scale);
+ Code* code = DeserializeHandlerRaw(bytecode, operand_scale);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_builtin_code) {
+ OFStream os(stdout);
+ code->Disassemble(Bytecodes::ToString(bytecode), os);
+ os << std::flush;
+ }
+#endif // ENABLE_DISASSEMBLER
+
+ return code;
}
Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
@@ -136,8 +157,7 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
// Flush the instruction cache.
Code* code = Code::cast(o);
- Assembler::FlushICache(isolate(), code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
return code;
}
@@ -161,8 +181,7 @@ Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
// Flush the instruction cache.
Code* code = Code::cast(o);
- Assembler::FlushICache(isolate(), code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
return code;
}
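
Both deserialization entry points above are restructured the same way: the raw operation is wrapped so a diagnostic flag can dump the freshly created code object. A generic, hypothetical rendition of that wrapper shape (all names here are stand-ins for V8's):

#include <cstdio>

struct Code { const char* name; };
static bool FLAG_print_builtin_code = false;  // stand-in for V8's flag

Code* DeserializeRaw(int id) {
  (void)id;                     // placeholder body for the sketch
  static Code code{"builtin"};
  return &code;
}

Code* Deserialize(int id) {
  Code* code = DeserializeRaw(id);
#ifdef ENABLE_DISASSEMBLER
  if (FLAG_print_builtin_code) {
    // Stand-in for code->Disassemble(name, os).
    std::printf("disassembly of %s\n", code->name);
  }
#endif
  return code;
}
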
diff --git a/deps/v8/src/snapshot/builtin-deserializer.h b/deps/v8/src/snapshot/builtin-deserializer.h
index 38ba2fecea..1ae49686b8 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.h
+++ b/deps/v8/src/snapshot/builtin-deserializer.h
@@ -56,7 +56,8 @@ class BuiltinDeserializer final
// BuiltinDeserializer implements its own builtin iteration logic. Make sure
// the RootVisitor API is not used accidentally.
- void VisitRootPointers(Root root, Object** start, Object** end) override {
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
UNREACHABLE();
}
diff --git a/deps/v8/src/snapshot/builtin-serializer.cc b/deps/v8/src/snapshot/builtin-serializer.cc
index 893c79c05e..0109a85b6b 100644
--- a/deps/v8/src/snapshot/builtin-serializer.cc
+++ b/deps/v8/src/snapshot/builtin-serializer.cc
@@ -65,8 +65,8 @@ void BuiltinSerializer::SerializeBuiltinsAndHandlers() {
sink_.PutRaw(data, data_length, "BuiltinOffsets");
}
-void BuiltinSerializer::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void BuiltinSerializer::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
UNREACHABLE(); // We iterate manually in SerializeBuiltins.
}
diff --git a/deps/v8/src/snapshot/builtin-serializer.h b/deps/v8/src/snapshot/builtin-serializer.h
index bb8bbdebfa..abc8be74e5 100644
--- a/deps/v8/src/snapshot/builtin-serializer.h
+++ b/deps/v8/src/snapshot/builtin-serializer.h
@@ -28,7 +28,8 @@ class BuiltinSerializer : public Serializer<BuiltinSerializerAllocator> {
void SerializeBuiltinsAndHandlers();
private:
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
void SerializeBuiltin(Code* code);
void SerializeHandler(Code* code);
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 4210845573..8126e9ee2c 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -22,6 +22,17 @@
namespace v8 {
namespace internal {
+ScriptData::ScriptData(const byte* data, int length)
+ : owns_data_(false), rejected_(false), data_(data), length_(length) {
+ if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
+ byte* copy = NewArray<byte>(length);
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
+ CopyBytes(copy, data, length);
+ data_ = copy;
+ AcquireDataOwnership();
+ }
+}
+
ScriptData* CodeSerializer::Serialize(Isolate* isolate,
Handle<SharedFunctionInfo> info,
Handle<String> source) {
@@ -52,7 +63,8 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
DisallowHeapAllocation no_gc;
- VisitRootPointer(Root::kHandleScope, Handle<Object>::cast(obj).location());
+ VisitRootPointer(Root::kHandleScope, nullptr,
+ Handle<Object>::cast(obj).location());
SerializeDeferredObjects();
Pad();
@@ -134,14 +146,16 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// TODO(7110): Enable serializing of Asm modules once the AsmWasmData
// is context independent.
DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
- // Do not serialize when a debugger is active.
- DCHECK(sfi->debug_info()->IsSmi());
+ // Clear debug info.
+ Object* debug_info = sfi->debug_info();
+ sfi->set_debug_info(Smi::kZero);
// Mark SFI to indicate whether the code is cached.
bool was_deserialized = sfi->deserialized();
sfi->set_deserialized(sfi->is_compiled());
SerializeGeneric(obj, how_to_code, where_to_point);
sfi->set_deserialized(was_deserialized);
+ sfi->set_debug_info(debug_info);
return;
}
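
The SharedFunctionInfo path above switches from asserting that no debugger is attached to temporarily clearing the debug info and restoring it once serialization finishes. A minimal sketch of that save-overwrite-restore idiom as a scope guard (FieldSaver is hypothetical, not a V8 type):

template <typename T>
class FieldSaver {
 public:
  // Stash *slot, replace it for the guard's lifetime, restore on exit.
  FieldSaver(T* slot, T temporary) : slot_(slot), saved_(*slot) {
    *slot_ = temporary;
  }
  ~FieldSaver() { *slot_ = saved_; }

 private:
  T* slot_;
  T saved_;
};

// Usage: { FieldSaver<int> save(&debug_info, 0); SerializeGeneric(...); }
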
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index edc1c2bf1d..8dd5131eb1 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -11,6 +11,38 @@
namespace v8 {
namespace internal {
+class ScriptData {
+ public:
+ ScriptData(const byte* data, int length);
+ ~ScriptData() {
+ if (owns_data_) DeleteArray(data_);
+ }
+
+ const byte* data() const { return data_; }
+ int length() const { return length_; }
+ bool rejected() const { return rejected_; }
+
+ void Reject() { rejected_ = true; }
+
+ void AcquireDataOwnership() {
+ DCHECK(!owns_data_);
+ owns_data_ = true;
+ }
+
+ void ReleaseDataOwnership() {
+ DCHECK(owns_data_);
+ owns_data_ = false;
+ }
+
+ private:
+ bool owns_data_ : 1;
+ bool rejected_ : 1;
+ const byte* data_;
+ int length_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScriptData);
+};
+
class CodeSerializer : public Serializer<> {
public:
static ScriptData* Serialize(Isolate* isolate,
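
The new ScriptData constructor above defends against unaligned cached-data buffers by copying them into a pointer-aligned allocation and taking ownership of the copy. A hypothetical standalone version of that check, assuming only the standard library:

#include <cstdint>
#include <cstring>

struct AlignedView {
  const std::uint8_t* data;
  bool owns;
};

AlignedView MakeAligned(const std::uint8_t* data, int length) {
  if (reinterpret_cast<std::uintptr_t>(data) % sizeof(void*) == 0) {
    return {data, false};  // already pointer-aligned: borrow the buffer
  }
  std::uint8_t* copy = new std::uint8_t[length];  // new[] is suitably aligned
  std::memcpy(copy, data, length);
  return {copy, true};  // caller must delete[] data when owns is true
}
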
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 4b51e89e85..d1e200ef1e 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -60,8 +60,9 @@ Deserializer<AllocatorT>::~Deserializer() {
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
template <class AllocatorT>
-void Deserializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void Deserializer<AllocatorT>::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
// Builtins and bytecode handlers are deserialized in a separate pass by the
// BuiltinDeserializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
@@ -246,11 +247,12 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
// fields in the serializer.
BytecodeArray* bytecode_array = BytecodeArray::cast(obj);
bytecode_array->set_interrupt_budget(
- interpreter::Interpreter::kInterruptBudget);
+ interpreter::Interpreter::InterruptBudget());
bytecode_array->set_osr_loop_nesting_level(0);
}
// Check alignment.
- DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
+ HeapObject::RequiredAlignment(obj->map())));
return obj;
}
@@ -378,8 +380,11 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
CASE_STATEMENT(where, how, within, NEW_SPACE) \
CASE_BODY(where, how, within, NEW_SPACE) \
CASE_STATEMENT(where, how, within, OLD_SPACE) \
+ V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, MAP_SPACE) \
+ V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, LO_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
@@ -480,9 +485,9 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
Address pc = code->entry() + pc_offset;
Address target = code->entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
- isolate, pc, target, data == kInternalReference
- ? RelocInfo::INTERNAL_REFERENCE
- : RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ pc, target,
+ data == kInternalReference ? RelocInfo::INTERNAL_REFERENCE
+ : RelocInfo::INTERNAL_REFERENCE_ENCODED);
break;
}
@@ -585,7 +590,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
int skip = source_.GetInt();
current = reinterpret_cast<Object**>(
reinterpret_cast<intptr_t>(current) + skip);
- // Fall through.
+ V8_FALLTHROUGH;
}
SIXTEEN_CASES(kRootArrayConstants)
@@ -604,7 +609,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
int skip = source_.GetInt();
current = reinterpret_cast<Object**>(
reinterpret_cast<Address>(current) + skip);
- // Fall through.
+ V8_FALLTHROUGH;
}
FOUR_CASES(kHotObject)
@@ -643,12 +648,17 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
break;
}
+#ifdef DEBUG
+#define UNUSED_CASE(byte_code) \
+ case byte_code: \
+ UNREACHABLE();
+ UNUSED_SERIALIZER_BYTE_CODES(UNUSED_CASE)
+#endif
+#undef UNUSED_CASE
+
#undef SIXTEEN_CASES
#undef FOUR_CASES
#undef SINGLE_CASE
-
- default:
- UNREACHABLE();
}
}
CHECK_EQ(limit, current);
@@ -746,7 +756,7 @@ Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
if (how == kFromCode) {
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::deserialization_set_special_target_at(
- isolate, location_of_branch_data,
+ location_of_branch_data,
Code::cast(HeapObject::FromAddress(current_object_address)),
reinterpret_cast<Address>(new_object));
location_of_branch_data += Assembler::kSpecialTargetSize;
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 5c9bda43ac..cd563e46a1 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -96,7 +96,8 @@ class Deserializer : public SerializerDeserializer {
void Rehash();
private:
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 3f92e7757f..bd8757e318 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -76,7 +76,7 @@ MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
Object* root;
- VisitRootPointer(Root::kPartialSnapshotCache, &root);
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &root);
DeserializeDeferredObjects();
FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
result = Handle<HeapObject>(HeapObject::cast(root));
@@ -93,8 +93,7 @@ void ObjectDeserializer::
for (Code* code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
isolate()->heap()->RecordWritesIntoCode(code);
- Assembler::FlushICache(isolate(), code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
}
}
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 41df5dbba7..6446f5e93f 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -42,7 +42,7 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
OldSpace* code_space = isolate->heap()->code_space();
Address start_address = code_space->top();
Object* root;
- VisitRootPointer(Root::kPartialSnapshotCache, &root);
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &root);
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index baac565a11..6661d9799f 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -42,7 +42,8 @@ void PartialSerializer::Serialize(Context** o, bool include_global_proxy) {
context_->set_math_random_index(Smi::kZero);
context_->set_math_random_cache(isolate()->heap()->undefined_value());
- VisitRootPointer(Root::kPartialSnapshotCache, reinterpret_cast<Object**>(o));
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
+ reinterpret_cast<Object**>(o));
SerializeDeferredObjects();
SerializeEmbedderFields();
Pad();
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index 71436fe8fd..d928b02ba1 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -111,7 +111,8 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
if (cache->size() <= i) cache->push_back(Smi::kZero);
// During deserialization, the visitor populates the partial snapshot cache
// and eventually terminates the cache with undefined.
- visitor->VisitRootPointer(Root::kPartialSnapshotCache, &cache->at(i));
+ visitor->VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
+ &cache->at(i));
if (cache->at(i)->IsUndefined(isolate)) break;
}
}
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 7d3d66a08d..f68694d5b8 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -116,6 +116,40 @@ class SerializerDeserializer : public RootVisitor {
void RestoreExternalReferenceRedirectors(
const std::vector<CallHandlerInfo*>& call_handler_infos);
+#define UNUSED_SERIALIZER_BYTE_CODES(V) \
+ V(0x1d) \
+ V(0x1e) \
+ V(0x55) \
+ V(0x56) \
+ V(0x57) \
+ V(0x75) \
+ V(0x76) \
+ V(0x77) \
+ V(0x78) \
+ V(0x79) \
+ V(0x7a) \
+ V(0x7b) \
+ V(0x7c) \
+ V(0x7d) \
+ V(0x7e) \
+ V(0x7f) \
+ V(0xf0) \
+ V(0xf1) \
+ V(0xf2) \
+ V(0xf3) \
+ V(0xf4) \
+ V(0xf5) \
+ V(0xf6) \
+ V(0xf7) \
+ V(0xf8) \
+ V(0xf9) \
+ V(0xfa) \
+ V(0xfb) \
+ V(0xfc) \
+ V(0xfd) \
+ V(0xfe) \
+ V(0xff)
+
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
// Where the pointed-to object can be found:
@@ -196,8 +230,6 @@ class SerializerDeserializer : public RootVisitor {
// Used for embedder-allocated backing stores for TypedArrays.
static const int kOffHeapBackingStore = 0x1c;
- // 0x1d, 0x1e unused.
-
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
@@ -217,8 +249,6 @@ class SerializerDeserializer : public RootVisitor {
static const int kHotObjectWithSkip = 0x58;
static const int kHotObjectMask = 0x07;
- // 0x55..0x57, 0x75..0x7f unused.
-
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
static const int kNumberOfRootArrayConstants = 0x20;
@@ -241,8 +271,6 @@ class SerializerDeserializer : public RootVisitor {
static const int kFixedRepeat = 0xe0;
static const int kFixedRepeatStart = kFixedRepeat - 1;
- // 0xf0..0xff unused.
-
// ---------- special values ----------
static const int kAnyOldSpace = -1;
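
The UNUSED_SERIALIZER_BYTE_CODES list above replaces the scattered "0x.. unused" comments with a single X-macro, which the deserializer expands into explicit UNREACHABLE cases in debug builds so a stray value trips at the exact case rather than a generic default. A small self-contained sketch of the technique (names are illustrative):

#include <cassert>

#define UNUSED_CODES(V) V(0x1d) V(0x1e) V(0xff)

int Dispatch(int byte_code) {
  switch (byte_code) {
    case 0x1c:
      return 1;  // a real, handled byte code
#ifdef DEBUG
#define UNUSED_CASE(code)                           \
  case code:                                        \
    assert(false && "unused serializer byte code"); \
    break;
      UNUSED_CODES(UNUSED_CASE)
#undef UNUSED_CASE
#endif
  }
  return 0;
}

Keeping the list in one place means adding a byte code only requires removing its entry, and the debug expansion keeps the switch honest about which values remain unassigned.
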
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 87e4fe8fdc..b477227154 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -92,8 +92,9 @@ bool Serializer<AllocatorT>::MustBeDeferred(HeapObject* object) {
}
template <class AllocatorT>
-void Serializer<AllocatorT>::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void Serializer<AllocatorT>::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
// Builtins and bytecode handlers are serialized in a separate pass by the
// BuiltinSerializer.
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
@@ -283,7 +284,7 @@ void Serializer<AllocatorT>::PutAttachedReference(SerializerReference reference,
template <class AllocatorT>
int Serializer<AllocatorT>::PutAlignmentPrefix(HeapObject* object) {
- AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(object->map());
if (alignment != kWordAligned) {
DCHECK(1 <= alignment && alignment <= 3);
byte prefix = (kAlignmentPrefix - 1) + alignment;
@@ -885,7 +886,7 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
- rinfo->WipeOut(serializer_->isolate());
+ rinfo->WipeOut();
}
// We need to wipe out the header fields *after* wiping out the
// relocations, because some of these fields are needed for the latter.
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 22dcb26c8c..586c8802c0 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -7,6 +7,7 @@
#include <map>
+#include "src/instruction-stream.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/objects.h"
@@ -116,6 +117,15 @@ class CodeAddressMap : public CodeEventLogger {
const char* name, int length) override {
address_to_name_map_.Insert(code->address(), name, length);
}
+ void LogRecordedBuffer(const InstructionStream* stream, const char* name,
+ int length) override {
+ address_to_name_map_.Insert(stream->bytes(), name, length);
+ }
+
+ void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ int length) override {
+ UNREACHABLE();
+ }
NameMap address_to_name_map_;
Isolate* isolate_;
@@ -162,7 +172,8 @@ class Serializer : public SerializerDeserializer {
virtual bool MustBeDeferred(HeapObject* object);
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
int skip);
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 2bf50cc748..534339b2e5 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -89,6 +89,10 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
// static
Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Lazy-deserializing builtin %s\n", Builtins::name(builtin_id));
+ }
+
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -116,9 +120,33 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
}
// static
+void Snapshot::EnsureAllBuiltinsAreDeserialized(Isolate* isolate) {
+ if (!FLAG_lazy_deserialization) return;
+
+ Builtins* builtins = isolate->builtins();
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (!Builtins::IsLazy(i)) continue;
+
+ DCHECK_NE(Builtins::kDeserializeLazy, i);
+ Code* code = builtins->builtin(i);
+ if (code->builtin_index() == Builtins::kDeserializeLazy) {
+ code = Snapshot::DeserializeBuiltin(isolate, i);
+ }
+
+ DCHECK_EQ(i, code->builtin_index());
+ DCHECK_EQ(code, builtins->builtin(i));
+ }
+}
+
+// static
Code* Snapshot::DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale) {
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Lazy-deserializing handler %s\n",
+ interpreter::Bytecodes::ToString(bytecode, operand_scale).c_str());
+ }
+
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -312,16 +340,16 @@ void Snapshot::CheckVersion(const v8::StartupData* data) {
CHECK_LT(kVersionStringOffset + kVersionStringLength,
static_cast<uint32_t>(data->raw_size));
Version::GetString(Vector<char>(version, kVersionStringLength));
- if (memcmp(version, data->data + kVersionStringOffset,
- kVersionStringLength) != 0) {
- V8_Fatal(__FILE__, __LINE__,
- "Version mismatch between V8 binary and snapshot.\n"
- "# V8 binary version: %.*s\n"
- "# Snapshot version: %.*s\n"
- "# The snapshot consists of %d bytes and contains %d context(s).",
- kVersionStringLength, version, kVersionStringLength,
- data->data + kVersionStringOffset, data->raw_size,
- ExtractNumContexts(data));
+ if (strncmp(version, data->data + kVersionStringOffset,
+ kVersionStringLength) != 0) {
+ FATAL(
+ "Version mismatch between V8 binary and snapshot.\n"
+ "# V8 binary version: %.*s\n"
+ "# Snapshot version: %.*s\n"
+ "# The snapshot consists of %d bytes and contains %d context(s).",
+ kVersionStringLength, version, kVersionStringLength,
+ data->data + kVersionStringOffset, data->raw_size,
+ ExtractNumContexts(data));
}
}
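
EnsureAllBuiltinsAreDeserialized above walks the builtins table and force-deserializes any entry still pointing at the DeserializeLazy trampoline. The same shape in miniature, with stand-in types rather than V8's Builtins and Code:

#include <vector>

struct Entry {
  int builtin_index;  // which builtin this slot actually holds
};

constexpr int kLazyStub = -1;  // stand-in for Builtins::kDeserializeLazy

Entry Materialize(int i) { return Entry{i}; }  // stand-in for DeserializeBuiltin

void EnsureAll(std::vector<Entry>& table) {
  for (int i = 0; i < static_cast<int>(table.size()); i++) {
    if (table[i].builtin_index != kLazyStub) continue;  // already real code
    table[i] = Materialize(i);
  }
}
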
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 8f37e00c4a..23d6e3689f 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -97,6 +97,7 @@ class Snapshot : public AllStatic {
// runtime after the isolate (and the builtins table) has been fully
// initialized.
static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
+ static void EnsureAllBuiltinsAreDeserialized(Isolate* isolate);
// Deserializes a single given handler code object. Intended to be called at
// runtime after the isolate has been fully initialized.
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index e6f853fe0e..d0369984b8 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -78,8 +78,7 @@ void StartupDeserializer::FlushICacheForNewIsolate() {
DCHECK(!deserializing_user_code());
// The entire isolate is newly deserialized. Simply flush all code pages.
for (Page* p : *isolate()->heap()->code_space()) {
- Assembler::FlushICache(isolate(), p->area_start(),
- p->area_end() - p->area_start());
+ Assembler::FlushICache(p->area_start(), p->area_end() - p->area_start());
}
}
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 5ae6e33b87..b02d572595 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/startup-serializer.h"
#include "src/api.h"
+#include "src/global-handles.h"
#include "src/objects-inl.h"
#include "src/v8threads.h"
@@ -94,7 +95,7 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
// add entries to the partial snapshot cache of the startup snapshot. Add
// one entry with 'undefined' to terminate the partial snapshot cache.
Object* undefined = isolate()->heap()->undefined_value();
- VisitRootPointer(Root::kPartialSnapshotCache, &undefined);
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr, &undefined);
isolate()->heap()->IterateWeakRoots(this, VISIT_FOR_SERIALIZATION);
SerializeDeferredObjects();
Pad();
@@ -106,7 +107,7 @@ int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
// This object is not part of the partial snapshot cache yet. Add it to the
// startup snapshot so we can refer to it via partial snapshot index from
// the partial snapshot.
- VisitRootPointer(Root::kPartialSnapshotCache,
+ VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
reinterpret_cast<Object**>(&heap_object));
}
return index;
@@ -133,8 +134,8 @@ void StartupSerializer::SerializeStrongReferences() {
isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
}
-void StartupSerializer::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void StartupSerializer::VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) {
if (start == isolate()->heap()->roots_array_start()) {
// Serializing the root list needs special handling:
// - The first pass over the root list only serializes immortal immovables.
@@ -156,7 +157,7 @@ void StartupSerializer::VisitRootPointers(Root root, Object** start,
}
FlushSkip(skip);
} else {
- Serializer::VisitRootPointers(root, start, end);
+ Serializer::VisitRootPointers(root, description, start, end);
}
}
@@ -197,8 +198,9 @@ void SerializedHandleChecker::AddToSet(FixedArray* serialized) {
for (int i = 0; i < length; i++) serialized_.insert(serialized->get(i));
}
-void SerializedHandleChecker::VisitRootPointers(Root root, Object** start,
- Object** end) {
+void SerializedHandleChecker::VisitRootPointers(Root root,
+ const char* description,
+ Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (serialized_.find(*p) != serialized_.end()) continue;
PrintF("%s handle not serialized: ",
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index ae2a9f49df..ad440965b0 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -63,7 +63,8 @@ class StartupSerializer : public Serializer<> {
// The StartupSerializer has to serialize the root array, which is slightly
// different.
- void VisitRootPointers(Root root, Object** start, Object** end) override;
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override;
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
@@ -86,7 +87,8 @@ class StartupSerializer : public Serializer<> {
class SerializedHandleChecker : public RootVisitor {
public:
SerializedHandleChecker(Isolate* isolate, std::vector<Context*>* contexts);
- virtual void VisitRootPointers(Root root, Object** start, Object** end);
+ virtual void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end);
bool CheckGlobalAndEternalHandles();
private:
diff --git a/deps/v8/src/string-case.h b/deps/v8/src/string-case.h
index 3fe3bc2b81..f57bae494f 100644
--- a/deps/v8/src/string-case.h
+++ b/deps/v8/src/string-case.h
@@ -14,4 +14,4 @@ int FastAsciiConvert(char* dst, const char* src, int length, bool* changed_out);
} // namespace internal
} // namespace v8
-#endif // V8_STRING_CASE_H__
+#endif // V8_STRING_CASE_H_
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
deleted file mode 100644
index aaf521f310..0000000000
--- a/deps/v8/src/third_party/vtune/v8vtune.gyp
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'v8_code': 1,
- },
- 'includes': ['../../../gypfiles/toolchain.gypi', '../../../gypfiles/features.gypi'],
- 'targets': [
- {
- 'target_name': 'v8_vtune',
- 'type': 'static_library',
- 'dependencies': [
- '../../v8.gyp:v8',
- ],
- 'sources': [
- 'ittnotify_config.h',
- 'ittnotify_types.h',
- 'jitprofiling.cc',
- 'jitprofiling.h',
- 'v8-vtune.h',
- 'vtune-jit.cc',
- 'vtune-jit.h',
- ],
- 'direct_dependent_settings': {
- 'defines': ['ENABLE_VTUNE_JIT_INTERFACE',],
- 'conditions': [
- ['OS != "win"', {
- 'libraries': ['-ldl',],
- }],
- ],
- },
- },
- ],
-}
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index bc73996be9..bd56f8a555 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SRC_TRACING_TRACE_EVENT_H_
-#define SRC_TRACING_TRACE_EVENT_H_
+#ifndef V8_TRACING_TRACE_EVENT_H_
+#define V8_TRACING_TRACE_EVENT_H_
#include <stddef.h>
#include <memory>
@@ -686,4 +686,4 @@ class CallStatsScopedTracer {
} // namespace internal
} // namespace v8
-#endif // SRC_TRACING_TRACE_EVENT_H_
+#endif // V8_TRACING_TRACE_EVENT_H_
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index 2c9225d485..4dc7057782 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -34,10 +34,10 @@
namespace {
size_t gNextCodeObject = 0;
-#if defined(DEBUG)
-const bool kEnableDebug = true;
+#ifdef DEBUG
+constexpr bool kEnableDebug = true;
#else
-const bool kEnableDebug = false;
+constexpr bool kEnableDebug = false;
#endif
}
@@ -54,7 +54,7 @@ constexpr size_t HandlerDataSize(size_t num_protected_instructions) {
}
namespace {
-template <typename = std::enable_if<kEnableDebug>>
+#ifdef DEBUG
bool IsDisjoint(const CodeProtectionInfo* a, const CodeProtectionInfo* b) {
if (a == nullptr || b == nullptr) {
return true;
@@ -65,6 +65,7 @@ bool IsDisjoint(const CodeProtectionInfo* a, const CodeProtectionInfo* b) {
return a_base >= b_base + b->size || b_base >= a_base + a->size;
}
+#endif
// Verify that the code range does not overlap any that have already been
// registered.
@@ -181,6 +182,7 @@ int RegisterHandlerData(
new_size = int_max;
}
if (new_size == gNumCodeObjects) {
+ free(data);
return kInvalidIndex;
}
@@ -215,6 +217,7 @@ int RegisterHandlerData(
return static_cast<int>(i);
} else {
+ free(data);
return kInvalidIndex;
}
}
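
Both free(data) additions above plug the same leak: the handler-data buffer was heap-allocated before registration, but never released when the table was full or could not grow. A hedged alternative shape using unique_ptr, shown only to illustrate why the early returns leaked; the actual file sticks to C-style allocation:

#include <cstdlib>
#include <memory>

int RegisterHandlerDataSketch(void** slot, void* raw, bool table_full) {
  // Ownership is taken immediately, so every early return frees the buffer.
  std::unique_ptr<void, decltype(&std::free)> data(raw, std::free);
  if (table_full) return -1;  // previously: `raw` leaked here
  *slot = data.release();     // success: the table takes ownership
  return 0;
}
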
diff --git a/deps/v8/src/trap-handler/trap-handler-internal.h b/deps/v8/src/trap-handler/trap-handler-internal.h
index 1476eb844b..a8cc371c36 100644
--- a/deps/v8/src/trap-handler/trap-handler-internal.h
+++ b/deps/v8/src/trap-handler/trap-handler-internal.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef TRAP_HANDLER_INTERNAL_H_
-#define TRAP_HANDLER_INTERNAL_H_
+#ifndef V8_TRAP_HANDLER_TRAP_HANDLER_INTERNAL_H_
+#define V8_TRAP_HANDLER_TRAP_HANDLER_INTERNAL_H_
// This file should not be included (even transitively) by files outside of
// src/trap-handler.
@@ -79,4 +79,4 @@ extern bool g_is_default_signal_handler_registered;
} // namespace internal
} // namespace v8
-#endif // TRAP_HANDLER_INTERNAL_H_
+#endif // V8_TRAP_HANDLER_TRAP_HANDLER_INTERNAL_H_
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 1e02eeb34c..d410a19322 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TRAP_HANDLER_H_
-#define V8_TRAP_HANDLER_H_
+#ifndef V8_TRAP_HANDLER_TRAP_HANDLER_H_
+#define V8_TRAP_HANDLER_TRAP_HANDLER_H_
#include <signal.h>
#include <stdint.h>
@@ -101,4 +101,4 @@ size_t GetRecoveredTrapCount();
} // namespace internal
} // namespace v8
-#endif // V8_TRAP_HANDLER_H_
+#endif // V8_TRAP_HANDLER_TRAP_HANDLER_H_
diff --git a/deps/v8/src/unicode-decoder.cc b/deps/v8/src/unicode-decoder.cc
index d2360b6c68..6074bae81d 100644
--- a/deps/v8/src/unicode-decoder.cc
+++ b/deps/v8/src/unicode-decoder.cc
@@ -10,74 +10,78 @@
namespace unibrow {
+uint16_t Utf8Iterator::operator*() {
+ if (V8_UNLIKELY(char_ > Utf16::kMaxNonSurrogateCharCode)) {
+ return trailing_ ? Utf16::TrailSurrogate(char_)
+ : Utf16::LeadSurrogate(char_);
+ }
+
+ DCHECK_EQ(trailing_, false);
+ return char_;
+}
+
+Utf8Iterator& Utf8Iterator::operator++() {
+ if (V8_UNLIKELY(this->Done())) {
+ char_ = Utf8::kBufferEmpty;
+ return *this;
+ }
+
+ if (V8_UNLIKELY(char_ > Utf16::kMaxNonSurrogateCharCode && !trailing_)) {
+ trailing_ = true;
+ return *this;
+ }
+
+ trailing_ = false;
+ offset_ = cursor_;
+
+ char_ =
+ Utf8::ValueOf(reinterpret_cast<const uint8_t*>(stream_.begin()) + cursor_,
+ stream_.length() - cursor_, &cursor_);
+ return *this;
+}
+
+Utf8Iterator Utf8Iterator::operator++(int) {
+ Utf8Iterator old(*this);
+ ++*this;
+ return old;
+}
+
+bool Utf8Iterator::Done() {
+ return offset_ == static_cast<size_t>(stream_.length());
+}
+
void Utf8DecoderBase::Reset(uint16_t* buffer, size_t buffer_length,
- const uint8_t* stream, size_t stream_length) {
- // Assume everything will fit in the buffer and stream won't be needed.
- last_byte_of_buffer_unused_ = false;
- unbuffered_start_ = nullptr;
- unbuffered_length_ = 0;
- bool writing_to_buffer = true;
- // Loop until stream is read, writing to buffer as long as buffer has space.
+ const v8::internal::Vector<const char>& stream) {
size_t utf16_length = 0;
- while (stream_length != 0) {
- size_t cursor = 0;
- uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
- DCHECK(cursor > 0 && cursor <= stream_length);
- stream += cursor;
- stream_length -= cursor;
- bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode;
- utf16_length += is_two_characters ? 2 : 1;
- // Don't need to write to the buffer, but still need utf16_length.
- if (!writing_to_buffer) continue;
- // Write out the characters to the buffer.
- // Must check for equality with buffer_length as we've already updated it.
- if (utf16_length <= buffer_length) {
- if (is_two_characters) {
- *buffer++ = Utf16::LeadSurrogate(character);
- *buffer++ = Utf16::TrailSurrogate(character);
- } else {
- *buffer++ = character;
- }
- if (utf16_length == buffer_length) {
- // Just wrote last character of buffer
- writing_to_buffer = false;
- unbuffered_start_ = stream;
- unbuffered_length_ = stream_length;
- }
- continue;
- }
- // Have gone over buffer.
- // Last char of buffer is unused, set cursor back.
- DCHECK(is_two_characters);
- writing_to_buffer = false;
- last_byte_of_buffer_unused_ = true;
- unbuffered_start_ = stream - cursor;
- unbuffered_length_ = stream_length + cursor;
+
+ Utf8Iterator it = Utf8Iterator(stream);
+ // Loop until stream is read, writing to buffer as long as buffer has space.
+ while (utf16_length < buffer_length && !it.Done()) {
+ *buffer++ = *it;
+ ++it;
+ utf16_length++;
+ }
+ bytes_read_ = it.Offset();
+ trailing_ = it.Trailing();
+ chars_written_ = utf16_length;
+
+ // Now that writing to the buffer is done, we just need to compute utf16_length.
+ while (!it.Done()) {
+ ++it;
+ utf16_length++;
}
utf16_length_ = utf16_length;
}
-
-void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
- size_t stream_length, uint16_t* data,
- size_t data_length) {
- while (data_length != 0) {
- size_t cursor = 0;
- uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
- // There's a total lack of bounds checking for stream
- // as it was already done in Reset.
- stream += cursor;
- DCHECK(stream_length >= cursor);
- stream_length -= cursor;
- if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- *data++ = Utf16::LeadSurrogate(character);
- *data++ = Utf16::TrailSurrogate(character);
- DCHECK_GT(data_length, 1);
- data_length -= 2;
- } else {
- *data++ = character;
- data_length -= 1;
- }
+void Utf8DecoderBase::WriteUtf16Slow(
+ uint16_t* data, size_t length,
+ const v8::internal::Vector<const char>& stream, size_t offset,
+ bool trailing) {
+ Utf8Iterator it = Utf8Iterator(stream, offset, trailing);
+ while (!it.Done()) {
+ DCHECK_GT(length--, 0);
+ *data++ = *it;
+ ++it;
}
}
diff --git a/deps/v8/src/unicode-decoder.h b/deps/v8/src/unicode-decoder.h
index 38a1837af3..ab69d0d390 100644
--- a/deps/v8/src/unicode-decoder.h
+++ b/deps/v8/src/unicode-decoder.h
@@ -6,30 +6,73 @@
#define V8_UNICODE_DECODER_H_
#include <sys/types.h>
+#include <algorithm>
#include "src/globals.h"
+#include "src/unicode.h"
#include "src/utils.h"
+#include "src/vector.h"
namespace unibrow {
+class Utf8Iterator {
+ public:
+ explicit Utf8Iterator(const v8::internal::Vector<const char>& stream)
+ : Utf8Iterator(stream, 0, false) {}
+ Utf8Iterator(const v8::internal::Vector<const char>& stream, size_t offset,
+ bool trailing)
+ : stream_(stream),
+ cursor_(offset),
+ offset_(0),
+ char_(0),
+ trailing_(false) {
+ DCHECK_LE(offset, stream.length());
+ // Read the first char, setting offset_ to offset in the process.
+ ++*this;
+
+ // This must be set after reading the first char, since the offset marks
+ // the start of the octet sequence that the trailing char is part of.
+ trailing_ = trailing;
+ if (trailing) {
+ DCHECK_GT(char_, Utf16::kMaxNonSurrogateCharCode);
+ }
+ }
+
+ uint16_t operator*();
+ Utf8Iterator& operator++();
+ Utf8Iterator operator++(int);
+ bool Done();
+ bool Trailing() { return trailing_; }
+ size_t Offset() { return offset_; }
+
+ private:
+ const v8::internal::Vector<const char>& stream_;
+ size_t cursor_;
+ size_t offset_;
+ uint32_t char_;
+ bool trailing_;
+};
+
class V8_EXPORT_PRIVATE Utf8DecoderBase {
public:
// Initialization done in subclass.
inline Utf8DecoderBase();
inline Utf8DecoderBase(uint16_t* buffer, size_t buffer_length,
- const uint8_t* stream, size_t stream_length);
+ const v8::internal::Vector<const char>& stream);
inline size_t Utf16Length() const { return utf16_length_; }
protected:
// This reads all characters and sets the utf16_length_.
// The first buffer_length utf16 chars are cached in the buffer.
- void Reset(uint16_t* buffer, size_t buffer_length, const uint8_t* stream,
- size_t stream_length);
- static void WriteUtf16Slow(const uint8_t* stream, size_t stream_length,
- uint16_t* data, size_t length);
- const uint8_t* unbuffered_start_;
- size_t unbuffered_length_;
+ void Reset(uint16_t* buffer, size_t buffer_length,
+ const v8::internal::Vector<const char>& vector);
+ static void WriteUtf16Slow(uint16_t* data, size_t length,
+ const v8::internal::Vector<const char>& stream,
+ size_t offset, bool trailing);
+
+ size_t bytes_read_;
+ size_t chars_written_;
size_t utf16_length_;
- bool last_byte_of_buffer_unused_;
+ bool trailing_;
private:
DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase);
@@ -39,69 +82,63 @@ template <size_t kBufferSize>
class Utf8Decoder : public Utf8DecoderBase {
public:
inline Utf8Decoder() {}
- inline Utf8Decoder(const char* stream, size_t length);
- inline void Reset(const char* stream, size_t length);
- inline size_t WriteUtf16(uint16_t* data, size_t length) const;
+ explicit inline Utf8Decoder(const v8::internal::Vector<const char>& stream);
+ inline void Reset(const v8::internal::Vector<const char>& stream);
+ inline size_t WriteUtf16(
+ uint16_t* data, size_t length,
+ const v8::internal::Vector<const char>& stream) const;
private:
uint16_t buffer_[kBufferSize];
};
Utf8DecoderBase::Utf8DecoderBase()
- : unbuffered_start_(nullptr),
- unbuffered_length_(0),
- utf16_length_(0),
- last_byte_of_buffer_unused_(false) {}
-
-Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer, size_t buffer_length,
- const uint8_t* stream, size_t stream_length) {
- Reset(buffer, buffer_length, stream, stream_length);
-}
+ : bytes_read_(0), chars_written_(0), utf16_length_(0), trailing_(false) {}
+Utf8DecoderBase::Utf8DecoderBase(
+ uint16_t* buffer, size_t buffer_length,
+ const v8::internal::Vector<const char>& stream) {
+ Reset(buffer, buffer_length, stream);
+}
template <size_t kBufferSize>
-Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, size_t length)
- : Utf8DecoderBase(buffer_, kBufferSize,
- reinterpret_cast<const uint8_t*>(stream), length) {}
-
+Utf8Decoder<kBufferSize>::Utf8Decoder(
+ const v8::internal::Vector<const char>& stream)
+ : Utf8DecoderBase(buffer_, kBufferSize, stream) {}
template <size_t kBufferSize>
-void Utf8Decoder<kBufferSize>::Reset(const char* stream, size_t length) {
- Utf8DecoderBase::Reset(buffer_, kBufferSize,
- reinterpret_cast<const uint8_t*>(stream), length);
+void Utf8Decoder<kBufferSize>::Reset(
+ const v8::internal::Vector<const char>& stream) {
+ Utf8DecoderBase::Reset(buffer_, kBufferSize, stream);
}
-
template <size_t kBufferSize>
-size_t Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
- size_t length) const {
- DCHECK_GT(length, 0);
- if (length > utf16_length_) length = utf16_length_;
+size_t Utf8Decoder<kBufferSize>::WriteUtf16(
+ uint16_t* data, size_t data_length,
+ const v8::internal::Vector<const char>& stream) const {
+ DCHECK_GT(data_length, 0);
+ data_length = std::min(data_length, utf16_length_);
+
// memcpy everything in buffer.
- size_t buffer_length =
- last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
- size_t memcpy_length = length <= buffer_length ? length : buffer_length;
+ size_t memcpy_length = std::min(data_length, chars_written_);
v8::internal::MemCopy(data, buffer_, memcpy_length * sizeof(uint16_t));
- if (length <= buffer_length) return length;
- DCHECK_NOT_NULL(unbuffered_start_);
+
+ if (data_length <= chars_written_) return data_length;
+
// Copy the rest the slow way.
- WriteUtf16Slow(unbuffered_start_, unbuffered_length_, data + buffer_length,
- length - buffer_length);
- return length;
+ WriteUtf16Slow(data + chars_written_, data_length - chars_written_, stream,
+ bytes_read_, trailing_);
+ return data_length;
}
class Latin1 {
public:
static const unsigned kMaxChar = 0xff;
- // Returns 0 if character does not convert to single latin-1 character
- // or if the character doesn't not convert back to latin-1 via inverse
- // operation (upper to lower, etc).
- static inline uint16_t ConvertNonLatin1ToLatin1(uint16_t);
+ // Convert the character to Latin-1 case equivalent if possible.
+ static inline uint16_t TryConvertToLatin1(uint16_t);
};
-
-uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
- DCHECK_GT(c, Latin1::kMaxChar);
+uint16_t Latin1::TryConvertToLatin1(uint16_t c) {
switch (c) {
// These are equivalent characters in Unicode.
case 0x39c:
@@ -112,7 +149,7 @@ uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
case 0x178:
return 0xff;
}
- return 0;
+ return c;
}
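
The Utf8Iterator introduced above surfaces each supplementary-plane code point as two UTF-16 code units, using trailing_ to remember that the low surrogate is still pending. The split itself, verified standalone (LeadSurrogate and TrailSurrogate here mirror the unibrow::Utf16 helpers):

#include <cstdint>
#include <cstdio>

std::uint16_t LeadSurrogate(std::uint32_t cp) {
  return static_cast<std::uint16_t>(0xD800 + (((cp - 0x10000) >> 10) & 0x3FF));
}
std::uint16_t TrailSurrogate(std::uint32_t cp) {
  return static_cast<std::uint16_t>(0xDC00 + ((cp - 0x10000) & 0x3FF));
}

int main() {
  std::uint32_t cp = 0x1F600;  // U+1F600, beyond the BMP
  std::printf("%04X %04X\n", LeadSurrogate(cp), TrailSurrogate(cp));  // D83D DE00
  return 0;
}
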
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index c6ce9a8eb2..75f53e22d1 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -204,6 +204,10 @@ V8_INLINE bool IsLineTerminator(uchar c) {
return c == 0x000A || c == 0x000D || c == 0x2028 || c == 0x2029;
}
+V8_INLINE bool IsStringLiteralLineTerminator(uchar c) {
+ return c == 0x000A || c == 0x000D;
+}
+
#ifndef V8_INTL_SUPPORT
struct ToLowercase {
static const int kMaxWidth = 3;
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 5b5d95ce9a..f3e2718fe9 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -18,6 +18,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/base/v8-fallthrough.h"
#include "src/globals.h"
#include "src/vector.h"
#include "src/zone/zone.h"
@@ -79,9 +80,15 @@ inline int WhichPowerOf2(T x) {
#undef CHECK_BIGGER
switch (x) {
default: UNREACHABLE();
- case 8: bits++; // Fall through.
- case 4: bits++; // Fall through.
- case 2: bits++; // Fall through.
+ case 8:
+ bits++;
+ V8_FALLTHROUGH;
+ case 4:
+ bits++;
+ V8_FALLTHROUGH;
+ case 2:
+ bits++;
+ V8_FALLTHROUGH;
case 1: break;
}
DCHECK_EQ(T{1} << bits, original_x);
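
V8_FALLTHROUGH (pulled in via the new v8-fallthrough.h include) annotates each deliberate case fall-through so -Wimplicit-fallthrough stays quiet. The C++17 standard attribute expresses the same intent; a minimal sketch of the rewritten switch:

int BitsFromPowerOfTwo(int x) {
  int bits = 0;
  switch (x) {
    case 8:
      bits++;
      [[fallthrough]];
    case 4:
      bits++;
      [[fallthrough]];
    case 2:
      bits++;
      [[fallthrough]];
    case 1:
      break;
  }
  return bits;  // log2(x) for x in {1, 2, 4, 8}
}
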
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index a6d97e8ff1..ab4918efec 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -81,7 +81,6 @@ void V8::InitializeOncePerProcessImpl() {
sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
- SetUpJSCallerSavedCodeData();
ExternalReference::SetUp();
Bootstrapper::InitializeOncePerProcess();
}
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
deleted file mode 100644
index 1f0ad0a467..0000000000
--- a/deps/v8/src/v8.gyp
+++ /dev/null
@@ -1,2630 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'v8_code': 1,
- 'v8_random_seed%': 314159265,
- 'v8_vector_stores%': 0,
- 'embed_script%': "",
- 'warmup_script%': "",
- 'v8_extra_library_files%': [],
- 'v8_experimental_extra_library_files%': [],
- 'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
- 'v8_os_page_size%': 0,
- },
- 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi', 'inspector/inspector.gypi'],
- 'targets': [
- {
- 'target_name': 'v8',
- 'dependencies_traverse': 1,
- 'dependencies': ['v8_maybe_snapshot', 'v8_dump_build_config#target'],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['component=="shared_library"', {
- 'type': '<(component)',
- 'sources': [
- # Note: on non-Windows we still build this file so that gyp
- # has some sources to link into the component.
- 'v8dll-main.cc',
- ],
- 'include_dirs': [
- '..',
- ],
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_SHARED',
- ],
- },
- 'conditions': [
- ['OS=="mac"', {
- 'xcode_settings': {
- 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
- },
- }],
- ['soname_version!=""', {
- 'product_extension': 'so.<(soname_version)',
- }],
- ],
- },
- {
- 'type': 'none',
- }],
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../include',
- ],
- },
- },
- {
- # This rule delegates to either v8_snapshot, v8_nosnapshot, or
- # v8_external_snapshot, depending on the current variables.
- # The intention is to make the 'calling' rules a bit simpler.
- 'target_name': 'v8_maybe_snapshot',
- 'type': 'none',
- 'conditions': [
- ['v8_use_snapshot!="true"', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_init', 'v8_nosnapshot'],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_snapshot'],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
- 'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
- 'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'target_conditions': [
- ['_toolset=="host"', {
- 'inputs': [
- '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- ],
- }, {
- 'inputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
- }],
- ],
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ]
- },
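
A minimal Python sketch of how 'v8_maybe_snapshot' resolves its dependency list under the two variables it tests above; the function and argument names are illustrative, not part of the build.

# Hypothetical sketch, mirroring the gyp conditions of 'v8_maybe_snapshot'.
def resolve_snapshot_deps(use_snapshot, use_external_startup_data):
    if not use_snapshot:                   # v8_use_snapshot != "true"
        return ['v8_base', 'v8_init', 'v8_nosnapshot']
    if use_external_startup_data:          # v8_use_external_startup_data == 1
        return ['v8_base', 'v8_external_snapshot']
    return ['v8_base', 'v8_snapshot']      # built-in snapshot
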
- {
- 'target_name': 'v8_init',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_initializers',
- ],
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '..',
- '../include',
- ],
- 'sources': [ ### gcmole(all) ###
- 'setup-isolate-full.cc',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- },
- {
- 'target_name': 'v8_initializers',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_base',
- ],
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '..',
- '../include',
- ],
- 'sources': [ ### gcmole(all) ###
- 'builtins/builtins-arguments-gen.cc',
- 'builtins/builtins-arguments-gen.h',
- 'builtins/builtins-array-gen.cc',
- 'builtins/builtins-async-function-gen.cc',
- 'builtins/builtins-async-gen.cc',
- 'builtins/builtins-async-gen.h',
- 'builtins/builtins-async-generator-gen.cc',
- 'builtins/builtins-async-iterator-gen.cc',
- 'builtins/builtins-boolean-gen.cc',
- 'builtins/builtins-call-gen.cc',
- 'builtins/builtins-call-gen.h',
- 'builtins/builtins-collections-gen.cc',
- 'builtins/builtins-console-gen.cc',
- 'builtins/builtins-constructor-gen.cc',
- 'builtins/builtins-constructor-gen.h',
- 'builtins/builtins-constructor.h',
- 'builtins/builtins-conversion-gen.cc',
- 'builtins/builtins-date-gen.cc',
- 'builtins/builtins-debug-gen.cc',
- 'builtins/builtins-function-gen.cc',
- 'builtins/builtins-generator-gen.cc',
- 'builtins/builtins-global-gen.cc',
- 'builtins/builtins-handler-gen.cc',
- 'builtins/builtins-ic-gen.cc',
- 'builtins/builtins-internal-gen.cc',
- 'builtins/builtins-interpreter-gen.cc',
- 'builtins/builtins-intl-gen.cc',
- 'builtins/builtins-iterator-gen.h',
- 'builtins/builtins-iterator-gen.cc',
- 'builtins/builtins-math-gen.cc',
- 'builtins/builtins-math-gen.h',
- 'builtins/builtins-number-gen.cc',
- 'builtins/builtins-object-gen.cc',
- 'builtins/builtins-promise-gen.cc',
- 'builtins/builtins-promise-gen.h',
- 'builtins/builtins-proxy-gen.cc',
- 'builtins/builtins-proxy-gen.h',
- 'builtins/builtins-reflect-gen.cc',
- 'builtins/builtins-regexp-gen.cc',
- 'builtins/builtins-regexp-gen.h',
- 'builtins/builtins-sharedarraybuffer-gen.cc',
- 'builtins/builtins-string-gen.cc',
- 'builtins/builtins-string-gen.h',
- 'builtins/builtins-symbol-gen.cc',
- 'builtins/builtins-typedarray-gen.cc',
- 'builtins/builtins-utils-gen.h',
- 'builtins/builtins-wasm-gen.cc',
- 'builtins/setup-builtins-internal.cc',
- 'heap/setup-heap-internal.cc',
- 'ic/accessor-assembler.cc',
- 'ic/accessor-assembler.h',
- 'ic/binary-op-assembler.cc',
- 'ic/binary-op-assembler.h',
- 'ic/keyed-store-generic.cc',
- 'ic/keyed-store-generic.h',
- 'interpreter/interpreter-assembler.cc',
- 'interpreter/interpreter-assembler.h',
- 'interpreter/interpreter-generator.cc',
- 'interpreter/interpreter-generator.h',
- 'interpreter/interpreter-intrinsics-generator.cc',
- 'interpreter/interpreter-intrinsics-generator.h',
- 'interpreter/setup-interpreter-internal.cc',
- 'interpreter/setup-interpreter.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_target_arch=="ia32"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- 'builtins/ia32/builtins-ia32.cc',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'sources': [ ### gcmole(arch:x64) ###
- 'builtins/x64/builtins-x64.cc',
- ],
- }],
- ['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- 'builtins/arm/builtins-arm.cc',
- ],
- }],
- ['v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:arm64) ###
- 'builtins/arm64/builtins-arm64.cc',
- ],
- }],
- ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- 'builtins/mips/builtins-mips.cc',
- ],
- }],
- ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
- 'sources': [ ### gcmole(arch:mips64el) ###
- 'builtins/mips64/builtins-mips64.cc',
- ],
- }],
- ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
- 'sources': [ ### gcmole(arch:ppc) ###
- 'builtins/ppc/builtins-ppc.cc',
- ],
- }],
- ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
- 'sources': [ ### gcmole(arch:s390) ###
- 'builtins/s390/builtins-s390.cc',
- ],
- }],
- ['v8_enable_i18n_support==0', {
- 'sources!': [
- 'builtins/builtins-intl-gen.cc',
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_snapshot',
- 'type': 'static_library',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': [
- 'mksnapshot#host',
- 'js2c#host',
- ],
- }, {
- 'toolsets': ['target'],
- 'dependencies': [
- 'mksnapshot',
- 'js2c',
- ],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_SHARED',
- ],
- },
- }],
- ],
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- 'setup-isolate-deserialize.cc',
- ],
- 'actions': [
- {
- 'action_name': 'run_mksnapshot',
- 'inputs': [
- '<(mksnapshot_exec)',
- ],
- 'conditions': [
- ['embed_script!=""', {
- 'inputs': [
- '<(embed_script)',
- ],
- }],
- ['warmup_script!=""', {
- 'inputs': [
- '<(warmup_script)',
- ],
- }],
- ],
- 'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- ],
- 'variables': {
- 'mksnapshot_flags': [],
- 'conditions': [
- ['v8_random_seed!=0', {
- 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
- }],
- ['v8_vector_stores!=0', {
- 'mksnapshot_flags': ['--vector-stores'],
- }],
- ],
- },
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_src', '<(INTERMEDIATE_DIR)/snapshot.cc',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- },
- ],
- },
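
The run_mksnapshot action above assembles its command line from the defaults declared in the 'variables' block at the top of the file. A standalone Python reconstruction of that assembly follows; 'mksnapshot' and 'snapshot.cc' stand in for the expanded gyp paths and are assumptions, not literal values.

# Hypothetical reconstruction of the run_mksnapshot command line,
# using the file's default variable values.
v8_random_seed = 314159265     # default from 'variables' above
v8_vector_stores = 0           # default from 'variables' above
flags = []
if v8_random_seed != 0:
    flags += ['--random-seed', str(v8_random_seed)]
if v8_vector_stores != 0:
    flags.append('--vector-stores')
# embed_script and warmup_script default to "" and expand to empty args.
cmd = ['mksnapshot'] + flags + ['--startup_src', 'snapshot.cc']
print(' '.join(cmd))
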
- {
- 'target_name': 'v8_nosnapshot',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- 'snapshot/snapshot-empty.cc',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': ['js2c#host'],
- }, {
- 'toolsets': ['target'],
- 'dependencies': ['js2c'],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- }],
- ]
- },
- {
- 'target_name': 'v8_external_snapshot',
- 'type': 'static_library',
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': [
- 'mksnapshot#host',
- 'js2c#host',
- 'natives_blob',
- ],
- }, {
- 'toolsets': ['target'],
- 'dependencies': [
- 'mksnapshot',
- 'js2c',
- 'natives_blob',
- ],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_SHARED',
- ],
- },
- }],
- ],
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- 'setup-isolate-deserialize.cc',
- 'snapshot/natives-external.cc',
- 'snapshot/snapshot-external.cc',
- ],
- 'actions': [
- {
- 'action_name': 'run_mksnapshot (external)',
- 'inputs': [
- '<(mksnapshot_exec)',
- ],
- 'variables': {
- 'mksnapshot_flags': [],
- 'conditions': [
- ['v8_random_seed!=0', {
- 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
- }],
- ['v8_vector_stores!=0', {
- 'mksnapshot_flags': ['--vector-stores'],
- }],
- ['v8_os_page_size!=0', {
- 'mksnapshot_flags': ['--v8_os_page_size', '<(v8_os_page_size)'],
- }],
- ],
- },
- 'conditions': [
- ['embed_script!=""', {
- 'inputs': [
- '<(embed_script)',
- ],
- }],
- ['warmup_script!=""', {
- 'inputs': [
- '<(warmup_script)',
- ],
- }],
- ['want_separate_host_toolset==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- ],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }],
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }],
- ],
- },
- ],
- }],
- ],
- },
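
A short sketch of the blob selection in the 'run_mksnapshot (external)' action: with a separate host toolset, the host build writes its own blob. Function and argument names are illustrative.

# Hypothetical sketch of the output chosen by the external action.
def snapshot_blob_name(want_separate_host_toolset, toolset):
    if want_separate_host_toolset and toolset == 'host':
        return 'snapshot_blob_host.bin'
    return 'snapshot_blob.bin'

assert snapshot_blob_name(True, 'host') == 'snapshot_blob_host.bin'
assert snapshot_blob_name(True, 'target') == 'snapshot_blob.bin'
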
- {
- 'target_name': 'v8_base',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_libbase',
- 'v8_libsampler',
- 'inspector/inspector.gyp:protocol_generated_sources#target',
- 'inspector/inspector.gyp:inspector_injected_script#target',
- ],
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- '<(SHARED_INTERMEDIATE_DIR)'
- ],
- 'sources': [ ### gcmole(all) ###
- '<@(inspector_all_sources)',
- '../include/v8-debug.h',
- '../include/v8-platform.h',
- '../include/v8-profiler.h',
- '../include/v8-testing.h',
- '../include/v8-util.h',
- '../include/v8-value-serializer-version.h',
- '../include/v8-version-string.h',
- '../include/v8-version.h',
- '../include/v8.h',
- '../include/v8config.h',
- 'accessors.cc',
- 'accessors.h',
- 'address-map.cc',
- 'address-map.h',
- 'allocation.cc',
- 'allocation.h',
- 'allocation-site-scopes.h',
- 'api.cc',
- 'api.h',
- 'api-arguments-inl.h',
- 'api-arguments.cc',
- 'api-arguments.h',
- 'api-natives.cc',
- 'api-natives.h',
- 'arguments.cc',
- 'arguments.h',
- 'asmjs/asm-js.cc',
- 'asmjs/asm-js.h',
- 'asmjs/asm-names.h',
- 'asmjs/asm-parser.cc',
- 'asmjs/asm-parser.h',
- 'asmjs/asm-scanner.cc',
- 'asmjs/asm-scanner.h',
- 'asmjs/asm-types.cc',
- 'asmjs/asm-types.h',
- 'asmjs/switch-logic.h',
- 'asmjs/switch-logic.cc',
- 'assembler.cc',
- 'assembler.h',
- 'assembler-inl.h',
- 'assert-scope.h',
- 'assert-scope.cc',
- 'ast/ast-function-literal-id-reindexer.cc',
- 'ast/ast-function-literal-id-reindexer.h',
- 'ast/ast-numbering.cc',
- 'ast/ast-numbering.h',
- 'ast/ast-source-ranges.h',
- 'ast/ast-traversal-visitor.h',
- 'ast/ast-value-factory.cc',
- 'ast/ast-value-factory.h',
- 'ast/ast.cc',
- 'ast/ast.h',
- 'ast/compile-time-value.cc',
- 'ast/compile-time-value.h',
- 'ast/context-slot-cache.cc',
- 'ast/context-slot-cache.h',
- 'ast/modules.cc',
- 'ast/modules.h',
- 'ast/prettyprinter.cc',
- 'ast/prettyprinter.h',
- 'ast/scopes.cc',
- 'ast/scopes.h',
- 'ast/variables.cc',
- 'ast/variables.h',
- 'bailout-reason.cc',
- 'bailout-reason.h',
- 'basic-block-profiler.cc',
- 'basic-block-profiler.h',
- 'bignum-dtoa.cc',
- 'bignum-dtoa.h',
- 'bignum.cc',
- 'bignum.h',
- 'bit-vector.cc',
- 'bit-vector.h',
- 'bootstrapper.cc',
- 'bootstrapper.h',
- 'boxed-float.h',
- 'builtins/builtins-api.cc',
- 'builtins/builtins-arraybuffer.cc',
- 'builtins/builtins-array.cc',
- 'builtins/builtins-bigint.cc',
- 'builtins/builtins-boolean.cc',
- 'builtins/builtins-call.cc',
- 'builtins/builtins-callsite.cc',
- 'builtins/builtins-collections.cc',
- 'builtins/builtins-console.cc',
- 'builtins/builtins-constructor.h',
- 'builtins/builtins-dataview.cc',
- 'builtins/builtins-date.cc',
- 'builtins/builtins-definitions.h',
- 'builtins/builtins-descriptors.h',
- 'builtins/builtins-error.cc',
- 'builtins/builtins-function.cc',
- 'builtins/builtins-global.cc',
- 'builtins/builtins-internal.cc',
- 'builtins/builtins-interpreter.cc',
- 'builtins/builtins-json.cc',
- 'builtins/builtins-math.cc',
- 'builtins/builtins-number.cc',
- 'builtins/builtins-object.cc',
- 'builtins/builtins-promise.cc',
- 'builtins/builtins-reflect.cc',
- 'builtins/builtins-regexp.cc',
- 'builtins/builtins-sharedarraybuffer.cc',
- 'builtins/builtins-string.cc',
- 'builtins/builtins-intl.cc',
- 'builtins/builtins-intl.h',
- 'builtins/builtins-symbol.cc',
- 'builtins/builtins-typedarray.cc',
- 'builtins/builtins-utils.h',
- 'builtins/builtins.cc',
- 'builtins/builtins.h',
- 'cached-powers.cc',
- 'cached-powers.h',
- 'callable.h',
- 'cancelable-task.cc',
- 'cancelable-task.h',
- 'char-predicates.cc',
- 'char-predicates-inl.h',
- 'char-predicates.h',
- 'checks.h',
- 'code-events.h',
- 'code-factory.cc',
- 'code-factory.h',
- 'code-stub-assembler.cc',
- 'code-stub-assembler.h',
- 'code-stubs.cc',
- 'code-stubs.h',
- 'code-stubs-utils.h',
- 'codegen.cc',
- 'codegen.h',
- 'collector.h',
- 'compilation-cache.cc',
- 'compilation-cache.h',
- 'compilation-dependencies.cc',
- 'compilation-dependencies.h',
- 'compilation-info.cc',
- 'compilation-info.h',
- 'compilation-statistics.cc',
- 'compilation-statistics.h',
- 'compiler/access-builder.cc',
- 'compiler/access-builder.h',
- 'compiler/access-info.cc',
- 'compiler/access-info.h',
- 'compiler/all-nodes.cc',
- 'compiler/all-nodes.h',
- 'compiler/allocation-builder.h',
- 'compiler/basic-block-instrumentor.cc',
- 'compiler/basic-block-instrumentor.h',
- 'compiler/branch-elimination.cc',
- 'compiler/branch-elimination.h',
- 'compiler/bytecode-analysis.cc',
- 'compiler/bytecode-analysis.h',
- 'compiler/bytecode-graph-builder.cc',
- 'compiler/bytecode-graph-builder.h',
- 'compiler/bytecode-liveness-map.cc',
- 'compiler/bytecode-liveness-map.h',
- 'compiler/c-linkage.cc',
- 'compiler/checkpoint-elimination.cc',
- 'compiler/checkpoint-elimination.h',
- 'compiler/code-generator-impl.h',
- 'compiler/code-generator.cc',
- 'compiler/code-generator.h',
- 'compiler/code-assembler.cc',
- 'compiler/code-assembler.h',
- 'compiler/common-node-cache.cc',
- 'compiler/common-node-cache.h',
- 'compiler/common-operator-reducer.cc',
- 'compiler/common-operator-reducer.h',
- 'compiler/common-operator.cc',
- 'compiler/common-operator.h',
- 'compiler/control-equivalence.cc',
- 'compiler/control-equivalence.h',
- 'compiler/control-flow-optimizer.cc',
- 'compiler/control-flow-optimizer.h',
- 'compiler/dead-code-elimination.cc',
- 'compiler/dead-code-elimination.h',
- 'compiler/diamond.h',
- 'compiler/effect-control-linearizer.cc',
- 'compiler/effect-control-linearizer.h',
- 'compiler/escape-analysis.cc',
- 'compiler/escape-analysis.h',
- 'compiler/escape-analysis-reducer.cc',
- 'compiler/escape-analysis-reducer.h',
- 'compiler/frame.cc',
- 'compiler/frame.h',
- 'compiler/frame-elider.cc',
- 'compiler/frame-elider.h',
- 'compiler/frame-states.cc',
- 'compiler/frame-states.h',
- 'compiler/gap-resolver.cc',
- 'compiler/gap-resolver.h',
- 'compiler/graph-assembler.cc',
- 'compiler/graph-assembler.h',
- 'compiler/graph-reducer.cc',
- 'compiler/graph-reducer.h',
- 'compiler/graph-trimmer.cc',
- 'compiler/graph-trimmer.h',
- 'compiler/graph-visualizer.cc',
- 'compiler/graph-visualizer.h',
- 'compiler/graph.cc',
- 'compiler/graph.h',
- 'compiler/instruction-codes.h',
- 'compiler/instruction-selector-impl.h',
- 'compiler/instruction-selector.cc',
- 'compiler/instruction-selector.h',
- 'compiler/instruction-scheduler.cc',
- 'compiler/instruction-scheduler.h',
- 'compiler/instruction.cc',
- 'compiler/instruction.h',
- 'compiler/int64-lowering.cc',
- 'compiler/int64-lowering.h',
- 'compiler/js-builtin-reducer.cc',
- 'compiler/js-builtin-reducer.h',
- 'compiler/js-call-reducer.cc',
- 'compiler/js-call-reducer.h',
- 'compiler/js-context-specialization.cc',
- 'compiler/js-context-specialization.h',
- 'compiler/js-create-lowering.cc',
- 'compiler/js-create-lowering.h',
- 'compiler/js-generic-lowering.cc',
- 'compiler/js-generic-lowering.h',
- 'compiler/js-graph.cc',
- 'compiler/js-graph.h',
- 'compiler/js-inlining.cc',
- 'compiler/js-inlining.h',
- 'compiler/js-inlining-heuristic.cc',
- 'compiler/js-inlining-heuristic.h',
- 'compiler/js-intrinsic-lowering.cc',
- 'compiler/js-intrinsic-lowering.h',
- 'compiler/js-native-context-specialization.cc',
- 'compiler/js-native-context-specialization.h',
- 'compiler/js-operator.cc',
- 'compiler/js-operator.h',
- 'compiler/js-type-hint-lowering.cc',
- 'compiler/js-type-hint-lowering.h',
- 'compiler/js-typed-lowering.cc',
- 'compiler/js-typed-lowering.h',
- 'compiler/jump-threading.cc',
- 'compiler/jump-threading.h',
- 'compiler/linkage.cc',
- 'compiler/linkage.h',
- 'compiler/live-range-separator.cc',
- 'compiler/live-range-separator.h',
- 'compiler/load-elimination.cc',
- 'compiler/load-elimination.h',
- 'compiler/loop-analysis.cc',
- 'compiler/loop-analysis.h',
- 'compiler/loop-peeling.cc',
- 'compiler/loop-peeling.h',
- 'compiler/loop-variable-optimizer.cc',
- 'compiler/loop-variable-optimizer.h',
- 'compiler/machine-operator-reducer.cc',
- 'compiler/machine-operator-reducer.h',
- 'compiler/machine-operator.cc',
- 'compiler/machine-operator.h',
- 'compiler/machine-graph-verifier.cc',
- 'compiler/machine-graph-verifier.h',
- 'compiler/memory-optimizer.cc',
- 'compiler/memory-optimizer.h',
- 'compiler/move-optimizer.cc',
- 'compiler/move-optimizer.h',
- 'compiler/node-aux-data.h',
- 'compiler/node-cache.cc',
- 'compiler/node-cache.h',
- 'compiler/node-marker.cc',
- 'compiler/node-marker.h',
- 'compiler/node-matchers.cc',
- 'compiler/node-matchers.h',
- 'compiler/node-properties.cc',
- 'compiler/node-properties.h',
- 'compiler/node.cc',
- 'compiler/node.h',
- 'compiler/opcodes.cc',
- 'compiler/opcodes.h',
- 'compiler/operation-typer.cc',
- 'compiler/operation-typer.h',
- 'compiler/operator-properties.cc',
- 'compiler/operator-properties.h',
- 'compiler/operator.cc',
- 'compiler/operator.h',
- 'compiler/osr.cc',
- 'compiler/osr.h',
- 'compiler/persistent-map.h',
- 'compiler/pipeline.cc',
- 'compiler/pipeline.h',
- 'compiler/pipeline-statistics.cc',
- 'compiler/pipeline-statistics.h',
- 'compiler/property-access-builder.cc',
- 'compiler/property-access-builder.h',
- 'compiler/raw-machine-assembler.cc',
- 'compiler/raw-machine-assembler.h',
- 'compiler/redundancy-elimination.cc',
- 'compiler/redundancy-elimination.h',
- 'compiler/register-allocator.cc',
- 'compiler/register-allocator.h',
- 'compiler/register-allocator-verifier.cc',
- 'compiler/register-allocator-verifier.h',
- 'compiler/representation-change.cc',
- 'compiler/representation-change.h',
- 'compiler/schedule.cc',
- 'compiler/schedule.h',
- 'compiler/scheduler.cc',
- 'compiler/scheduler.h',
- 'compiler/select-lowering.cc',
- 'compiler/select-lowering.h',
- 'compiler/simd-scalar-lowering.cc',
- 'compiler/simd-scalar-lowering.h',
- 'compiler/simplified-lowering.cc',
- 'compiler/simplified-lowering.h',
- 'compiler/simplified-operator-reducer.cc',
- 'compiler/simplified-operator-reducer.h',
- 'compiler/simplified-operator.cc',
- 'compiler/simplified-operator.h',
- 'compiler/compiler-source-position-table.cc',
- 'compiler/compiler-source-position-table.h',
- 'compiler/state-values-utils.cc',
- 'compiler/state-values-utils.h',
- 'compiler/store-store-elimination.cc',
- 'compiler/store-store-elimination.h',
- 'compiler/types.cc',
- 'compiler/types.h',
- 'compiler/type-cache.cc',
- 'compiler/type-cache.h',
- 'compiler/typed-optimization.cc',
- 'compiler/typed-optimization.h',
- 'compiler/typer.cc',
- 'compiler/typer.h',
- 'compiler/unwinding-info-writer.h',
- 'compiler/value-numbering-reducer.cc',
- 'compiler/value-numbering-reducer.h',
- 'compiler/verifier.cc',
- 'compiler/verifier.h',
- 'compiler/wasm-compiler.cc',
- 'compiler/wasm-compiler.h',
- 'compiler/wasm-linkage.cc',
- 'compiler/zone-stats.cc',
- 'compiler/zone-stats.h',
- 'compiler-dispatcher/compiler-dispatcher.cc',
- 'compiler-dispatcher/compiler-dispatcher.h',
- 'compiler-dispatcher/compiler-dispatcher-job.cc',
- 'compiler-dispatcher/compiler-dispatcher-job.h',
- 'compiler-dispatcher/compiler-dispatcher-tracer.cc',
- 'compiler-dispatcher/compiler-dispatcher-tracer.h',
- 'compiler-dispatcher/optimizing-compile-dispatcher.cc',
- 'compiler-dispatcher/optimizing-compile-dispatcher.h',
- 'compiler-dispatcher/unoptimized-compile-job.cc',
- 'compiler-dispatcher/unoptimized-compile-job.h',
- 'compiler.cc',
- 'compiler.h',
- 'contexts-inl.h',
- 'contexts.cc',
- 'contexts.h',
- 'conversions-inl.h',
- 'conversions.cc',
- 'conversions.h',
- 'counters-inl.h',
- 'counters.cc',
- 'counters.h',
- 'date.cc',
- 'date.h',
- 'dateparser-inl.h',
- 'dateparser.cc',
- 'dateparser.h',
- 'debug/debug-coverage.cc',
- 'debug/debug-coverage.h',
- 'debug/debug-evaluate.cc',
- 'debug/debug-evaluate.h',
- 'debug/debug-interface.h',
- 'debug/debug-frames.cc',
- 'debug/debug-frames.h',
- 'debug/debug-scope-iterator.cc',
- 'debug/debug-scope-iterator.h',
- 'debug/debug-scopes.cc',
- 'debug/debug-scopes.h',
- 'debug/debug-stack-trace-iterator.cc',
- 'debug/debug-stack-trace-iterator.h',
- 'debug/debug-type-profile.cc',
- 'debug/debug-type-profile.h',
- 'debug/debug.cc',
- 'debug/debug.h',
- 'debug/interface-types.h',
- 'debug/liveedit.cc',
- 'debug/liveedit.h',
- 'deoptimize-reason.cc',
- 'deoptimize-reason.h',
- 'deoptimizer.cc',
- 'deoptimizer.h',
- 'detachable-vector.h',
- 'disasm.h',
- 'disassembler.cc',
- 'disassembler.h',
- 'diy-fp.cc',
- 'diy-fp.h',
- 'double.h',
- 'dtoa.cc',
- 'dtoa.h',
- 'eh-frame.cc',
- 'eh-frame.h',
- 'elements-kind.cc',
- 'elements-kind.h',
- 'elements.cc',
- 'elements.h',
- 'execution.cc',
- 'execution.h',
- 'extensions/externalize-string-extension.cc',
- 'extensions/externalize-string-extension.h',
- 'extensions/free-buffer-extension.cc',
- 'extensions/free-buffer-extension.h',
- 'extensions/gc-extension.cc',
- 'extensions/gc-extension.h',
- 'extensions/ignition-statistics-extension.cc',
- 'extensions/ignition-statistics-extension.h',
- 'extensions/statistics-extension.cc',
- 'extensions/statistics-extension.h',
- 'extensions/trigger-failure-extension.cc',
- 'extensions/trigger-failure-extension.h',
- 'external-reference-table.cc',
- 'external-reference-table.h',
- 'factory-inl.h',
- 'factory.cc',
- 'factory.h',
- 'fast-dtoa.cc',
- 'fast-dtoa.h',
- 'feedback-vector-inl.h',
- 'feedback-vector.cc',
- 'feedback-vector.h',
- 'field-index.h',
- 'field-index-inl.h',
- 'field-type.cc',
- 'field-type.h',
- 'fixed-dtoa.cc',
- 'fixed-dtoa.h',
- 'flag-definitions.h',
- 'flags.cc',
- 'flags.h',
- 'frame-constants.h',
- 'frames-inl.h',
- 'frames.cc',
- 'frames.h',
- 'futex-emulation.cc',
- 'futex-emulation.h',
- 'gdb-jit.cc',
- 'gdb-jit.h',
- 'global-handles.cc',
- 'global-handles.h',
- 'globals.h',
- 'handles-inl.h',
- 'handles.cc',
- 'handles.h',
- 'heap-symbols.h',
- 'heap/array-buffer-collector.cc',
- 'heap/array-buffer-collector.h',
- 'heap/array-buffer-tracker-inl.h',
- 'heap/array-buffer-tracker.cc',
- 'heap/array-buffer-tracker.h',
- 'heap/barrier.h',
- 'heap/code-stats.cc',
- 'heap/code-stats.h',
- 'heap/concurrent-marking.cc',
- 'heap/concurrent-marking.h',
- 'heap/embedder-tracing.cc',
- 'heap/embedder-tracing.h',
- 'heap/memory-reducer.cc',
- 'heap/memory-reducer.h',
- 'heap/gc-idle-time-handler.cc',
- 'heap/gc-idle-time-handler.h',
- 'heap/gc-tracer.cc',
- 'heap/gc-tracer.h',
- 'heap/heap-inl.h',
- 'heap/heap.cc',
- 'heap/heap.h',
- 'heap/incremental-marking-inl.h',
- 'heap/incremental-marking-job.cc',
- 'heap/incremental-marking-job.h',
- 'heap/incremental-marking.cc',
- 'heap/incremental-marking.h',
- 'heap/invalidated-slots-inl.h',
- 'heap/invalidated-slots.cc',
- 'heap/invalidated-slots.h',
- 'heap/item-parallel-job.h',
- 'heap/local-allocator.h',
- 'heap/mark-compact-inl.h',
- 'heap/mark-compact.cc',
- 'heap/mark-compact.h',
- 'heap/marking.cc',
- 'heap/marking.h',
- 'heap/object-stats.cc',
- 'heap/object-stats.h',
- 'heap/objects-visiting-inl.h',
- 'heap/objects-visiting.cc',
- 'heap/objects-visiting.h',
- 'heap/remembered-set.h',
- 'heap/scavenge-job.h',
- 'heap/scavenge-job.cc',
- 'heap/scavenger-inl.h',
- 'heap/scavenger.cc',
- 'heap/scavenger.h',
- 'heap/slot-set.h',
- 'heap/spaces-inl.h',
- 'heap/spaces.cc',
- 'heap/spaces.h',
- 'heap/store-buffer.cc',
- 'heap/store-buffer.h',
- 'heap/stress-marking-observer.cc',
- 'heap/stress-marking-observer.h',
- 'heap/stress-scavenge-observer.cc',
- 'heap/stress-scavenge-observer.h',
- 'heap/sweeper.cc',
- 'heap/sweeper.h',
- 'heap/worklist.h',
- 'intl.cc',
- 'intl.h',
- 'icu_util.cc',
- 'icu_util.h',
- 'ic/call-optimization.cc',
- 'ic/call-optimization.h',
- 'ic/handler-configuration.cc',
- 'ic/handler-configuration-inl.h',
- 'ic/handler-configuration.h',
- 'ic/ic-inl.h',
- 'ic/ic-stats.cc',
- 'ic/ic-stats.h',
- 'ic/ic.cc',
- 'ic/ic.h',
- 'identity-map.cc',
- 'identity-map.h',
- 'interface-descriptors.cc',
- 'interface-descriptors.h',
- 'interpreter/block-coverage-builder.h',
- 'interpreter/bytecodes.cc',
- 'interpreter/bytecodes.h',
- 'interpreter/bytecode-array-accessor.cc',
- 'interpreter/bytecode-array-accessor.h',
- 'interpreter/bytecode-array-builder.cc',
- 'interpreter/bytecode-array-builder.h',
- 'interpreter/bytecode-array-iterator.cc',
- 'interpreter/bytecode-array-iterator.h',
- 'interpreter/bytecode-array-random-iterator.cc',
- 'interpreter/bytecode-array-random-iterator.h',
- 'interpreter/bytecode-array-writer.cc',
- 'interpreter/bytecode-array-writer.h',
- 'interpreter/bytecode-decoder.cc',
- 'interpreter/bytecode-decoder.h',
- 'interpreter/bytecode-flags.cc',
- 'interpreter/bytecode-flags.h',
- 'interpreter/bytecode-generator.cc',
- 'interpreter/bytecode-generator.h',
- 'interpreter/bytecode-label.cc',
- 'interpreter/bytecode-label.h',
- 'interpreter/bytecode-node.cc',
- 'interpreter/bytecode-node.h',
- 'interpreter/bytecode-operands.cc',
- 'interpreter/bytecode-operands.h',
- 'interpreter/bytecode-register.cc',
- 'interpreter/bytecode-register.h',
- 'interpreter/bytecode-register-allocator.h',
- 'interpreter/bytecode-register-optimizer.cc',
- 'interpreter/bytecode-register-optimizer.h',
- 'interpreter/bytecode-source-info.cc',
- 'interpreter/bytecode-source-info.h',
- 'interpreter/bytecode-jump-table.h',
- 'interpreter/bytecode-traits.h',
- 'interpreter/constant-array-builder.cc',
- 'interpreter/constant-array-builder.h',
- 'interpreter/control-flow-builders.cc',
- 'interpreter/control-flow-builders.h',
- 'interpreter/handler-table-builder.cc',
- 'interpreter/handler-table-builder.h',
- 'interpreter/interpreter.cc',
- 'interpreter/interpreter.h',
- 'interpreter/interpreter-generator.h',
- 'interpreter/interpreter-intrinsics.cc',
- 'interpreter/interpreter-intrinsics.h',
- 'isolate-inl.h',
- 'isolate.cc',
- 'isolate.h',
- 'json-parser.cc',
- 'json-parser.h',
- 'json-stringifier.cc',
- 'json-stringifier.h',
- 'keys.h',
- 'keys.cc',
- 'label.h',
- 'layout-descriptor-inl.h',
- 'layout-descriptor.cc',
- 'layout-descriptor.h',
- 'locked-queue-inl.h',
- 'locked-queue.h',
- 'log-inl.h',
- 'log-utils.cc',
- 'log-utils.h',
- 'log.cc',
- 'log.h',
- 'lookup-cache-inl.h',
- 'lookup-cache.cc',
- 'lookup-cache.h',
- 'lookup.cc',
- 'lookup.h',
- 'map-updater.cc',
- 'map-updater.h',
- 'macro-assembler-inl.h',
- 'macro-assembler.h',
- 'machine-type.cc',
- 'machine-type.h',
- 'managed.h',
- 'messages.cc',
- 'messages.h',
- 'msan.h',
- 'objects-body-descriptors-inl.h',
- 'objects-body-descriptors.h',
- 'objects-debug.cc',
- 'objects-inl.h',
- 'objects-printer.cc',
- 'objects.cc',
- 'objects.h',
- 'objects/arguments-inl.h',
- 'objects/arguments.h',
- 'objects/bigint.cc',
- 'objects/bigint.h',
- 'objects/code-inl.h',
- 'objects/code.h',
- 'objects/compilation-cache.h',
- 'objects/compilation-cache-inl.h',
- 'objects/data-handler.h',
- 'objects/data-handler-inl.h',
- 'objects/debug-objects-inl.h',
- 'objects/debug-objects.cc',
- 'objects/debug-objects.h',
- 'objects/descriptor-array.h',
- 'objects/dictionary.h',
- 'objects/fixed-array.h',
- 'objects/fixed-array-inl.h',
- 'objects/frame-array.h',
- 'objects/frame-array-inl.h',
- 'objects/hash-table-inl.h',
- 'objects/hash-table.h',
- 'objects/intl-objects.cc',
- 'objects/intl-objects.h',
- 'objects/js-array.h',
- 'objects/js-array-inl.h',
- 'objects/js-collection.h',
- 'objects/js-collection-inl.h',
- 'objects/js-regexp.h',
- 'objects/js-regexp-inl.h',
- 'objects/literal-objects.cc',
- 'objects/literal-objects-inl.h',
- 'objects/literal-objects.h',
- 'objects/map-inl.h',
- 'objects/map.h',
- 'objects/name-inl.h',
- 'objects/name.h',
- 'objects/module-inl.h',
- 'objects/module.cc',
- 'objects/module.h',
- 'objects/object-macros.h',
- 'objects/object-macros-undef.h',
- 'objects/property-descriptor-object.h',
- 'objects/property-descriptor-object-inl.h',
- 'objects/regexp-match-info.h',
- 'objects/scope-info.cc',
- 'objects/scope-info.h',
- 'objects/script.h',
- 'objects/script-inl.h',
- 'objects/shared-function-info-inl.h',
- 'objects/shared-function-info.h',
- 'objects/string-inl.h',
- 'objects/string.h',
- 'objects/string-table.h',
- 'objects/template-objects.cc',
- 'objects/template-objects.h',
- 'ostreams.cc',
- 'ostreams.h',
- 'parsing/background-parsing-task.cc',
- 'parsing/background-parsing-task.h',
- 'parsing/duplicate-finder.h',
- 'parsing/expression-classifier.h',
- 'parsing/expression-scope-reparenter.cc',
- 'parsing/expression-scope-reparenter.h',
- 'parsing/func-name-inferrer.cc',
- 'parsing/func-name-inferrer.h',
- 'parsing/parse-info.cc',
- 'parsing/parse-info.h',
- 'parsing/parser-base.h',
- 'parsing/parser.cc',
- 'parsing/parser.h',
- 'parsing/parsing.cc',
- 'parsing/parsing.h',
- 'parsing/pattern-rewriter.cc',
- 'parsing/preparse-data-format.h',
- 'parsing/preparse-data.cc',
- 'parsing/preparse-data.h',
- 'parsing/preparsed-scope-data.cc',
- 'parsing/preparsed-scope-data.h',
- 'parsing/preparser.cc',
- 'parsing/preparser.h',
- 'parsing/rewriter.cc',
- 'parsing/rewriter.h',
- 'parsing/scanner-character-streams.cc',
- 'parsing/scanner-character-streams.h',
- 'parsing/scanner.cc',
- 'parsing/scanner.h',
- 'parsing/token.cc',
- 'parsing/token.h',
- 'pending-compilation-error-handler.cc',
- 'pending-compilation-error-handler.h',
- 'perf-jit.cc',
- 'perf-jit.h',
- 'profiler/allocation-tracker.cc',
- 'profiler/allocation-tracker.h',
- 'profiler/circular-queue-inl.h',
- 'profiler/circular-queue.h',
- 'profiler/cpu-profiler-inl.h',
- 'profiler/cpu-profiler.cc',
- 'profiler/cpu-profiler.h',
- 'profiler/heap-profiler.cc',
- 'profiler/heap-profiler.h',
- 'profiler/heap-snapshot-generator-inl.h',
- 'profiler/heap-snapshot-generator.cc',
- 'profiler/heap-snapshot-generator.h',
- 'profiler/profiler-listener.cc',
- 'profiler/profiler-listener.h',
- 'profiler/profile-generator-inl.h',
- 'profiler/profile-generator.cc',
- 'profiler/profile-generator.h',
- 'profiler/sampling-heap-profiler.cc',
- 'profiler/sampling-heap-profiler.h',
- 'profiler/strings-storage.cc',
- 'profiler/strings-storage.h',
- 'profiler/tick-sample.cc',
- 'profiler/tick-sample.h',
- 'profiler/tracing-cpu-profiler.cc',
- 'profiler/tracing-cpu-profiler.h',
- 'profiler/unbound-queue-inl.h',
- 'profiler/unbound-queue.h',
- 'property-descriptor.cc',
- 'property-descriptor.h',
- 'property-details.h',
- 'property.cc',
- 'property.h',
- 'prototype.h',
- 'regexp/bytecodes-irregexp.h',
- 'regexp/interpreter-irregexp.cc',
- 'regexp/interpreter-irregexp.h',
- 'regexp/jsregexp-inl.h',
- 'regexp/jsregexp.cc',
- 'regexp/jsregexp.h',
- 'regexp/regexp-ast.cc',
- 'regexp/regexp-ast.h',
- 'regexp/regexp-macro-assembler-irregexp-inl.h',
- 'regexp/regexp-macro-assembler-irregexp.cc',
- 'regexp/regexp-macro-assembler-irregexp.h',
- 'regexp/regexp-macro-assembler-tracer.cc',
- 'regexp/regexp-macro-assembler-tracer.h',
- 'regexp/regexp-macro-assembler.cc',
- 'regexp/regexp-macro-assembler.h',
- 'regexp/regexp-parser.cc',
- 'regexp/regexp-parser.h',
- 'regexp/regexp-stack.cc',
- 'regexp/regexp-stack.h',
- 'regexp/regexp-utils.cc',
- 'regexp/regexp-utils.h',
- 'register-configuration.cc',
- 'register-configuration.h',
- 'reglist.h',
- 'runtime-profiler.cc',
- 'runtime-profiler.h',
- 'runtime/runtime-array.cc',
- 'runtime/runtime-atomics.cc',
- 'runtime/runtime-bigint.cc',
- 'runtime/runtime-classes.cc',
- 'runtime/runtime-collections.cc',
- 'runtime/runtime-compiler.cc',
- 'runtime/runtime-date.cc',
- 'runtime/runtime-debug.cc',
- 'runtime/runtime-forin.cc',
- 'runtime/runtime-function.cc',
- 'runtime/runtime-error.cc',
- 'runtime/runtime-futex.cc',
- 'runtime/runtime-generator.cc',
- 'runtime/runtime-intl.cc',
- 'runtime/runtime-internal.cc',
- 'runtime/runtime-interpreter.cc',
- 'runtime/runtime-literals.cc',
- 'runtime/runtime-liveedit.cc',
- 'runtime/runtime-maths.cc',
- 'runtime/runtime-module.cc',
- 'runtime/runtime-numbers.cc',
- 'runtime/runtime-object.cc',
- 'runtime/runtime-operators.cc',
- 'runtime/runtime-promise.cc',
- 'runtime/runtime-proxy.cc',
- 'runtime/runtime-regexp.cc',
- 'runtime/runtime-scopes.cc',
- 'runtime/runtime-strings.cc',
- 'runtime/runtime-symbol.cc',
- 'runtime/runtime-test.cc',
- 'runtime/runtime-typedarray.cc',
- 'runtime/runtime-utils.h',
- 'runtime/runtime-wasm.cc',
- 'runtime/runtime.cc',
- 'runtime/runtime.h',
- 'safepoint-table.cc',
- 'safepoint-table.h',
- 'setup-isolate.h',
- 'signature.h',
- 'simulator-base.cc',
- 'simulator-base.h',
- 'simulator.h',
- 'snapshot/builtin-deserializer-allocator.cc',
- 'snapshot/builtin-deserializer-allocator.h',
- 'snapshot/builtin-deserializer.cc',
- 'snapshot/builtin-deserializer.h',
- 'snapshot/builtin-serializer-allocator.cc',
- 'snapshot/builtin-serializer-allocator.h',
- 'snapshot/builtin-serializer.cc',
- 'snapshot/builtin-serializer.h',
- 'snapshot/builtin-snapshot-utils.cc',
- 'snapshot/builtin-snapshot-utils.h',
- 'snapshot/code-serializer.cc',
- 'snapshot/code-serializer.h',
- 'snapshot/default-deserializer-allocator.cc',
- 'snapshot/default-deserializer-allocator.h',
- 'snapshot/default-serializer-allocator.cc',
- 'snapshot/default-serializer-allocator.h',
- 'snapshot/deserializer.cc',
- 'snapshot/deserializer.h',
- 'snapshot/natives-common.cc',
- 'snapshot/natives.h',
- 'snapshot/object-deserializer.cc',
- 'snapshot/object-deserializer.h',
- 'snapshot/partial-deserializer.cc',
- 'snapshot/partial-deserializer.h',
- 'snapshot/partial-serializer.cc',
- 'snapshot/partial-serializer.h',
- 'snapshot/serializer.cc',
- 'snapshot/serializer-common.cc',
- 'snapshot/serializer-common.h',
- 'snapshot/serializer.h',
- 'snapshot/snapshot-common.cc',
- 'snapshot/snapshot.h',
- 'snapshot/snapshot-source-sink.cc',
- 'snapshot/snapshot-source-sink.h',
- 'snapshot/startup-deserializer.cc',
- 'snapshot/startup-deserializer.h',
- 'snapshot/startup-serializer.cc',
- 'snapshot/startup-serializer.h',
- 'source-position-table.cc',
- 'source-position-table.h',
- 'source-position.cc',
- 'source-position.h',
- 'splay-tree.h',
- 'splay-tree-inl.h',
- 'startup-data-util.cc',
- 'startup-data-util.h',
- 'string-builder.cc',
- 'string-builder.h',
- 'string-case.cc',
- 'string-case.h',
- 'string-hasher-inl.h',
- 'string-hasher.h',
- 'string-search.h',
- 'string-stream.cc',
- 'string-stream.h',
- 'strtod.cc',
- 'strtod.h',
- 'ic/stub-cache.cc',
- 'ic/stub-cache.h',
- 'third_party/utf8-decoder/utf8-decoder.h',
- 'tracing/trace-event.cc',
- 'tracing/trace-event.h',
- 'tracing/traced-value.cc',
- 'tracing/traced-value.h',
- 'tracing/tracing-category-observer.cc',
- 'tracing/tracing-category-observer.h',
- 'transitions-inl.h',
- 'transitions.cc',
- 'transitions.h',
- 'trap-handler/handler-outside.cc',
- 'trap-handler/handler-shared.cc',
- 'trap-handler/trap-handler.h',
- 'trap-handler/trap-handler-internal.h',
- 'type-hints.cc',
- 'type-hints.h',
- 'unicode-inl.h',
- 'unicode.cc',
- 'unicode.h',
- 'unicode-cache-inl.h',
- 'unicode-cache.h',
- 'unicode-decoder.cc',
- 'unicode-decoder.h',
- 'uri.cc',
- 'uri.h',
- 'utils-inl.h',
- 'utils.cc',
- 'utils.h',
- 'v8.cc',
- 'v8.h',
- 'v8memory.h',
- 'v8threads.cc',
- 'v8threads.h',
- 'value-serializer.cc',
- 'value-serializer.h',
- 'vector-slot-pair.cc',
- 'vector-slot-pair.h',
- 'vector.h',
- 'version.cc',
- 'version.h',
- 'visitors.cc',
- 'visitors.h',
- 'vm-state-inl.h',
- 'vm-state.h',
- 'wasm/baseline/liftoff-assembler-defs.h',
- 'wasm/baseline/liftoff-assembler.cc',
- 'wasm/baseline/liftoff-assembler.h',
- 'wasm/baseline/liftoff-compiler.cc',
- 'wasm/baseline/liftoff-register.h',
- 'wasm/compilation-manager.cc',
- 'wasm/compilation-manager.h',
- 'wasm/decoder.h',
- 'wasm/function-body-decoder.cc',
- 'wasm/function-body-decoder.h',
- 'wasm/function-body-decoder-impl.h',
- 'wasm/leb-helper.h',
- 'wasm/local-decl-encoder.cc',
- 'wasm/local-decl-encoder.h',
- 'wasm/memory-tracing.cc',
- 'wasm/memory-tracing.h',
- 'wasm/module-compiler.cc',
- 'wasm/module-compiler.h',
- 'wasm/module-decoder.cc',
- 'wasm/module-decoder.h',
- 'wasm/signature-map.cc',
- 'wasm/signature-map.h',
- 'wasm/streaming-decoder.cc',
- 'wasm/streaming-decoder.h',
- 'wasm/wasm-api.cc',
- 'wasm/wasm-api.h',
- 'wasm/wasm-code-manager.cc',
- 'wasm/wasm-code-manager.h',
- 'wasm/wasm-code-specialization.cc',
- 'wasm/wasm-code-specialization.h',
- 'wasm/wasm-code-wrapper.cc',
- 'wasm/wasm-code-wrapper.h',
- 'wasm/wasm-constants.h',
- 'wasm/wasm-debug.cc',
- 'wasm/wasm-engine.cc',
- 'wasm/wasm-engine.h',
- 'wasm/wasm-external-refs.cc',
- 'wasm/wasm-external-refs.h',
- 'wasm/wasm-js.cc',
- 'wasm/wasm-js.h',
- 'wasm/wasm-limits.h',
- 'wasm/wasm-memory.cc',
- 'wasm/wasm-memory.h',
- 'wasm/wasm-module.cc',
- 'wasm/wasm-module.h',
- 'wasm/wasm-module-builder.cc',
- 'wasm/wasm-module-builder.h',
- 'wasm/wasm-interpreter.cc',
- 'wasm/wasm-interpreter.h',
- 'wasm/wasm-objects-inl.h',
- 'wasm/wasm-objects.cc',
- 'wasm/wasm-objects.h',
- 'wasm/wasm-opcodes.cc',
- 'wasm/wasm-opcodes.h',
- 'wasm/wasm-result.cc',
- 'wasm/wasm-result.h',
- 'wasm/wasm-serialization.cc',
- 'wasm/wasm-serialization.h',
- 'wasm/wasm-text.cc',
- 'wasm/wasm-text.h',
- 'wasm/wasm-value.h',
- 'zone/accounting-allocator.cc',
- 'zone/accounting-allocator.h',
- 'zone/zone-segment.cc',
- 'zone/zone-segment.h',
- 'zone/zone.cc',
- 'zone/zone.h',
- 'zone/zone-chunk-list.h',
- 'zone/zone-allocator.h',
- 'zone/zone-containers.h',
- 'zone/zone-handle-set.h',
- 'zone/zone-list-inl.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- 'arm/assembler-arm-inl.h',
- 'arm/assembler-arm.cc',
- 'arm/assembler-arm.h',
- 'arm/code-stubs-arm.cc',
- 'arm/code-stubs-arm.h',
- 'arm/codegen-arm.cc',
- 'arm/constants-arm.h',
- 'arm/constants-arm.cc',
- 'arm/cpu-arm.cc',
- 'arm/deoptimizer-arm.cc',
- 'arm/disasm-arm.cc',
- 'arm/frame-constants-arm.cc',
- 'arm/frame-constants-arm.h',
- 'arm/interface-descriptors-arm.cc',
- 'arm/interface-descriptors-arm.h',
- 'arm/macro-assembler-arm.cc',
- 'arm/macro-assembler-arm.h',
- 'arm/simulator-arm.cc',
- 'arm/simulator-arm.h',
- 'arm/eh-frame-arm.cc',
- 'compiler/arm/code-generator-arm.cc',
- 'compiler/arm/instruction-codes-arm.h',
- 'compiler/arm/instruction-scheduler-arm.cc',
- 'compiler/arm/instruction-selector-arm.cc',
- 'compiler/arm/unwinding-info-writer-arm.cc',
- 'compiler/arm/unwinding-info-writer-arm.h',
- 'debug/arm/debug-arm.cc',
- 'regexp/arm/regexp-macro-assembler-arm.cc',
- 'regexp/arm/regexp-macro-assembler-arm.h',
- 'wasm/baseline/arm/liftoff-assembler-arm-defs.h',
- 'wasm/baseline/arm/liftoff-assembler-arm.h',
- ],
- }],
- ['v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:arm64) ###
- 'arm64/assembler-arm64.cc',
- 'arm64/assembler-arm64.h',
- 'arm64/assembler-arm64-inl.h',
- 'arm64/codegen-arm64.cc',
- 'arm64/code-stubs-arm64.cc',
- 'arm64/code-stubs-arm64.h',
- 'arm64/constants-arm64.h',
- 'arm64/cpu-arm64.cc',
- 'arm64/decoder-arm64.cc',
- 'arm64/decoder-arm64.h',
- 'arm64/decoder-arm64-inl.h',
- 'arm64/deoptimizer-arm64.cc',
- 'arm64/disasm-arm64.cc',
- 'arm64/disasm-arm64.h',
- 'arm64/frame-constants-arm64.cc',
- 'arm64/frame-constants-arm64.h',
- 'arm64/instructions-arm64-constants.cc',
- 'arm64/instructions-arm64.cc',
- 'arm64/instructions-arm64.h',
- 'arm64/instrument-arm64.cc',
- 'arm64/instrument-arm64.h',
- 'arm64/interface-descriptors-arm64.cc',
- 'arm64/interface-descriptors-arm64.h',
- 'arm64/macro-assembler-arm64.cc',
- 'arm64/macro-assembler-arm64.h',
- 'arm64/macro-assembler-arm64-inl.h',
- 'arm64/simulator-arm64.cc',
- 'arm64/simulator-arm64.h',
- 'arm64/simulator-logic-arm64.cc',
- 'arm64/utils-arm64.cc',
- 'arm64/utils-arm64.h',
- 'arm64/eh-frame-arm64.cc',
- 'compiler/arm64/code-generator-arm64.cc',
- 'compiler/arm64/instruction-codes-arm64.h',
- 'compiler/arm64/instruction-scheduler-arm64.cc',
- 'compiler/arm64/instruction-selector-arm64.cc',
- 'compiler/arm64/unwinding-info-writer-arm64.cc',
- 'compiler/arm64/unwinding-info-writer-arm64.h',
- 'debug/arm64/debug-arm64.cc',
- 'regexp/arm64/regexp-macro-assembler-arm64.cc',
- 'regexp/arm64/regexp-macro-assembler-arm64.h',
- 'wasm/baseline/arm64/liftoff-assembler-arm64-defs.h',
- 'wasm/baseline/arm64/liftoff-assembler-arm64.h',
- ],
- }],
- ['v8_target_arch=="ia32"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- 'ia32/assembler-ia32-inl.h',
- 'ia32/assembler-ia32.cc',
- 'ia32/assembler-ia32.h',
- 'ia32/code-stubs-ia32.cc',
- 'ia32/codegen-ia32.cc',
- 'ia32/cpu-ia32.cc',
- 'ia32/deoptimizer-ia32.cc',
- 'ia32/disasm-ia32.cc',
- 'ia32/frame-constants-ia32.cc',
- 'ia32/frame-constants-ia32.h',
- 'ia32/interface-descriptors-ia32.cc',
- 'ia32/macro-assembler-ia32.cc',
- 'ia32/macro-assembler-ia32.h',
- 'ia32/simulator-ia32.cc',
- 'ia32/simulator-ia32.h',
- 'ia32/sse-instr.h',
- 'compiler/ia32/code-generator-ia32.cc',
- 'compiler/ia32/instruction-codes-ia32.h',
- 'compiler/ia32/instruction-scheduler-ia32.cc',
- 'compiler/ia32/instruction-selector-ia32.cc',
- 'debug/ia32/debug-ia32.cc',
- 'regexp/ia32/regexp-macro-assembler-ia32.cc',
- 'regexp/ia32/regexp-macro-assembler-ia32.h',
- 'wasm/baseline/ia32/liftoff-assembler-ia32-defs.h',
- 'wasm/baseline/ia32/liftoff-assembler-ia32.h',
- ],
- }],
- ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- 'mips/assembler-mips.cc',
- 'mips/assembler-mips.h',
- 'mips/assembler-mips-inl.h',
- 'mips/codegen-mips.cc',
- 'mips/code-stubs-mips.cc',
- 'mips/code-stubs-mips.h',
- 'mips/constants-mips.cc',
- 'mips/constants-mips.h',
- 'mips/cpu-mips.cc',
- 'mips/deoptimizer-mips.cc',
- 'mips/disasm-mips.cc',
- 'mips/frame-constants-mips.cc',
- 'mips/frame-constants-mips.h',
- 'mips/interface-descriptors-mips.cc',
- 'mips/macro-assembler-mips.cc',
- 'mips/macro-assembler-mips.h',
- 'mips/simulator-mips.cc',
- 'mips/simulator-mips.h',
- 'compiler/mips/code-generator-mips.cc',
- 'compiler/mips/instruction-codes-mips.h',
- 'compiler/mips/instruction-scheduler-mips.cc',
- 'compiler/mips/instruction-selector-mips.cc',
- 'debug/mips/debug-mips.cc',
- 'regexp/mips/regexp-macro-assembler-mips.cc',
- 'regexp/mips/regexp-macro-assembler-mips.h',
- 'wasm/baseline/mips/liftoff-assembler-mips-defs.h',
- 'wasm/baseline/mips/liftoff-assembler-mips.h',
- ],
- }],
- ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
- 'sources': [ ### gcmole(arch:mips64el) ###
- 'mips64/assembler-mips64.cc',
- 'mips64/assembler-mips64.h',
- 'mips64/assembler-mips64-inl.h',
- 'mips64/codegen-mips64.cc',
- 'mips64/code-stubs-mips64.cc',
- 'mips64/code-stubs-mips64.h',
- 'mips64/constants-mips64.cc',
- 'mips64/constants-mips64.h',
- 'mips64/cpu-mips64.cc',
- 'mips64/deoptimizer-mips64.cc',
- 'mips64/disasm-mips64.cc',
- 'mips64/frame-constants-mips64.cc',
- 'mips64/frame-constants-mips64.h',
- 'mips64/interface-descriptors-mips64.cc',
- 'mips64/macro-assembler-mips64.cc',
- 'mips64/macro-assembler-mips64.h',
- 'mips64/simulator-mips64.cc',
- 'mips64/simulator-mips64.h',
- 'compiler/mips64/code-generator-mips64.cc',
- 'compiler/mips64/instruction-codes-mips64.h',
- 'compiler/mips64/instruction-scheduler-mips64.cc',
- 'compiler/mips64/instruction-selector-mips64.cc',
- 'debug/mips64/debug-mips64.cc',
- 'regexp/mips64/regexp-macro-assembler-mips64.cc',
- 'regexp/mips64/regexp-macro-assembler-mips64.h',
- 'wasm/baseline/mips64/liftoff-assembler-mips64-defs.h',
- 'wasm/baseline/mips64/liftoff-assembler-mips64.h',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'sources': [ ### gcmole(arch:x64) ###
- 'compiler/x64/code-generator-x64.cc',
- 'compiler/x64/instruction-codes-x64.h',
- 'compiler/x64/instruction-scheduler-x64.cc',
- 'compiler/x64/instruction-selector-x64.cc',
- 'compiler/x64/unwinding-info-writer-x64.cc',
- 'compiler/x64/unwinding-info-writer-x64.h',
- 'x64/assembler-x64-inl.h',
- 'x64/assembler-x64.cc',
- 'x64/assembler-x64.h',
- 'x64/code-stubs-x64.cc',
- 'x64/codegen-x64.cc',
- 'x64/cpu-x64.cc',
- 'x64/deoptimizer-x64.cc',
- 'x64/disasm-x64.cc',
- 'x64/eh-frame-x64.cc',
- 'x64/frame-constants-x64.cc',
- 'x64/frame-constants-x64.h',
- 'x64/interface-descriptors-x64.cc',
- 'x64/macro-assembler-x64.cc',
- 'x64/macro-assembler-x64.h',
- 'x64/simulator-x64.cc',
- 'x64/simulator-x64.h',
- 'x64/sse-instr.h',
- 'debug/x64/debug-x64.cc',
- 'regexp/x64/regexp-macro-assembler-x64.cc',
- 'regexp/x64/regexp-macro-assembler-x64.h',
- 'third_party/valgrind/valgrind.h',
- 'wasm/baseline/x64/liftoff-assembler-x64-defs.h',
- 'wasm/baseline/x64/liftoff-assembler-x64.h',
- ],
- }],
- ['v8_target_arch=="x64" and OS=="linux"', {
- 'sources': ['trap-handler/handler-inside.cc']
- }],
- ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
- 'sources': [ ### gcmole(arch:ppc) ###
- 'compiler/ppc/code-generator-ppc.cc',
- 'compiler/ppc/instruction-codes-ppc.h',
- 'compiler/ppc/instruction-scheduler-ppc.cc',
- 'compiler/ppc/instruction-selector-ppc.cc',
- 'debug/ppc/debug-ppc.cc',
- 'ppc/assembler-ppc-inl.h',
- 'ppc/assembler-ppc.cc',
- 'ppc/assembler-ppc.h',
- 'ppc/code-stubs-ppc.cc',
- 'ppc/code-stubs-ppc.h',
- 'ppc/codegen-ppc.cc',
- 'ppc/constants-ppc.h',
- 'ppc/constants-ppc.cc',
- 'ppc/cpu-ppc.cc',
- 'ppc/deoptimizer-ppc.cc',
- 'ppc/disasm-ppc.cc',
- 'ppc/frame-constants-ppc.cc',
- 'ppc/frame-constants-ppc.h',
- 'ppc/interface-descriptors-ppc.cc',
- 'ppc/macro-assembler-ppc.cc',
- 'ppc/macro-assembler-ppc.h',
- 'ppc/simulator-ppc.cc',
- 'ppc/simulator-ppc.h',
- 'regexp/ppc/regexp-macro-assembler-ppc.cc',
- 'regexp/ppc/regexp-macro-assembler-ppc.h',
- 'wasm/baseline/ppc/liftoff-assembler-ppc-defs.h',
- 'wasm/baseline/ppc/liftoff-assembler-ppc.h',
- ],
- }],
- ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
- 'sources': [ ### gcmole(arch:s390) ###
- 'compiler/s390/code-generator-s390.cc',
- 'compiler/s390/instruction-codes-s390.h',
- 'compiler/s390/instruction-scheduler-s390.cc',
- 'compiler/s390/instruction-selector-s390.cc',
- 'debug/s390/debug-s390.cc',
- 'regexp/s390/regexp-macro-assembler-s390.cc',
- 'regexp/s390/regexp-macro-assembler-s390.h',
- 's390/assembler-s390.cc',
- 's390/assembler-s390.h',
- 's390/assembler-s390-inl.h',
- 's390/codegen-s390.cc',
- 's390/code-stubs-s390.cc',
- 's390/code-stubs-s390.h',
- 's390/constants-s390.cc',
- 's390/constants-s390.h',
- 's390/cpu-s390.cc',
- 's390/deoptimizer-s390.cc',
- 's390/disasm-s390.cc',
- 's390/frame-constants-s390.cc',
- 's390/frame-constants-s390.h',
- 's390/interface-descriptors-s390.cc',
- 's390/macro-assembler-s390.cc',
- 's390/macro-assembler-s390.h',
- 's390/simulator-s390.cc',
- 's390/simulator-s390.h',
- 'wasm/baseline/s390/liftoff-assembler-s390-defs.h',
- 'wasm/baseline/s390/liftoff-assembler-s390.h',
- ],
- }],
- ['OS=="win"', {
- 'variables': {
- 'gyp_generators': '<!(echo $GYP_GENERATORS)',
- },
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- # When building the Official configuration, the .lib exceeds the
- # 2GB limit, so it is split into multiple pieces to stay under it.
- # See http://crbug.com/485155.
- 'msvs_shard': 4,
- # This prevents V8's .cc files from conflicting with the inspector's
- # .cpp files in the same shard.
- 'msvs_settings': {
- 'VCCLCompilerTool': {
- 'ObjectFile':'$(IntDir)%(Extension)\\',
- },
- },
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- ],
- }],
- ['v8_postmortem_support=="true"', {
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ]
- }],
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ],
- 'conditions': [
- ['icu_use_data_file_flag==1', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
- }, { # else icu_use_data_file_flag !=1
- 'conditions': [
- ['OS=="win"', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
- }, {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
- }],
- ],
- }],
- ],
- }, { # v8_enable_i18n_support==0
- 'sources!': [
- 'builtins/builtins-intl.cc',
- 'builtins/builtins-intl.h',
- 'char-predicates.cc',
- 'intl.cc',
- 'intl.h',
- 'objects/intl-objects.cc',
- 'objects/intl-objects.h',
- 'runtime/runtime-intl.cc',
- ],
- }],
- ['OS=="win" and v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icudata',
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_libbase',
- 'type': '<(component)',
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '..',
- ],
- 'sources': [
- 'base/adapters.h',
- 'base/atomic-utils.h',
- 'base/atomicops.h',
- 'base/atomicops_internals_atomicword_compat.h',
- 'base/atomicops_internals_portable.h',
- 'base/atomicops_internals_std.h',
- 'base/base-export.h',
- 'base/bits.cc',
- 'base/bits.h',
- 'base/build_config.h',
- 'base/compiler-specific.h',
- 'base/cpu.cc',
- 'base/cpu.h',
- 'base/division-by-constant.cc',
- 'base/division-by-constant.h',
- 'base/debug/stack_trace.cc',
- 'base/debug/stack_trace.h',
- 'base/export-template.h',
- 'base/file-utils.cc',
- 'base/file-utils.h',
- 'base/flags.h',
- 'base/format-macros.h',
- 'base/free_deleter.h',
- 'base/functional.cc',
- 'base/functional.h',
- 'base/hashmap.h',
- 'base/hashmap-entry.h',
- 'base/ieee754.cc',
- 'base/ieee754.h',
- 'base/iterator.h',
- 'base/lazy-instance.h',
- 'base/logging.cc',
- 'base/logging.h',
- 'base/macros.h',
- 'base/once.cc',
- 'base/once.h',
- 'base/optional.h',
- 'base/page-allocator.cc',
- 'base/page-allocator.h',
- 'base/platform/elapsed-timer.h',
- 'base/platform/time.cc',
- 'base/platform/time.h',
- 'base/platform/condition-variable.cc',
- 'base/platform/condition-variable.h',
- 'base/platform/mutex.cc',
- 'base/platform/mutex.h',
- 'base/platform/platform.h',
- 'base/platform/semaphore.cc',
- 'base/platform/semaphore.h',
- 'base/ring-buffer.h',
- 'base/safe_conversions.h',
- 'base/safe_conversions_impl.h',
- 'base/safe_math.h',
- 'base/safe_math_impl.h',
- 'base/sys-info.cc',
- 'base/sys-info.h',
- 'base/template-utils.h',
- 'base/timezone-cache.h',
- 'base/tsan.h',
- 'base/utils/random-number-generator.cc',
- 'base/utils/random-number-generator.h',
- ],
- 'target_conditions': [
- ['OS=="android" and _toolset=="target"', {
- 'libraries': [
- '-llog',
- ],
- 'include_dirs': [
- 'src/common/android/include',
- ],
- }],
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_BASE_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'USING_V8_BASE_SHARED',
- ],
- },
- }],
- ['OS=="linux"', {
- 'link_settings': {
- 'libraries': [
- '-ldl',
- '-lrt'
- ],
- },
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-linux.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- }
- ],
- ['OS=="android"', {
- 'sources': [
- 'base/debug/stack_trace_android.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- 'link_settings': {
- 'target_conditions': [
- ['_toolset=="host" and host_os!="mac"', {
- # Only include libdl and librt on host builds: they are included
- # by default on Android target builds, and re-including them here
- # would change the library order and break the build (see
- # crbug.com/469973). These libraries do not exist on Mac hosts.
- 'libraries': [
- '-ldl',
- '-lrt'
- ]
- }]
- ]
- },
- 'conditions': [
- ['host_os=="mac"', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'sources': [
- 'base/platform/platform-macos.cc'
- ]
- }, {
- 'sources': [
- 'base/platform/platform-linux.cc'
- ]
- }],
- ],
- }, {
- 'sources': [
- 'base/platform/platform-linux.cc'
- ]
- }],
- ],
- },
- ],
- ['OS=="qnx"', {
- 'link_settings': {
- 'target_conditions': [
- ['_toolset=="host" and host_os=="linux"', {
- 'libraries': [
- '-lrt'
- ],
- }],
- ['_toolset=="target"', {
- 'libraries': [
- '-lbacktrace'
- ],
- }],
- ],
- },
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- 'base/qnx-math.h'
- ],
- 'target_conditions': [
- ['_toolset=="host" and host_os=="linux"', {
- 'sources': [
- 'base/platform/platform-linux.cc'
- ],
- }],
- ['_toolset=="host" and host_os=="mac"', {
- 'sources': [
- 'base/platform/platform-macos.cc'
- ],
- }],
- ['_toolset=="target"', {
- 'sources': [
- 'base/platform/platform-qnx.cc'
- ],
- }],
- ],
- },
- ],
- ['OS=="freebsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-freebsd.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- }
- ],
- ['OS=="openbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-openbsd.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- }
- ],
- ['OS=="netbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
- ]},
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-openbsd.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ],
- }
- ],
- ['OS=="aix"', {
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-aix.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc'
- ]},
- ],
- ['OS=="fuchsia"', {
- 'sources': [
- 'base/debug/stack_trace_fuchsia.cc',
- 'base/platform/platform-fuchsia.cc',
- ]},
- ],
- ['OS=="solaris"', {
- 'link_settings': {
- 'libraries': [
- '-lnsl -lrt',
- ]},
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-solaris.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- ],
- }
- ],
- ['OS=="mac"', {
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-macos.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- 'base/platform/platform-posix-time.h',
- 'base/platform/platform-posix-time.cc',
- ]},
- ],
- ['OS=="win"', {
- 'defines': [
- '_CRT_RAND_S' # for rand_s()
- ],
- 'variables': {
- 'gyp_generators': '<!(echo $GYP_GENERATORS)',
- },
- 'conditions': [
- ['gyp_generators=="make"', {
- 'variables': {
- 'build_env': '<!(uname -o)',
- },
- 'conditions': [
- ['build_env=="Cygwin"', {
- 'sources': [
- 'base/debug/stack_trace_posix.cc',
- 'base/platform/platform-cygwin.cc',
- 'base/platform/platform-posix.h',
- 'base/platform/platform-posix.cc',
- ],
- }, {
- 'sources': [
- 'base/debug/stack_trace_win.cc',
- 'base/platform/platform-win32.cc',
- 'base/win32-headers.h',
- ],
- }],
- ],
- 'link_settings': {
- 'libraries': [ '-lwinmm', '-lws2_32' ],
- },
- }, {
- 'sources': [
- 'base/debug/stack_trace_win.cc',
- 'base/platform/platform-win32.cc',
- 'base/win32-headers.h',
- ],
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- 'link_settings': {
- 'libraries': [
- '-ldbghelp.lib',
- '-lshlwapi.lib',
- '-lwinmm.lib',
- '-lws2_32.lib'
- ],
- },
- }],
- ],
- }],
- ],
- },
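
The per-OS conditions above select one primary platform implementation file for v8_libbase on each OS; the following Python dict distills them for reference. The dict itself is illustrative, with special cases noted inline.

# Hypothetical summary table distilled from the v8_libbase conditions.
PLATFORM_IMPL = {
    'linux':   'base/platform/platform-linux.cc',
    'android': 'base/platform/platform-linux.cc',    # mac hosts build platform-macos.cc
    'qnx':     'base/platform/platform-qnx.cc',      # target toolset only
    'freebsd': 'base/platform/platform-freebsd.cc',
    'openbsd': 'base/platform/platform-openbsd.cc',
    'netbsd':  'base/platform/platform-openbsd.cc',  # shares the OpenBSD file
    'aix':     'base/platform/platform-aix.cc',
    'fuchsia': 'base/platform/platform-fuchsia.cc',
    'solaris': 'base/platform/platform-solaris.cc',
    'mac':     'base/platform/platform-macos.cc',
    'win':     'base/platform/platform-win32.cc',    # Cygwin builds platform-cygwin.cc
}
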
- {
- 'target_name': 'v8_libplatform',
- 'type': '<(component)',
- 'variables': {
- 'optimize': 'max',
- },
- 'dependencies': [
- 'v8_libbase',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- '../include',
- ],
- 'sources': [
- '../include/libplatform/libplatform.h',
- '../include/libplatform/libplatform-export.h',
- '../include/libplatform/v8-tracing.h',
- 'libplatform/default-background-task-runner.cc',
- 'libplatform/default-background-task-runner.h',
- 'libplatform/default-foreground-task-runner.cc',
- 'libplatform/default-foreground-task-runner.h',
- 'libplatform/default-platform.cc',
- 'libplatform/default-platform.h',
- 'libplatform/task-queue.cc',
- 'libplatform/task-queue.h',
- 'libplatform/tracing/trace-buffer.cc',
- 'libplatform/tracing/trace-buffer.h',
- 'libplatform/tracing/trace-config.cc',
- 'libplatform/tracing/trace-object.cc',
- 'libplatform/tracing/trace-writer.cc',
- 'libplatform/tracing/trace-writer.h',
- 'libplatform/tracing/tracing-controller.cc',
- 'libplatform/worker-thread.cc',
- 'libplatform/worker-thread.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['component=="shared_library"', {
- 'direct_dependent_settings': {
- 'defines': [ 'USING_V8_PLATFORM_SHARED' ],
- },
- 'defines': [ 'BUILDING_V8_PLATFORM_SHARED' ],
- }]
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../include',
- ],
- },
- },
- {
- 'target_name': 'v8_libsampler',
- 'type': 'static_library',
- 'variables': {
- 'optimize': 'max',
- },
- 'dependencies': [
- 'v8_libbase',
- ],
- 'include_dirs+': [
- '..',
- '../include',
- ],
- 'sources': [
- 'libsampler/sampler.cc',
- 'libsampler/sampler.h'
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../include',
- ],
- },
- },
- {
- 'target_name': 'natives_blob',
- 'type': 'none',
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'dependencies': ['js2c#host'],
- }, {
- 'dependencies': ['js2c'],
- }],
- ],
- 'actions': [{
- 'action_name': 'concatenate_natives_blob',
- 'inputs': [
- '../tools/concatenate-files.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob_host.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
- ],
- }],
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
- ],
- }],
- ],
- }],
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ]
- },
- {
- 'target_name': 'js2c',
- 'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- 'variables': {
- 'library_files': [
- 'js/macros.py',
- 'messages.h',
- 'js/prologue.js',
- 'js/array.js',
- 'js/typedarray.js',
- 'js/messages.js',
- 'js/spread.js',
- 'debug/mirrors.js',
- 'debug/debug.js',
- 'debug/liveedit.js',
- ],
- 'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- 'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
- 'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
- 'conditions': [
- ['v8_enable_i18n_support==1', {
- 'library_files': ['js/intl.js'],
- }],
- ],
- },
- 'actions': [
- {
- 'action_name': 'js2c',
- 'inputs': [
- '../tools/js2c.py',
- '<@(library_files)',
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- 'CORE',
- '<@(library_files)',
- ],
- },
- {
- 'action_name': 'js2c_bin',
- 'inputs': [
- '../tools/js2c.py',
- '<@(library_files)',
- ],
- 'outputs': ['<@(libraries_bin_file)'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- 'CORE',
- '<@(library_files)',
- '--startup_blob', '<@(libraries_bin_file)',
- '--nojs',
- ],
- },
- {
- 'action_name': 'js2c_extras',
- 'inputs': [
- '../tools/js2c.py',
- '<@(v8_extra_library_files)',
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- 'EXTRAS',
- '<@(v8_extra_library_files)',
- ],
- },
- {
- 'action_name': 'js2c_extras_bin',
- 'inputs': [
- '../tools/js2c.py',
- '<@(v8_extra_library_files)',
- ],
- 'outputs': ['<@(libraries_extras_bin_file)'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- 'EXTRAS',
- '<@(v8_extra_library_files)',
- '--startup_blob', '<@(libraries_extras_bin_file)',
- '--nojs',
- ],
- },
- {
- 'action_name': 'js2c_experimental_extras',
- 'inputs': [
- '../tools/js2c.py',
- '<@(v8_experimental_extra_library_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- ],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- 'EXPERIMENTAL_EXTRAS',
- '<@(v8_experimental_extra_library_files)',
- ],
- },
- {
- 'action_name': 'js2c_experimental_extras_bin',
- 'inputs': [
- '../tools/js2c.py',
- '<@(v8_experimental_extra_library_files)',
- ],
- 'outputs': ['<@(libraries_experimental_extras_bin_file)'],
- 'action': [
- 'python',
- '../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- 'EXPERIMENTAL_EXTRAS',
- '<@(v8_experimental_extra_library_files)',
- '--startup_blob', '<@(libraries_experimental_extras_bin_file)',
- '--nojs',
- ],
- },
- ],
- },
- {
- 'target_name': 'postmortem-metadata',
- 'type': 'none',
- 'variables': {
- 'heapobject_files': [
- 'objects.h',
- 'objects-inl.h',
- 'objects/code.h',
- 'objects/code-inl.h',
- 'objects/fixed-array.h',
- 'objects/fixed-array-inl.h',
- 'objects/js-array.h',
- 'objects/js-array-inl.h',
- 'objects/js-regexp.h',
- 'objects/js-regexp-inl.h',
- 'objects/map.h',
- 'objects/map-inl.h',
- 'objects/script.h',
- 'objects/script-inl.h',
- 'objects/shared-function-info.h',
- 'objects/shared-function-info-inl.h',
- 'objects/string.h',
- 'objects/string-inl.h',
- ],
- },
- 'actions': [
- {
- 'action_name': 'gen-postmortem-metadata',
- 'inputs': [
- '../tools/gen-postmortem-metadata.py',
- '<@(heapobject_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ],
- 'action': [
- 'python',
- '../tools/gen-postmortem-metadata.py',
- '<@(_outputs)',
- '<@(heapobject_files)'
- ]
- }
- ]
- },
- {
- 'target_name': 'mksnapshot',
- 'type': 'executable',
- 'dependencies': [
- 'v8_base',
- 'v8_init',
- 'v8_libbase',
- 'v8_libplatform',
- 'v8_nosnapshot',
- ],
- 'include_dirs+': [
- '..',
- '<(DEPTH)',
- ],
- 'sources': [
- 'snapshot/mksnapshot.cc',
- ],
- 'conditions': [
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ]
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- },
- {
- 'target_name': 'v8_dump_build_config',
- 'type': 'none',
- 'variables': {
- },
- 'actions': [
- {
- 'action_name': 'v8_dump_build_config',
- 'inputs': [
- '../tools/testrunner/utils/dump_build_config_gyp.py',
- ],
- 'outputs': [
- '<(PRODUCT_DIR)/v8_build_config.json',
- ],
- 'action': [
- 'python',
- '../tools/testrunner/utils/dump_build_config_gyp.py',
- '<(PRODUCT_DIR)/v8_build_config.json',
- 'dcheck_always_on=<(dcheck_always_on)',
- 'is_asan=<(asan)',
- 'is_cfi=<(cfi_vptr)',
- 'is_component_build=<(component)',
- 'is_debug=<(CONFIGURATION_NAME)',
- # Not available in gyp.
- 'is_gcov_coverage=0',
- 'is_msan=<(msan)',
- 'is_tsan=<(tsan)',
- # Not available in gyp.
- 'is_ubsan_vptr=0',
- 'target_cpu=<(target_arch)',
- 'v8_enable_i18n_support=<(v8_enable_i18n_support)',
- 'v8_enable_verify_predictable=<(v8_enable_verify_predictable)',
- 'v8_target_cpu=<(v8_target_arch)',
- 'v8_use_snapshot=<(v8_use_snapshot)',
- ],
- },
- ],
- },
- {
- 'target_name': 'v8_monolith',
- 'type': 'static_library',
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../include',
- ],
- },
- 'actions': [
- {
- 'action_name': 'build_with_gn',
- 'inputs': [
- '../tools/node/build_gn.py',
- ],
- 'outputs': [
- '<(INTERMEDIATE_DIR)/obj/libv8_monolith.a',
- '<(INTERMEDIATE_DIR)/args.gn',
- ],
- 'action': [
- '../tools/node/build_gn.py',
- '<(CONFIGURATION_NAME)',
- '../',
- '<(INTERMEDIATE_DIR)',
- 'v8_promise_internal_field_count=<(v8_promise_internal_field_count)',
- 'target_cpu="<(target_arch)"',
- 'target_os="<(OS)"',
- 'v8_target_cpu="<(v8_target_arch)"',
- 'v8_embedder_string="<(v8_embedder_string)"',
- 'v8_use_snapshot=<(v8_use_snapshot)',
- 'v8_optimized_debug=<(v8_optimized_debug)',
- 'v8_enable_disassembler=<(v8_enable_disassembler)',
- 'v8_postmortem_support=<(v8_postmortem_support)',
- ],
- },
- ],
- },
- ],
-}
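
The removed gyp targets above wire up the old JS-natives pipeline: js2c compiles the bundled library scripts into libraries.cc plus per-category startup blobs, and concatenate_natives_blob glues those blobs into a single natives_blob.bin. As a rough sketch of what tools/concatenate-files.py does in that last action (a hedged illustration; the file names below are placeholders, not the literal build inputs):

#include <fstream>
#include <iostream>
#include <string>
#include <vector>

// Minimal blob concatenation: append each input file's bytes to the output,
// in order. Stands in for tools/concatenate-files.py; names are illustrative.
int main() {
  const std::vector<std::string> inputs = {
      "libraries.bin", "libraries-extras.bin",
      "libraries-experimental-extras.bin"};
  std::ofstream out("natives_blob.bin", std::ios::binary);
  for (const std::string& path : inputs) {
    std::ifstream in(path, std::ios::binary);
    if (!in) {
      std::cerr << "missing input: " << path << '\n';
      return 1;
    }
    out << in.rdbuf();  // stream the whole file into the blob
  }
  return 0;
}
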
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index db561b9918..bf4240fa70 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -5,10 +5,13 @@
#ifndef V8_V8_H_
#define V8_V8_H_
-#include "include/v8.h"
#include "src/globals.h"
namespace v8 {
+
+class Platform;
+class StartupData;
+
namespace internal {
class V8 : public AllStatic {
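
The v8.h hunk above trades a full include of the public API header for forward declarations of the two names it actually uses. A minimal sketch of the idiom (V8Like and Initialize are illustrative, not V8's real API):

// Forward declarations stand in for a heavyweight include when a header
// only refers to a type by pointer or reference.
class Platform;     // full definition lives elsewhere
class StartupData;  // ditto

class V8Like {
 public:
  // Parameters of incomplete type are fine in a declaration; only code
  // that dereferences them needs to see the full class.
  static void Initialize(Platform* platform, const StartupData* blob);
};

The payoff is build coupling: translation units including such a header no longer recompile whenever the full definitions change.
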
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index d34bce7746..528de5836c 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MEMORY_H_
-#define V8_MEMORY_H_
+#ifndef V8_V8MEMORY_H_
+#define V8_V8MEMORY_H_
namespace v8 {
namespace internal {
@@ -76,4 +76,4 @@ class Memory {
} // namespace internal
} // namespace v8
-#endif // V8_MEMORY_H_
+#endif // V8_V8MEMORY_H_
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 5e2ab19877..30f6a7a729 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -18,7 +18,7 @@
#include "src/objects.h"
#include "src/snapshot/code-serializer.h"
#include "src/transitions.h"
-#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -161,6 +161,8 @@ enum class ArrayBufferViewTag : uint8_t {
kUint32Array = 'D',
kFloat32Array = 'f',
kFloat64Array = 'F',
+ kBigInt64Array = 'q',
+ kBigUint64Array = 'Q',
kDataView = '?',
};
@@ -1084,13 +1086,13 @@ bool ValueDeserializer::ReadRawBytes(size_t length, const void** data) {
void ValueDeserializer::TransferArrayBuffer(
uint32_t transfer_id, Handle<JSArrayBuffer> array_buffer) {
if (array_buffer_transfer_map_.is_null()) {
- array_buffer_transfer_map_ =
- isolate_->global_handles()->Create(*NumberDictionary::New(isolate_, 0));
+ array_buffer_transfer_map_ = isolate_->global_handles()->Create(
+ *SimpleNumberDictionary::New(isolate_, 0));
}
- Handle<NumberDictionary> dictionary =
+ Handle<SimpleNumberDictionary> dictionary =
array_buffer_transfer_map_.ToHandleChecked();
- Handle<NumberDictionary> new_dictionary =
- NumberDictionary::Set(dictionary, transfer_id, array_buffer);
+ Handle<SimpleNumberDictionary> new_dictionary =
+ SimpleNumberDictionary::Set(dictionary, transfer_id, array_buffer);
if (!new_dictionary.is_identical_to(dictionary)) {
GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
array_buffer_transfer_map_ =
@@ -1182,15 +1184,16 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
return ReadJSMap();
case SerializationTag::kBeginJSSet:
return ReadJSSet();
- case SerializationTag::kArrayBuffer:
- return ReadJSArrayBuffer();
- case SerializationTag::kArrayBufferTransfer: {
+ case SerializationTag::kArrayBuffer: {
const bool is_shared = false;
- return ReadTransferredJSArrayBuffer(is_shared);
+ return ReadJSArrayBuffer(is_shared);
+ }
+ case SerializationTag::kArrayBufferTransfer: {
+ return ReadTransferredJSArrayBuffer();
}
case SerializationTag::kSharedArrayBuffer: {
const bool is_shared = true;
- return ReadTransferredJSArrayBuffer(is_shared);
+ return ReadJSArrayBuffer(is_shared);
}
case SerializationTag::kWasmModule:
return ReadWasmModule();
@@ -1572,8 +1575,25 @@ MaybeHandle<JSSet> ValueDeserializer::ReadJSSet() {
return scope.CloseAndEscape(set);
}
-MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer() {
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer(
+ bool is_shared) {
uint32_t id = next_id_++;
+ if (is_shared) {
+ uint32_t clone_id;
+ Local<SharedArrayBuffer> sab_value;
+ if (!ReadVarint<uint32_t>().To(&clone_id) || delegate_ == nullptr ||
+ !delegate_
+ ->GetSharedArrayBufferFromId(
+ reinterpret_cast<v8::Isolate*>(isolate_), clone_id)
+ .ToLocal(&sab_value)) {
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSArrayBuffer);
+ return MaybeHandle<JSArrayBuffer>();
+ }
+ Handle<JSArrayBuffer> array_buffer = Utils::OpenHandle(*sab_value);
+ DCHECK_EQ(is_shared, array_buffer->is_shared());
+ AddObjectWithID(id, array_buffer);
+ return array_buffer;
+ }
uint32_t byte_length;
if (!ReadVarint<uint32_t>().To(&byte_length) ||
byte_length > static_cast<size_t>(end_ - position_)) {
@@ -1592,22 +1612,20 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer() {
return array_buffer;
}
-MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer(
- bool is_shared) {
+MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadTransferredJSArrayBuffer() {
uint32_t id = next_id_++;
uint32_t transfer_id;
- Handle<NumberDictionary> transfer_map;
+ Handle<SimpleNumberDictionary> transfer_map;
if (!ReadVarint<uint32_t>().To(&transfer_id) ||
!array_buffer_transfer_map_.ToHandle(&transfer_map)) {
return MaybeHandle<JSArrayBuffer>();
}
int index = transfer_map->FindEntry(isolate_, transfer_id);
- if (index == NumberDictionary::kNotFound) {
+ if (index == SimpleNumberDictionary::kNotFound) {
return MaybeHandle<JSArrayBuffer>();
}
Handle<JSArrayBuffer> array_buffer(
JSArrayBuffer::cast(transfer_map->ValueAt(index)), isolate_);
- DCHECK_EQ(is_shared, array_buffer->is_shared());
AddObjectWithID(id, array_buffer);
return array_buffer;
}
@@ -1628,6 +1646,16 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
uint32_t id = next_id_++;
ExternalArrayType external_array_type = kExternalInt8Array;
unsigned element_size = 0;
+
+ if (!FLAG_harmony_bigint) {
+ // Refuse to construct BigInt64Arrays unless the flag is on.
+ ArrayBufferViewTag cast_tag = static_cast<ArrayBufferViewTag>(tag);
+ if (cast_tag == ArrayBufferViewTag::kBigInt64Array ||
+ cast_tag == ArrayBufferViewTag::kBigUint64Array) {
+ return MaybeHandle<JSArrayBufferView>();
+ }
+ }
+
switch (static_cast<ArrayBufferViewTag>(tag)) {
case ArrayBufferViewTag::kDataView: {
Handle<JSDataView> data_view =
@@ -1714,8 +1742,8 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
}
if (result.is_null()) {
wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
- result = wasm::SyncCompile(isolate_, &thrower,
- wasm::ModuleWireBytes(wire_bytes));
+ result = isolate_->wasm_engine()->SyncCompile(
+ isolate_, &thrower, wasm::ModuleWireBytes(wire_bytes));
}
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
uint32_t id = next_id_++;
@@ -1744,7 +1772,7 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
const bool is_shared = true;
Handle<JSArrayBuffer> buffer;
- if (!ReadTransferredJSArrayBuffer(is_shared).ToHandle(&buffer)) {
+ if (!ReadJSArrayBuffer(is_shared).ToHandle(&buffer)) {
return MaybeHandle<WasmMemoryObject>();
}
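
Two details of the value-serializer.cc diff lend themselves to a sketch: the new wire tags 'q'/'Q' for BigInt64Array/BigUint64Array views, and the guard that refuses them while the BigInt feature flag is off. A simplified, flag-gated tag decoder in the same spirit (ViewTag, DecodeViewTag, and the bool flag are stand-ins for ArrayBufferViewTag, ReadJSArrayBufferView, and FLAG_harmony_bigint):

#include <cstdint>
#include <optional>

enum class ViewTag : uint8_t { kUint8 = 'B', kBigInt64 = 'q', kBigUint64 = 'Q' };

// Returns std::nullopt for BigInt views when the flag is off, mirroring the
// deserializer returning an empty MaybeHandle.
std::optional<ViewTag> DecodeViewTag(uint8_t raw, bool bigint_enabled) {
  ViewTag tag = static_cast<ViewTag>(raw);
  if (!bigint_enabled &&
      (tag == ViewTag::kBigInt64 || tag == ViewTag::kBigUint64)) {
    return std::nullopt;
  }
  return tag;
}

int main() {
  // With the flag off, 'q' (BigInt64Array) is rejected.
  return DecodeViewTag('q', /*bigint_enabled=*/false).has_value() ? 1 : 0;
}
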
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index a272fa0945..f719eb8206 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -266,8 +266,9 @@ class ValueDeserializer {
MaybeHandle<JSRegExp> ReadJSRegExp() WARN_UNUSED_RESULT;
MaybeHandle<JSMap> ReadJSMap() WARN_UNUSED_RESULT;
MaybeHandle<JSSet> ReadJSSet() WARN_UNUSED_RESULT;
- MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer() WARN_UNUSED_RESULT;
- MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer(bool is_shared)
+ MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer(bool is_shared)
+ WARN_UNUSED_RESULT;
+ MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer()
WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
Handle<JSArrayBuffer> buffer) WARN_UNUSED_RESULT;
@@ -300,7 +301,7 @@ class ValueDeserializer {
// Always global handles.
Handle<FixedArray> id_map_;
- MaybeHandle<NumberDictionary> array_buffer_transfer_map_;
+ MaybeHandle<SimpleNumberDictionary> array_buffer_transfer_map_;
DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
};
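
The header change retypes the transfer map as a SimpleNumberDictionary: a plain id-to-value table, since nothing beyond lookup by transfer id is needed. Ignoring handles and GC, the contract the deserializer relies on looks roughly like this hash-map analogue (ArrayBufferStub and TransferMap are invented names):

#include <cstdint>
#include <memory>
#include <optional>
#include <unordered_map>

struct ArrayBufferStub { /* backing store, byte length, ... */ };

class TransferMap {
 public:
  // TransferArrayBuffer(): register a buffer under a caller-chosen id.
  void Transfer(uint32_t id, std::shared_ptr<ArrayBufferStub> buf) {
    map_[id] = std::move(buf);
  }
  // ReadTransferredJSArrayBuffer(): unknown ids make the read fail.
  std::optional<std::shared_ptr<ArrayBufferStub>> Find(uint32_t id) const {
    auto it = map_.find(id);
    if (it == map_.end()) return std::nullopt;
    return it->second;
  }

 private:
  std::unordered_map<uint32_t, std::shared_ptr<ArrayBufferStub>> map_;
};

int main() {
  TransferMap map;
  map.Transfer(7, std::make_shared<ArrayBufferStub>());
  return (map.Find(7).has_value() && !map.Find(8).has_value()) ? 0 : 1;
}
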
diff --git a/deps/v8/src/version.h b/deps/v8/src/version.h
index 20d9c71fe7..93b3eb902d 100644
--- a/deps/v8/src/version.h
+++ b/deps/v8/src/version.h
@@ -5,12 +5,16 @@
#ifndef V8_VERSION_H_
#define V8_VERSION_H_
+#include <cstdint>
+
#include "src/base/functional.h"
-#include "src/vector.h"
namespace v8 {
namespace internal {
+template <typename T>
+class Vector;
+
class Version {
public:
// Return the various version components.
diff --git a/deps/v8/src/visitors.cc b/deps/v8/src/visitors.cc
index 98911f1c28..a877fc30ef 100644
--- a/deps/v8/src/visitors.cc
+++ b/deps/v8/src/visitors.cc
@@ -9,16 +9,19 @@
namespace v8 {
namespace internal {
-#define DECLARE_TAG(ignore1, name, ignore2) name,
-const char* const
- VisitorSynchronization::kTags[VisitorSynchronization::kNumberOfSyncTags] = {
- ROOT_ID_LIST(DECLARE_TAG)};
-#undef DECLARE_TAG
-
-#define DECLARE_TAG(ignore1, ignore2, name) name,
-const char* const VisitorSynchronization::kTagNames
- [VisitorSynchronization::kNumberOfSyncTags] = {ROOT_ID_LIST(DECLARE_TAG)};
-#undef DECLARE_TAG
+const char* RootVisitor::RootName(Root root) {
+ switch (root) {
+#define ROOT_CASE(root_id, description) \
+ case Root::root_id: \
+ return description;
+ ROOT_ID_LIST(ROOT_CASE)
+#undef ROOT_CASE
+ case Root::kNumberOfRoots:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/visitors.h b/deps/v8/src/visitors.h
index 7696df8faf..31ee0795d4 100644
--- a/deps/v8/src/visitors.h
+++ b/deps/v8/src/visitors.h
@@ -13,45 +13,42 @@ namespace internal {
class CodeDataContainer;
class Object;
-#define ROOT_ID_LIST(V) \
- V(kStringTable, "string_table", "(Internalized strings)") \
- V(kExternalStringsTable, "external_strings_table", "(External strings)") \
- V(kStrongRootList, "strong_root_list", "(Strong roots)") \
- V(kSmiRootList, "smi_root_list", "(Smi roots)") \
- V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
- V(kTop, "top", "(Isolate)") \
- V(kRelocatable, "relocatable", "(Relocatable)") \
- V(kDebug, "debug", "(Debugger)") \
- V(kCompilationCache, "compilationcache", "(Compilation cache)") \
- V(kHandleScope, "handlescope", "(Handle scope)") \
- V(kDispatchTable, "dispatchtable", "(Dispatch table)") \
- V(kBuiltins, "builtins", "(Builtins)") \
- V(kGlobalHandles, "globalhandles", "(Global handles)") \
- V(kEternalHandles, "eternalhandles", "(Eternal handles)") \
- V(kThreadManager, "threadmanager", "(Thread manager)") \
- V(kStrongRoots, "strong roots", "(Strong roots)") \
- V(kExtensions, "Extensions", "(Extensions)")
+#define ROOT_ID_LIST(V) \
+ V(kStringTable, "(Internalized strings)") \
+ V(kExternalStringsTable, "(External strings)") \
+ V(kStrongRootList, "(Strong roots)") \
+ V(kSmiRootList, "(Smi roots)") \
+ V(kBootstrapper, "(Bootstrapper)") \
+ V(kTop, "(Isolate)") \
+ V(kRelocatable, "(Relocatable)") \
+ V(kDebug, "(Debugger)") \
+ V(kCompilationCache, "(Compilation cache)") \
+ V(kHandleScope, "(Handle scope)") \
+ V(kDispatchTable, "(Dispatch table)") \
+ V(kBuiltins, "(Builtins)") \
+ V(kGlobalHandles, "(Global handles)") \
+ V(kEternalHandles, "(Eternal handles)") \
+ V(kThreadManager, "(Thread manager)") \
+ V(kStrongRoots, "(Strong roots)") \
+ V(kExtensions, "(Extensions)") \
+ V(kCodeFlusher, "(Code flusher)") \
+ V(kPartialSnapshotCache, "(Partial snapshot cache)") \
+ V(kWeakCollections, "(Weak collections)") \
+ V(kWrapperTracing, "(Wrapper tracing)") \
+ V(kUnknown, "(Unknown)")
class VisitorSynchronization : public AllStatic {
public:
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+#define DECLARE_ENUM(enum_item, ignore) enum_item,
enum SyncTag { ROOT_ID_LIST(DECLARE_ENUM) kNumberOfSyncTags };
#undef DECLARE_ENUM
-
- static const char* const kTags[kNumberOfSyncTags];
- static const char* const kTagNames[kNumberOfSyncTags];
};
enum class Root {
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+#define DECLARE_ENUM(enum_item, ignore) enum_item,
ROOT_ID_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
- // TODO(ulan): Merge with the ROOT_ID_LIST.
- kCodeFlusher,
- kPartialSnapshotCache,
- kWeakCollections,
- kWrapperTracing,
- kUnknown
+ kNumberOfRoots
};
// Abstract base class for visiting, and optionally modifying, the
@@ -62,11 +59,13 @@ class RootVisitor BASE_EMBEDDED {
// Visits a contiguous arrays of pointers in the half-open range
// [start, end). Any or all of the values may be modified on return.
- virtual void VisitRootPointers(Root root, Object** start, Object** end) = 0;
+ virtual void VisitRootPointers(Root root, const char* description,
+ Object** start, Object** end) = 0;
// Handy shorthand for visiting a single pointer.
- virtual void VisitRootPointer(Root root, Object** p) {
- VisitRootPointers(root, p, p + 1);
+ virtual void VisitRootPointer(Root root, const char* description,
+ Object** p) {
+ VisitRootPointers(root, description, p, p + 1);
}
// Intended for serialization/deserialization checking: insert, or
@@ -74,6 +73,8 @@ class RootVisitor BASE_EMBEDDED {
// Also used for marking up GC roots in heap snapshots.
// TODO(ulan): Remove this.
virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
+
+ static const char* RootName(Root root);
};
// Abstract base class for visiting, and optionally modifying, the
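
The visitors change collapses ROOT_ID_LIST to two columns and derives both the Root enum and RootName() from the same list, so descriptions can never drift from their enumerators. A self-contained sketch of that X-macro pattern with a cut-down list (DEMO_ROOT_LIST, DemoRoot, and DemoRootName are illustrative names):

#include <cstdio>

#define DEMO_ROOT_LIST(V)                   \
  V(kStringTable, "(Internalized strings)") \
  V(kBootstrapper, "(Bootstrapper)")        \
  V(kUnknown, "(Unknown)")

enum class DemoRoot {
#define DECLARE_ENUM(enum_item, ignore) enum_item,
  DEMO_ROOT_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
  kNumberOfRoots
};

const char* DemoRootName(DemoRoot root) {
  switch (root) {
#define ROOT_CASE(root_id, description) \
  case DemoRoot::root_id:               \
    return description;
    DEMO_ROOT_LIST(ROOT_CASE)
#undef ROOT_CASE
    case DemoRoot::kNumberOfRoots:
      break;
  }
  return nullptr;  // unreachable for valid roots
}

int main() {
  std::printf("%s\n", DemoRootName(DemoRoot::kBootstrapper));  // (Bootstrapper)
}
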
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index e68fb0847d..2b6cc5c057 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -6,8 +6,6 @@ bradnelson@chromium.org
clemensh@chromium.org
gdeepti@chromium.org
eholk@chromium.org
-mtrofin@chromium.org
-rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 7f7993d34f..ef8893f005 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
+#ifndef V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
+#define V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("arm " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
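
Across the arm and arm64 Liftoff ports, every UNIMPLEMENTED() (which aborts the process) becomes BAILOUT(reason), which records why baseline compilation cannot proceed so the caller can fall back to the optimizing pipeline. A minimal sketch of the mechanism, with MiniAssembler standing in for LiftoffAssembler and a made-up bailout() signature:

#include <cstdio>

class MiniAssembler {
 public:
  // Remember only the first reason; later bailouts are moot once the
  // compilation is going to be discarded anyway.
  void bailout(const char* reason) {
    if (bailout_reason_ == nullptr) bailout_reason_ = reason;
  }
  const char* bailout_reason() const { return bailout_reason_; }

  void EmitUnsupportedOp() { bailout("arm unsupported op"); }

 private:
  const char* bailout_reason_ = nullptr;
};

int main() {
  MiniAssembler masm;
  masm.EmitUnsupportedOp();
  if (masm.bailout_reason() != nullptr) {
    // The real caller would throw away the Liftoff code and retry with
    // the optimizing compiler.
    std::printf("liftoff bailout: %s\n", masm.bailout_reason());
  }
  return 0;
}
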
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 8d28c2b21c..09bce6d450 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
+#ifndef V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
+#define V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("arm64 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index a8b5b32bdc..35943554cc 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
+#ifndef V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
+#define V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -16,12 +16,20 @@ namespace wasm {
namespace liftoff {
+// ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
+// is located at ebp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
inline Operand GetStackSlot(uint32_t index) {
- // ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
- // is located at ebp-24.
- constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(
- ebp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return Operand(ebp, -kFirstStackSlotOffset - offset);
+}
+
+inline Operand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return Operand(ebp, -kFirstStackSlotOffset - offset);
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
@@ -41,26 +49,45 @@ static constexpr Register kCCallLastArgAddrReg = eax;
static constexpr DoubleRegister kScratchDoubleReg = xmm7;
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ sub_sp_32(0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
- sub(esp, Immediate(bytes));
+ // We can't run out of space, just pass anything big enough to not cause the
+ // assembler to try to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.sub_sp_32(bytes);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
switch (value.type()) {
case kWasmI32:
- if (value.to_i32() == 0) {
- xor_(reg.gp(), reg.gp());
- } else {
- mov(reg.gp(), Immediate(value.to_i32()));
- }
+ TurboAssembler::Move(
+ reg.gp(),
+ Immediate(reinterpret_cast<Address>(value.to_i32()), rmode));
break;
- case kWasmF32: {
- Register tmp = GetUnusedRegister(kGpReg).gp();
- mov(tmp, Immediate(value.to_f32_boxed().get_bits()));
- movd(reg.fp(), tmp);
+ case kWasmI64: {
+ DCHECK(RelocInfo::IsNone(rmode));
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::Move(reg.low_gp(), Immediate(low_word));
+ TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
break;
}
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
default:
UNREACHABLE();
}
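
PrepareStackFrame/PatchPrepareStackFrame above split frame setup in two: emit a fixed-width "sub esp, imm32" with a zero immediate, remember its pc offset, and patch the real byte count in once the slot count is known at the end of compilation. A hedged sketch of that two-phase idea over a raw byte buffer (the PatchableFrame class is illustrative; assumes a little-endian host, which ia32 is):

#include <cstdint>
#include <cstring>
#include <vector>

class PatchableFrame {
 public:
  // Emit "sub esp, imm32" (81 /5) with a placeholder immediate and return
  // the offset of the instruction so it can be patched later.
  uint32_t PrepareStackFrame() {
    uint32_t offset = static_cast<uint32_t>(code_.size());
    code_.push_back(0x81);  // sub r/m32, imm32
    code_.push_back(0xEC);  // ModRM: /5, esp
    for (int i = 0; i < 4; ++i) code_.push_back(0x00);  // imm32 placeholder
    return offset;
  }
  // Overwrite the placeholder with the final frame size in bytes.
  void PatchPrepareStackFrame(uint32_t offset, uint32_t frame_bytes) {
    std::memcpy(&code_[offset + 2], &frame_bytes, sizeof(frame_bytes));
  }

 private:
  std::vector<uint8_t> code_;
};

int main() {
  PatchableFrame frame;
  uint32_t patch_at = frame.PrepareStackFrame();
  // ... emit the function body, counting stack slots as we go ...
  frame.PatchPrepareStackFrame(patch_at, /*frame_bytes=*/16 + 8 * 3);
  return 0;
}
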
@@ -86,20 +113,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
+ DCHECK_EQ(type.value_type() == kWasmI64, dst.is_pair());
+  // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
+  // an immediate value (in 31 bits, interpreted as a signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
Operand src_op = offset_reg == no_reg
? Operand(src_addr, offset_imm)
: Operand(src_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register src = GetUnusedRegister(kGpReg, pinned).gp();
- mov(src, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(src, src, offset_reg);
- }
- src_op = Operand(src_addr, src, times_1, 0);
- }
if (protected_load_pc) *protected_load_pc = pc_offset();
+
switch (type.value()) {
case LoadType::kI32Load8U:
movzx_b(dst.gp(), src_op);
@@ -107,18 +130,61 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kI32Load8S:
movsx_b(dst.gp(), src_op);
break;
+ case LoadType::kI64Load8U:
+ movzx_b(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load8S:
+ movsx_b(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
case LoadType::kI32Load16U:
movzx_w(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
movsx_w(dst.gp(), src_op);
break;
+ case LoadType::kI64Load16U:
+ movzx_w(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load16S:
+ movsx_w(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
case LoadType::kI32Load:
mov(dst.gp(), src_op);
break;
+ case LoadType::kI64Load32U:
+ mov(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load32S:
+ mov(dst.low_gp(), src_op);
+ mov(dst.high_gp(), dst.low_gp());
+ sar(dst.high_gp(), 31);
+ break;
+ case LoadType::kI64Load: {
+ // Compute the operand for the load of the upper half.
+ DCHECK(is_uint31(offset_imm + 4));
+ Operand upper_src_op =
+ offset_reg == no_reg
+ ? Operand(src_addr, offset_imm + 4)
+ : Operand(src_addr, offset_reg, times_1, offset_imm + 4);
+ // The high word has to be mov'ed first, such that this is the protected
+ // instruction. The mov of the low word cannot segfault.
+ mov(dst.high_gp(), upper_src_op);
+ mov(dst.low_gp(), src_op);
+ break;
+ }
case LoadType::kF32Load:
movss(dst.fp(), src_op);
break;
+ case LoadType::kF64Load:
+ movsd(dst.fp(), src_op);
+ break;
default:
UNREACHABLE();
}
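
The new kI64Load32S path widens a signed 32-bit load into a register pair: copy the low word, then arithmetic-shift a copy right by 31 to synthesize the high word. A runnable sketch of that widening (I64Pair is an illustrative stand-in for Liftoff's low/high gp pair; the shift relies on arithmetic right shift of negative values, which C++20 guarantees and mainstream compilers implement anyway):

#include <cstdint>
#include <cstdio>

struct I64Pair {
  uint32_t low;
  uint32_t high;
};

I64Pair LoadI64FromI32Signed(int32_t mem_value) {
  I64Pair dst;
  dst.low = static_cast<uint32_t>(mem_value);         // mov low, [mem]
  dst.high = static_cast<uint32_t>(mem_value >> 31);  // mov high, low; sar high, 31
  return dst;
}

int main() {
  I64Pair v = LoadI64FromI32Signed(-2);
  std::printf("%08x%08x\n", v.high, v.low);  // fffffffffffffffe
  return 0;
}
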
@@ -128,21 +194,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
+ DCHECK_EQ(type.value_type() == kWasmI64, src.is_pair());
+  // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
+  // an immediate value (in 31 bits, interpreted as a signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
Operand dst_op = offset_reg == no_reg
? Operand(dst_addr, offset_imm)
: Operand(dst_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register dst = pinned.set(GetUnusedRegister(kGpReg, pinned).gp());
- mov(dst, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(dst, dst, offset_reg);
- }
- dst_op = Operand(dst_addr, dst, times_1, 0);
- }
if (protected_store_pc) *protected_store_pc = pc_offset();
+
switch (type.value()) {
+ case StoreType::kI64Store8:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store8:
// Only the lower 4 registers can be addressed as 8-bit registers.
if (src.gp().is_byte_register()) {
@@ -153,80 +218,139 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
mov_b(dst_op, byte_src);
}
break;
+ case StoreType::kI64Store16:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store16:
mov_w(dst_op, src.gp());
break;
+ case StoreType::kI64Store32:
+ src = src.low();
+ V8_FALLTHROUGH;
case StoreType::kI32Store:
mov(dst_op, src.gp());
break;
+ case StoreType::kI64Store: {
+ // Compute the operand for the store of the upper half.
+ DCHECK(is_uint31(offset_imm + 4));
+ Operand upper_dst_op =
+ offset_reg == no_reg
+ ? Operand(dst_addr, offset_imm + 4)
+ : Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
+ // The high word has to be mov'ed first, such that this is the protected
+ // instruction. The mov of the low word cannot segfault.
+ mov(upper_dst_op, src.high_gp());
+ mov(dst_op, src.low_gp());
+ break;
+ }
case StoreType::kF32Store:
movss(dst_op, src.fp());
break;
+ case StoreType::kF64Store:
+ movsd(dst_op, src.fp());
+ break;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
+ uint32_t caller_slot_idx,
+ ValueType type) {
Operand src(ebp, kPointerSize * (caller_slot_idx + 1));
- if (dst.is_gp()) {
- mov(dst.gp(), src);
- } else {
- movss(dst.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ mov(dst.gp(), src);
+ break;
+ case kWasmF32:
+ movss(dst.fp(), src);
+ break;
+ case kWasmF64:
+ movsd(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index);
- Spill(dst_index, reg);
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
} else {
push(liftoff::GetStackSlot(src_index));
pop(liftoff::GetStackSlot(dst_index));
}
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
LiftoffRegister dst =
- reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
- if (reg != dst) Move(dst, reg);
+ reg.is_pair()
+ ? LiftoffRegister::ForPair(LiftoffRegister(eax), LiftoffRegister(edx))
+ : reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- // The caller should check that the registers are not equal. For most
- // occurences, this is already guaranteed, so no need to check within this
- // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK_EQ(dst.reg_class(), src.reg_class());
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- mov(dst.gp(), src.gp());
+ DCHECK_EQ(kWasmI32, type);
+ mov(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ if (type == kWasmF32) {
+ movss(dst, src);
} else {
- movsd(dst.fp(), src.fp());
+ DCHECK_EQ(kWasmF64, type);
+ movsd(dst, src);
}
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- mov(dst, reg.gp());
- } else {
- movsd(dst, reg.fp());
+ switch (type) {
+ case kWasmI32:
+ mov(dst, reg.gp());
+ break;
+ case kWasmI64:
+ mov(dst, reg.low_gp());
+ mov(liftoff::GetHalfStackSlot(2 * index + 1), reg.high_gp());
+ break;
+ case kWasmF32:
+ movss(dst, reg.fp());
+ break;
+ case kWasmF64:
+ movsd(dst, reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32:
mov(dst, Immediate(value.to_i32()));
break;
+ case kWasmI64: {
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ mov(dst, Immediate(low_word));
+ mov(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+ break;
+ }
case kWasmF32:
mov(dst, Immediate(value.to_f32_boxed().get_bits()));
break;
@@ -235,16 +359,32 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
Operand src = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- mov(reg.gp(), src);
- } else {
- movsd(reg.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ mov(reg.gp(), src);
+ break;
+ case kWasmI64:
+ mov(reg.low_gp(), src);
+ mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ movss(reg.fp(), src);
+ break;
+ case kWasmF64:
+ movsd(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
+ mov(reg, liftoff::GetHalfStackSlot(half_index));
+}
+
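
Spill, Fill, and FillI64Half encode the ia32 i64 story: a 64-bit value occupies one 8-byte stack slot addressed as two 4-byte halves, with half index 2*slot holding the low word and 2*slot+1 the high word. A small sketch of the offset arithmetic, reusing this file's constants (ebp-8 stack marker, ebp-16 wasm context, first slot at ebp-24):

#include <cstdint>
#include <cstdio>

constexpr int32_t kStackSlotSize = 8;
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset = kConstantStackSpace + kStackSlotSize;

// GetStackSlot(): byte offset of a full slot relative to ebp.
int32_t SlotOffsetFromFP(uint32_t index) {
  return -kFirstStackSlotOffset - static_cast<int32_t>(index) * kStackSlotSize;
}

// GetHalfStackSlot(): byte offset of a 4-byte half slot relative to ebp.
int32_t HalfSlotOffsetFromFP(uint32_t half_index) {
  return -kFirstStackSlotOffset -
         static_cast<int32_t>(half_index) * (kStackSlotSize / 2);
}

int main() {
  // Slot 0 of an i64: low word at ebp-24, high word at ebp-28.
  std::printf("low %d high %d\n", HalfSlotOffsetFromFP(2 * 0),
              HalfSlotOffsetFromFP(2 * 0 + 1));
  return 0;
}
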
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
lea(dst, Operand(lhs, rhs, times_1, 0));
@@ -286,8 +426,11 @@ COMMUTATIVE_I32_BINOP(xor, xor_)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register lhs, Register rhs,
- void (Assembler::*emit_shift)(Register)) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, lhs, rhs);
+ void (Assembler::*emit_shift)(Register),
+ LiftoffRegList pinned) {
+ pinned.set(dst);
+ pinned.set(lhs);
+ pinned.set(rhs);
// If dst is ecx, compute into a tmp register first, then move to ecx.
if (dst == ecx) {
Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
@@ -302,7 +445,8 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
// first. If lhs is ecx, lhs is now the tmp register.
Register tmp_reg = no_reg;
if (rhs != ecx) {
- if (lhs == ecx || assm->cache_state()->is_used(LiftoffRegister(ecx))) {
+ if (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
+ pinned.has(LiftoffRegister(ecx))) {
tmp_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
assm->mov(tmp_reg, ecx);
if (lhs == ecx) lhs = tmp_reg;
@@ -319,30 +463,19 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl);
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl, pinned);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl);
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl, pinned);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl);
-}
-
-bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- Register tmp_byte_reg = dst;
- // Only the lower 4 registers can be addressed as 8-bit registers.
- if (!dst.is_byte_register()) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
- tmp_byte_reg = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
- }
-
- test(src, src);
- setcc(zero, tmp_byte_reg);
- movzx_b(dst, tmp_byte_reg);
- return true;
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl, pinned);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -432,22 +565,141 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
-void LiftoffAssembler::emit_i32_test(Register reg) { test(reg, reg); }
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorps(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorps(dst, src);
+ }
+}
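
{emit_f32_neg} flips only the sign bit, which negates any IEEE-754 value (including NaNs and infinities) without a floating-point operation. The same trick in portable, self-contained C++ (illustrative only, not V8 code):

    #include <cstdint>
    #include <cstring>
    float negate_f32(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits ^= uint32_t{1} << 31;  // flip the sign bit only
      std::memcpy(&x, &bits, sizeof bits);
      return x;
    }
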
+
+void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ addsd(dst, rhs);
+ }
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- cmp(lhs, rhs);
+void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movsd(kScratchDoubleReg, rhs);
+ movsd(dst, lhs);
+ subsd(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ subsd(dst, rhs);
+ }
+}
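
Unlike addsd/mulsd, subsd is not commutative, so when {dst} aliases {rhs} the value of {rhs} must be saved in the scratch register before {lhs} overwrites it. The aliasing hazard, sketched on plain doubles (illustrative only):

    // Naive "dst = lhs; dst -= rhs" is wrong when dst and rhs alias.
    void sub_in_place(double& dst, const double& lhs, const double& rhs) {
      if (&dst == &rhs) {
        double scratch = rhs;  // plays the role of kScratchDoubleReg
        dst = lhs;
        dst -= scratch;
      } else {
        if (&dst != &lhs) dst = lhs;
        dst -= rhs;
      }
    }
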
+
+void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ mulsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorpd(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorpd(dst, src);
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ switch (type) {
+ case kWasmI32:
+ cmp(lhs, rhs);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, kWasmI32);
+ test(lhs, lhs);
+ }
+
j(cond, label);
}
+namespace liftoff {
+inline void setcc_32(LiftoffAssembler* assm, Condition cond, Register dst) {
+ Register tmp_byte_reg = dst;
+ // Only the lower 4 registers can be addressed as 8-bit registers.
+ if (!dst.is_byte_register()) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
+ // {GetUnusedRegister()} may insert move instructions to spill registers to
+ // the stack. This is OK because {mov} does not change the status flags.
+ tmp_byte_reg = assm->GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ }
+
+ assm->setcc(cond, tmp_byte_reg);
+ assm->movzx_b(dst, tmp_byte_reg);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ if (rhs != no_reg) {
+ cmp(lhs, rhs);
+ } else {
+ test(lhs, lhs);
+ }
+ liftoff::setcc_32(this, cond, dst);
+}
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label cont;
+ Label not_nan;
+
+ ucomiss(lhs, rhs);
+ // If PF is one, one of the operands was NaN. This needs special handling.
+ j(parity_odd, &not_nan, Label::kNear);
+ // Return 1 for f32.ne, 0 for all other cases.
+ if (cond == not_equal) {
+ mov(dst, Immediate(1));
+ } else {
+ xor_(dst, dst);
+ }
+ jmp(&cont, Label::kNear);
+ bind(&not_nan);
+
+ liftoff::setcc_32(this, cond, dst);
+ bind(&cont);
+}
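
{ucomiss} raises PF for unordered comparisons, i.e. when either operand is NaN. Under IEEE-754 every comparison with NaN is false except "not equal", which is why the unordered path materializes 1 only for {not_equal}. The semantics being implemented, in plain C++ (illustrative only):

    #include <cmath>
    bool f32_eq(float a, float b) { return a == b; }  // false if either is NaN
    bool f32_ne(float a, float b) { return a != b; }  // true if either is NaN
    // f32_eq(NAN, NAN) == false, f32_ne(NAN, NAN) == true.
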
+
void LiftoffAssembler::StackCheck(Label* ool_code) {
- Register limit = GetUnusedRegister(kGpReg).gp();
- mov(limit, Immediate(ExternalReference::address_of_stack_limit(isolate())));
- cmp(esp, Operand(limit, 0));
+ cmp(esp,
+ Operand(Immediate(ExternalReference::address_of_stack_limit(isolate()))));
j(below_equal, ool_code);
}
@@ -462,27 +714,50 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
+ uint32_t src_index,
+ RegPairHalf half) {
switch (src.loc()) {
case VarState::kStack:
- DCHECK_NE(kWasmF64, src.type()); // TODO(clemensh): Implement this.
- push(liftoff::GetStackSlot(src_index));
+ if (src.type() == kWasmF64) {
+ DCHECK_EQ(kLowWord, half);
+ push(liftoff::GetHalfStackSlot(2 * src_index - 1));
+ }
+ push(liftoff::GetHalfStackSlot(2 * src_index +
+ (half == kLowWord ? 0 : 1)));
break;
case VarState::kRegister:
- PushCallerFrameSlot(src.reg());
+ if (src.type() == kWasmI64) {
+ PushCallerFrameSlot(
+ half == kLowWord ? src.reg().low() : src.reg().high(), kWasmI32);
+ } else {
+ PushCallerFrameSlot(src.reg(), src.type());
+ }
break;
- case VarState::kI32Const:
- push(Immediate(src.i32_const()));
+ case VarState::KIntConst:
+ // The high word is the sign extension of the low word.
+ push(Immediate(half == kLowWord ? src.i32_const()
+ : src.i32_const() >> 31));
break;
}
}
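
The high word of a 32-bit-encoded i64 constant is its sign extension, obtained by an arithmetic shift of 31. A self-contained illustration (assumes the usual arithmetic right shift for signed integers):

    #include <cstdint>
    int32_t high_word(int32_t low) { return low >> 31; }
    // high_word(5) == 0, high_word(-5) == -1 (all ones), so pushing
    // {low, low >> 31} reproduces the sign-extended 64-bit constant.
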
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- if (reg.is_gp()) {
- push(reg.gp());
- } else {
- sub(esp, Immediate(kPointerSize));
- movss(Operand(esp, 0), reg.fp());
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ push(reg.gp());
+ break;
+ case kWasmF32:
+ sub(esp, Immediate(sizeof(float)));
+ movss(Operand(esp, 0), reg.fp());
+ break;
+ case kWasmF64:
+ sub(esp, Immediate(sizeof(double)));
+ movsd(Operand(esp, 0), reg.fp());
+ break;
+ default:
+ // kWasmI64 is also unreachable, as it is always pushed as two halves.
+ UNREACHABLE();
}
}
@@ -571,6 +846,17 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
CallRuntimeDelayed(zone, fid);
}
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ add(esp, Immediate(kPointerSize));
+ call(Operand(esp, -4));
+ } else {
+ call(target);
+ }
+}
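
When {target == no_reg} the call target was previously pushed onto the stack. The two instructions pop it while still using it, annotated below (ia32, so kPointerSize is 4; an illustrative reading of the emitted sequence):

    add esp, 4    ; logically pop the target; its value still sits at [esp-4]
    call [esp-4]  ; the target is read first, then the call pushes the return
                  ; address into that same slot, reusing it
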
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
sub(esp, Immediate(size));
mov(addr, esp);
@@ -584,4 +870,4 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
+#endif // V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index 3eef1e1960..26f59c68be 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -11,6 +11,10 @@
#include "src/ia32/assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64.h"
#endif
namespace v8 {
@@ -19,8 +23,6 @@ namespace wasm {
#if V8_TARGET_ARCH_IA32
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
@@ -30,17 +32,31 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs =
#elif V8_TARGET_ARCH_X64
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-
constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
constexpr RegList kLiftoffAssemblerFpCacheRegs =
DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7>();
-#else
+#elif V8_TARGET_ARCH_MIPS
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7, v0, v1>();
-constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
+ f22, f24>();
+
+#elif V8_TARGET_ARCH_MIPS64
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7, v0, v1>();
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
+ f22, f24, f26>();
+
+#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
@@ -49,12 +65,45 @@ constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+
constexpr Condition kEqual = equal;
+constexpr Condition kUnequal = not_equal;
+constexpr Condition kSignedLessThan = less;
+constexpr Condition kSignedLessEqual = less_equal;
+constexpr Condition kSignedGreaterThan = greater;
+constexpr Condition kSignedGreaterEqual = greater_equal;
+constexpr Condition kUnsignedLessThan = below;
+constexpr Condition kUnsignedLessEqual = below_equal;
+constexpr Condition kUnsignedGreaterThan = above;
constexpr Condition kUnsignedGreaterEqual = above_equal;
+
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+
+constexpr Condition kEqual = eq;
+constexpr Condition kUnequal = ne;
+constexpr Condition kSignedLessThan = lt;
+constexpr Condition kSignedLessEqual = le;
+constexpr Condition kSignedGreaterThan = gt;
+constexpr Condition kSignedGreaterEqual = ge;
+constexpr Condition kUnsignedLessThan = ult;
+constexpr Condition kUnsignedLessEqual = ule;
+constexpr Condition kUnsignedGreaterThan = ugt;
+constexpr Condition kUnsignedGreaterEqual = uge;
+
#else
+
// On unimplemented platforms, just make this compile.
constexpr Condition kEqual = static_cast<Condition>(0);
+constexpr Condition kUnequal = static_cast<Condition>(0);
+constexpr Condition kSignedLessThan = static_cast<Condition>(0);
+constexpr Condition kSignedLessEqual = static_cast<Condition>(0);
+constexpr Condition kSignedGreaterThan = static_cast<Condition>(0);
+constexpr Condition kSignedGreaterEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedLessThan = static_cast<Condition>(0);
+constexpr Condition kUnsignedLessEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedGreaterThan = static_cast<Condition>(0);
constexpr Condition kUnsignedGreaterEqual = static_cast<Condition>(0);
+
#endif
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 121cfeea6a..09b8229dc1 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -31,20 +31,45 @@ class StackTransferRecipe {
struct RegisterMove {
LiftoffRegister dst;
LiftoffRegister src;
- constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src)
- : dst(dst), src(src) {}
+ ValueType type;
+ constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src,
+ ValueType type)
+ : dst(dst), src(src), type(type) {}
};
struct RegisterLoad {
- LiftoffRegister dst;
- bool is_constant_load; // otherwise load it from the stack.
- union {
- uint32_t stack_slot;
- WasmValue constant;
+ enum LoadKind : uint8_t {
+ kConstant, // load a constant value into a register.
+ kStack, // fill a register from a stack slot.
+ kHalfStack // fill one half of a register pair from half a stack slot.
};
- RegisterLoad(LiftoffRegister dst, WasmValue constant)
- : dst(dst), is_constant_load(true), constant(constant) {}
- RegisterLoad(LiftoffRegister dst, uint32_t stack_slot)
- : dst(dst), is_constant_load(false), stack_slot(stack_slot) {}
+
+ LiftoffRegister dst;
+ LoadKind kind;
+ ValueType type;
+ int32_t value; // i32 constant value or stack index, depending on kind.
+
+ // Named constructors.
+ static RegisterLoad Const(LiftoffRegister dst, WasmValue constant) {
+ if (constant.type() == kWasmI32) {
+ return {dst, kConstant, kWasmI32, constant.to_i32()};
+ }
+ DCHECK_EQ(kWasmI64, constant.type());
+ DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
+ return {dst, kConstant, kWasmI64, constant.to_i32_unchecked()};
+ }
+ static RegisterLoad Stack(LiftoffRegister dst, int32_t stack_index,
+ ValueType type) {
+ return {dst, kStack, type, stack_index};
+ }
+ static RegisterLoad HalfStack(LiftoffRegister dst,
+ int32_t half_stack_index) {
+ return {dst, kHalfStack, kWasmI32, half_stack_index};
+ }
+
+ private:
+ RegisterLoad(LiftoffRegister dst, LoadKind kind, ValueType type,
+ int32_t value)
+ : dst(dst), kind(kind), type(type), value(value) {}
};
public:
@@ -55,15 +80,17 @@ class StackTransferRecipe {
// First, execute register moves. Then load constants and stack values into
// registers.
- if ((move_dst_regs & move_src_regs).is_empty()) {
+ if ((move_dst_regs_ & move_src_regs_).is_empty()) {
// No overlap in src and dst registers. Just execute the moves in any
// order.
- for (RegisterMove& rm : register_moves) asm_->Move(rm.dst, rm.src);
- register_moves.clear();
+ for (RegisterMove& rm : register_moves_) {
+ asm_->Move(rm.dst, rm.src, rm.type);
+ }
+ register_moves_.clear();
} else {
// Keep use counters of src registers.
uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
- for (RegisterMove& rm : register_moves) {
+ for (RegisterMove& rm : register_moves_) {
++src_reg_use_count[rm.src.liftoff_code()];
}
// Now repeatedly iterate the list of register moves, and execute those
@@ -73,11 +100,11 @@ class StackTransferRecipe {
// register to the stack, add a RegisterLoad to reload it later, and
// continue.
uint32_t next_spill_slot = asm_->cache_state()->stack_height();
- while (!register_moves.empty()) {
+ while (!register_moves_.empty()) {
int executed_moves = 0;
- for (auto& rm : register_moves) {
+ for (auto& rm : register_moves_) {
if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
- asm_->Move(rm.dst, rm.src);
+ asm_->Move(rm.dst, rm.src, rm.type);
++executed_moves;
DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
--src_reg_use_count[rm.src.liftoff_code()];
@@ -89,53 +116,64 @@ class StackTransferRecipe {
if (executed_moves == 0) {
// There is a cycle. Spill one register, then continue.
// TODO(clemensh): Use an unused register if available.
- LiftoffRegister spill_reg = register_moves.back().src;
- asm_->Spill(next_spill_slot, spill_reg);
+ RegisterMove& rm = register_moves_.back();
+ LiftoffRegister spill_reg = rm.src;
+ asm_->Spill(next_spill_slot, spill_reg, rm.type);
// Remember to reload into the destination register later.
- LoadStackSlot(register_moves.back().dst, next_spill_slot);
+ LoadStackSlot(register_moves_.back().dst, next_spill_slot, rm.type);
DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
src_reg_use_count[spill_reg.liftoff_code()] = 0;
++next_spill_slot;
executed_moves = 1;
}
- register_moves.erase(register_moves.end() - executed_moves,
- register_moves.end());
+ register_moves_.erase(register_moves_.end() - executed_moves,
+ register_moves_.end());
}
}
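
When every pending move's destination is still needed as a source (a cycle such as a <-> b), no move can run, so one source register is spilled to a fresh stack slot and reloaded later, which breaks the cycle. The same idea on plain ints (illustrative only):

    void swap_via_spill(int& a, int& b) {
      int spill = a;  // Spill(next_spill_slot, src)
      a = b;          // this move is now executable
      b = spill;      // scheduled later as a RegisterLoad of kind kStack
    }
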
- for (RegisterLoad& rl : register_loads) {
- if (rl.is_constant_load) {
- asm_->LoadConstant(rl.dst, rl.constant);
- } else {
- asm_->Fill(rl.dst, rl.stack_slot);
+ for (RegisterLoad& rl : register_loads_) {
+ switch (rl.kind) {
+ case RegisterLoad::kConstant:
+ asm_->LoadConstant(rl.dst, rl.type == kWasmI64
+ ? WasmValue(int64_t{rl.value})
+ : WasmValue(int32_t{rl.value}));
+ break;
+ case RegisterLoad::kStack:
+ asm_->Fill(rl.dst, rl.value, rl.type);
+ break;
+ case RegisterLoad::kHalfStack:
+ // As half of a register pair, {rl.dst} must be a gp register.
+ asm_->FillI64Half(rl.dst.gp(), rl.value);
+ break;
}
}
- register_loads.clear();
+ register_loads_.clear();
}
void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
uint32_t dst_index, uint32_t src_index) {
const VarState& dst = dst_state.stack_state[dst_index];
const VarState& src = __ cache_state()->stack_state[src_index];
+ DCHECK_EQ(dst.type(), src.type());
switch (dst.loc()) {
case VarState::kStack:
switch (src.loc()) {
case VarState::kStack:
if (src_index == dst_index) break;
- asm_->MoveStackValue(dst_index, src_index);
+ asm_->MoveStackValue(dst_index, src_index, src.type());
break;
case VarState::kRegister:
- asm_->Spill(dst_index, src.reg());
+ asm_->Spill(dst_index, src.reg(), src.type());
break;
- case VarState::kI32Const:
- asm_->Spill(dst_index, WasmValue(src.i32_const()));
+ case VarState::KIntConst:
+ asm_->Spill(dst_index, src.constant());
break;
}
break;
case VarState::kRegister:
LoadIntoRegister(dst.reg(), src, src_index);
break;
- case VarState::kI32Const:
+ case VarState::KIntConst:
DCHECK_EQ(dst, src);
break;
}
@@ -146,40 +184,80 @@ class StackTransferRecipe {
uint32_t src_index) {
switch (src.loc()) {
case VarState::kStack:
- LoadStackSlot(dst, src_index);
+ LoadStackSlot(dst, src_index, src.type());
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
- if (dst != src.reg()) MoveRegister(dst, src.reg());
+ if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
+ break;
+ case VarState::KIntConst:
+ LoadConstant(dst, src.constant());
break;
- case VarState::kI32Const:
- LoadConstant(dst, WasmValue(src.i32_const()));
+ }
+ }
+
+ void LoadI64HalfIntoRegister(LiftoffRegister dst,
+ const LiftoffAssembler::VarState& src,
+ uint32_t index, RegPairHalf half) {
+ // Use CHECK such that the remaining code is statically dead if
+ // {kNeedI64RegPair} is false.
+ CHECK(kNeedI64RegPair);
+ DCHECK_EQ(kWasmI64, src.type());
+ switch (src.loc()) {
+ case VarState::kStack:
+ LoadI64HalfStackSlot(dst, 2 * index + (half == kLowWord ? 0 : 1));
+ break;
+ case VarState::kRegister: {
+ LiftoffRegister src_half =
+ half == kLowWord ? src.reg().low() : src.reg().high();
+ if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
+ break;
+ }
+ case VarState::KIntConst:
+ int32_t value = src.i32_const();
+ // The high word is the sign extension of the low word.
+ if (half == kHighWord) value = value >> 31;
+ LoadConstant(dst, WasmValue(value));
break;
}
}
- void MoveRegister(LiftoffRegister dst, LiftoffRegister src) {
+ void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK(!move_dst_regs.has(dst));
- move_dst_regs.set(dst);
- move_src_regs.set(src);
- register_moves.emplace_back(dst, src);
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ DCHECK_EQ(reg_class_for(type), src.reg_class());
+ if (src.is_pair()) {
+ DCHECK_EQ(kWasmI64, type);
+ if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
+ if (dst.high() != src.high())
+ MoveRegister(dst.high(), src.high(), kWasmI32);
+ return;
+ }
+ DCHECK(!move_dst_regs_.has(dst));
+ move_dst_regs_.set(dst);
+ move_src_regs_.set(src);
+ register_moves_.emplace_back(dst, src, type);
}
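
An i64 register-pair move decomposes into two independent i32 moves, and halves that already match are skipped so they never enter the move graph. A sketch with a plain struct standing in for the pair (references model register identity; illustrative only):

    struct Pair { int low, high; };
    void move_pair(Pair& dst, const Pair& src) {
      if (&dst.low != &src.low) dst.low = src.low;      // MoveRegister(kWasmI32)
      if (&dst.high != &src.high) dst.high = src.high;  // MoveRegister(kWasmI32)
    }
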
void LoadConstant(LiftoffRegister dst, WasmValue value) {
- register_loads.emplace_back(dst, value);
+ register_loads_.push_back(RegisterLoad::Const(dst, value));
+ }
+
+ void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
+ ValueType type) {
+ register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
}
- void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index) {
- register_loads.emplace_back(dst, stack_index);
+ void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
+ register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
}
private:
// TODO(clemensh): Avoid unconditionally allocating on the heap.
- std::vector<RegisterMove> register_moves;
- std::vector<RegisterLoad> register_loads;
- LiftoffRegList move_dst_regs;
- LiftoffRegList move_src_regs;
+ std::vector<RegisterMove> register_moves_;
+ std::vector<RegisterLoad> register_loads_;
+ LiftoffRegList move_dst_regs_;
+ LiftoffRegList move_src_regs_;
LiftoffAssembler* const asm_;
};
@@ -301,16 +379,16 @@ LiftoffRegister LiftoffAssembler::PopToRegister(RegClass rc,
switch (slot.loc()) {
case VarState::kStack: {
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
- Fill(reg, cache_state_.stack_height());
+ Fill(reg, cache_state_.stack_height(), slot.type());
return reg;
}
case VarState::kRegister:
DCHECK_EQ(rc, slot.reg_class());
cache_state_.dec_used(slot.reg());
return slot.reg();
- case VarState::kI32Const: {
+ case VarState::KIntConst: {
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
- LoadConstant(reg, WasmValue(slot.i32_const()));
+ LoadConstant(reg, slot.constant());
return reg;
}
}
@@ -335,6 +413,8 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
// ^target_stack_base
uint32_t stack_height = cache_state_.stack_height();
uint32_t target_stack_height = target.stack_height();
+ DCHECK_LE(target_stack_height, stack_height);
+ DCHECK_LE(arity, target_stack_height);
uint32_t stack_base = stack_height - arity;
uint32_t target_stack_base = target_stack_height - arity;
StackTransferRecipe transfers(this);
@@ -352,11 +432,11 @@ void LiftoffAssembler::Spill(uint32_t index) {
case VarState::kStack:
return;
case VarState::kRegister:
- Spill(index, slot.reg());
+ Spill(index, slot.reg(), slot.type());
cache_state_.dec_used(slot.reg());
break;
- case VarState::kI32Const:
- Spill(index, WasmValue(slot.i32_const()));
+ case VarState::KIntConst:
+ Spill(index, slot.constant());
break;
}
slot.MakeStack();
@@ -372,19 +452,17 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue;
- Spill(i, slot.reg());
+ Spill(i, slot.reg(), slot.type());
slot.MakeStack();
}
cache_state_.reset_used_registers();
}
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
- compiler::CallDescriptor* call_desc) {
+ compiler::CallDescriptor* call_descriptor,
+ Register* target,
+ LiftoffRegister* explicit_context) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
- // Parameter 0 is the wasm context.
- constexpr size_t kFirstActualParameter = 1;
- DCHECK_EQ(kFirstActualParameter + num_params, call_desc->ParameterCount());
-
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
@@ -394,66 +472,134 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
idx < end; ++idx) {
VarState& slot = cache_state_.stack_state[idx];
if (!slot.is_reg()) continue;
- Spill(idx, slot.reg());
+ Spill(idx, slot.reg(), slot.type());
slot.MakeStack();
}
StackTransferRecipe stack_transfers(this);
+ LiftoffRegList param_regs;
+
+ // Move the explicit context (if any) into the correct context register.
+ compiler::LinkageLocation context_loc =
+ call_descriptor->GetInputLocation(kInputShift);
+ DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
+ LiftoffRegister context_reg(Register::from_code(context_loc.AsRegister()));
+ param_regs.set(context_reg);
+ if (explicit_context && *explicit_context != context_reg) {
+ stack_transfers.MoveRegister(context_reg, *explicit_context, kWasmIntPtr);
+ }
// Now move all parameter values into the right slot for the call.
- // Process parameters backward, such that we can just pop values from the
- // stack.
+ // Don't pop values yet, such that the stack height is still correct when
+ // executing the {stack_transfers}.
+ // Process parameters backwards, such that pushes of caller frame slots are
+ // in the correct order.
+ uint32_t param_base = cache_state_.stack_height() - num_params;
+ uint32_t call_desc_input_idx =
+ static_cast<uint32_t>(call_descriptor->InputCount());
for (uint32_t i = num_params; i > 0; --i) {
- uint32_t param = i - 1;
+ const uint32_t param = i - 1;
ValueType type = sig->GetParam(param);
- RegClass rc = reg_class_for(type);
- compiler::LinkageLocation loc = call_desc->GetInputLocation(
- param + kFirstActualParameter + kInputShift);
- const VarState& slot = cache_state_.stack_state.back();
- uint32_t stack_idx = cache_state_.stack_height() - 1;
- if (loc.IsRegister()) {
- DCHECK(!loc.IsAnyRegister());
- int reg_code = loc.AsRegister();
- LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
- stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
+ const bool is_pair = kNeedI64RegPair && type == kWasmI64;
+ const int num_lowered_params = is_pair ? 2 : 1;
+ const uint32_t stack_idx = param_base + param;
+ const VarState& slot = cache_state_.stack_state[stack_idx];
+ // Process both halves of a register pair separately, because they are passed
+ // as separate parameters. One or both of them could end up on the stack.
+ for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
+ const RegPairHalf half =
+ is_pair && lowered_idx == 0 ? kHighWord : kLowWord;
+ --call_desc_input_idx;
+ compiler::LinkageLocation loc =
+ call_descriptor->GetInputLocation(call_desc_input_idx);
+ if (loc.IsRegister()) {
+ DCHECK(!loc.IsAnyRegister());
+ RegClass rc = is_pair ? kGpReg : reg_class_for(type);
+ LiftoffRegister reg = LiftoffRegister::from_code(rc, loc.AsRegister());
+ param_regs.set(reg);
+ if (is_pair) {
+ stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half);
+ } else {
+ stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
+ }
+ } else {
+ DCHECK(loc.IsCallerFrameSlot());
+ PushCallerFrameSlot(slot, stack_idx, half);
+ }
+ }
+ }
+ // {call_desc_input_idx} should point after the context parameter now.
+ DCHECK_EQ(call_desc_input_idx, kInputShift + 1);
+
+ // If the target register overlaps with a parameter register, then move the
+ // target to another free register, or spill to the stack.
+ if (target && param_regs.has(LiftoffRegister(*target))) {
+ // Try to find another free register.
+ LiftoffRegList free_regs = kGpCacheRegList.MaskOut(param_regs);
+ if (!free_regs.is_empty()) {
+ LiftoffRegister new_target = free_regs.GetFirstRegSet();
+ stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
+ kWasmIntPtr);
+ *target = new_target.gp();
} else {
- DCHECK(loc.IsCallerFrameSlot());
- PushCallerFrameSlot(slot, stack_idx);
+ PushCallerFrameSlot(LiftoffRegister(*target), kWasmIntPtr);
+ *target = no_reg;
}
- cache_state_.stack_state.pop_back();
}
// Execute the stack transfers before filling the context register.
stack_transfers.Execute();
+ // Pop parameters from the value stack.
+ auto stack_end = cache_state_.stack_state.end();
+ cache_state_.stack_state.erase(stack_end - num_params, stack_end);
+
// Reset register use counters.
cache_state_.reset_used_registers();
- // Fill the wasm context into the right register.
- compiler::LinkageLocation context_loc =
- call_desc->GetInputLocation(kInputShift);
- DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
- int context_reg_code = context_loc.AsRegister();
- LiftoffRegister context_reg(Register::from_code(context_reg_code));
- FillContextInto(context_reg.gp());
+ // Reload the context from the stack.
+ if (!explicit_context) {
+ FillContextInto(context_reg.gp());
+ }
}
void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
- compiler::CallDescriptor* call_desc) {
- size_t return_count = call_desc->ReturnCount();
- DCHECK_EQ(return_count, sig->return_count());
+ compiler::CallDescriptor* call_descriptor) {
+ const size_t return_count = sig->return_count();
if (return_count != 0) {
DCHECK_EQ(1, return_count);
- compiler::LinkageLocation return_loc = call_desc->GetReturnLocation(0);
- int return_reg_code = return_loc.AsRegister();
ValueType return_type = sig->GetReturn(0);
- LiftoffRegister return_reg =
- LiftoffRegister::from_code(reg_class_for(return_type), return_reg_code);
+ const bool need_pair = kNeedI64RegPair && return_type == kWasmI64;
+ DCHECK_EQ(need_pair ? 2 : 1, call_descriptor->ReturnCount());
+ RegClass rc = need_pair ? kGpReg : reg_class_for(return_type);
+ LiftoffRegister return_reg = LiftoffRegister::from_code(
+ rc, call_descriptor->GetReturnLocation(0).AsRegister());
+ DCHECK(GetCacheRegList(rc).has(return_reg));
+ if (need_pair) {
+ LiftoffRegister high_reg = LiftoffRegister::from_code(
+ rc, call_descriptor->GetReturnLocation(1).AsRegister());
+ DCHECK(GetCacheRegList(rc).has(high_reg));
+ return_reg = LiftoffRegister::ForPair(return_reg, high_reg);
+ }
DCHECK(!cache_state_.is_used(return_reg));
PushRegister(return_type, return_reg);
}
}
+void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
+ ValueType type) {
+ DCHECK_EQ(dst.reg_class(), src.reg_class());
+ if (kNeedI64RegPair && dst.is_pair()) {
+ // Use the {StackTransferRecipe} to move pairs, as the registers in the
+ // pairs might overlap.
+ StackTransferRecipe(this).MoveRegister(dst, src, type);
+ } else if (dst.is_gp()) {
+ Move(dst.gp(), src.gp(), type);
+ } else {
+ Move(dst.fp(), src.fp(), type);
+ }
+}
+
LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned) {
// Spill one cached value to free a register.
@@ -468,8 +614,14 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
auto* slot = &cache_state_.stack_state[idx];
- if (!slot->is_reg() || slot->reg() != reg) continue;
- Spill(idx, reg);
+ if (!slot->is_reg() || !slot->reg().overlaps(reg)) continue;
+ if (slot->reg().is_pair()) {
+ // Make sure to decrement *both* registers in a pair, because the
+ // {clear_used} call below only clears one of them.
+ cache_state_.dec_used(slot->reg().low());
+ cache_state_.dec_used(slot->reg().high());
+ }
+ Spill(idx, slot->reg(), slot->type());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
@@ -486,10 +638,6 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
}
}
-uint32_t LiftoffAssembler::GetTotalFrameSlotCount() const {
- return num_locals() + kMaxValueStackHeight;
-}
-
std::ostream& operator<<(std::ostream& os, VarState slot) {
os << WasmOpcodes::TypeName(slot.type()) << ":";
switch (slot.loc()) {
@@ -497,7 +645,7 @@ std::ostream& operator<<(std::ostream& os, VarState slot) {
return os << "s";
case VarState::kRegister:
return os << slot.reg();
- case VarState::kI32Const:
+ case VarState::KIntConst:
return os << "c" << slot.i32_const();
}
UNREACHABLE();
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index b91f6d7c88..99d9814dea 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -8,8 +8,6 @@
#include <iosfwd>
#include <memory>
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
@@ -29,35 +27,35 @@ struct ModuleEnv;
class LiftoffAssembler : public TurboAssembler {
public:
- // TODO(clemensh): Remove this limitation by allocating more stack space if
- // needed.
- static constexpr int kMaxValueStackHeight = 8;
-
// Each slot in our stack frame currently has exactly 8 bytes.
static constexpr uint32_t kStackSlotSize = 8;
+ static constexpr ValueType kWasmIntPtr =
+ kPointerSize == 8 ? kWasmI64 : kWasmI32;
+
class VarState {
public:
- enum Location : uint8_t { kStack, kRegister, kI32Const };
+ enum Location : uint8_t { kStack, kRegister, KIntConst };
explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
explicit VarState(ValueType type, LiftoffRegister r)
: loc_(kRegister), type_(type), reg_(r) {
DCHECK_EQ(r.reg_class(), reg_class_for(type));
}
- explicit VarState(ValueType type, uint32_t i32_const)
- : loc_(kI32Const), type_(type), i32_const_(i32_const) {
+ explicit VarState(ValueType type, int32_t i32_const)
+ : loc_(KIntConst), type_(type), i32_const_(i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
}
bool operator==(const VarState& other) const {
if (loc_ != other.loc_) return false;
+ if (type_ != other.type_) return false;
switch (loc_) {
case kStack:
return true;
case kRegister:
return reg_ == other.reg_;
- case kI32Const:
+ case KIntConst:
return i32_const_ == other.i32_const_;
}
UNREACHABLE();
@@ -67,16 +65,23 @@ class LiftoffAssembler : public TurboAssembler {
bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
bool is_reg() const { return loc_ == kRegister; }
- bool is_const() const { return loc_ == kI32Const; }
+ bool is_const() const { return loc_ == KIntConst; }
ValueType type() const { return type_; }
Location loc() const { return loc_; }
- uint32_t i32_const() const {
- DCHECK_EQ(loc_, kI32Const);
+ int32_t i32_const() const {
+ DCHECK_EQ(loc_, KIntConst);
return i32_const_;
}
+ WasmValue constant() const {
+ DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK_EQ(loc_, KIntConst);
+ return type_ == kWasmI32 ? WasmValue(i32_const_)
+ : WasmValue(int64_t{i32_const_});
+ }
+
Register gp_reg() const { return reg().gp(); }
DoubleRegister fp_reg() const { return reg().fp(); }
LiftoffRegister reg() const {
@@ -95,7 +100,7 @@ class LiftoffAssembler : public TurboAssembler {
union {
LiftoffRegister reg_; // used if loc_ == kRegister
- uint32_t i32_const_; // used if loc_ == kI32Const
+ int32_t i32_const_; // used if loc_ == KIntConst
};
};
@@ -117,6 +122,11 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t stack_base = 0;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegList available_regs =
+ kGpCacheRegList & ~used_registers & ~pinned;
+ return available_regs.GetNumRegsSet() >= 2;
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return has_unused_register(candidates, pinned);
@@ -130,9 +140,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister unused_register(RegClass rc,
LiftoffRegList pinned = {}) const {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegister low = pinned.set(unused_register(kGpReg, pinned));
+ LiftoffRegister high = unused_register(kGpReg, pinned);
+ return LiftoffRegister::ForPair(low, high);
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
- return unused_register(candidates);
+ return unused_register(candidates, pinned);
}
LiftoffRegister unused_register(LiftoffRegList candidates,
@@ -142,22 +157,31 @@ class LiftoffAssembler : public TurboAssembler {
}
void inc_used(LiftoffRegister reg) {
+ if (reg.is_pair()) {
+ inc_used(reg.low());
+ inc_used(reg.high());
+ return;
+ }
used_registers.set(reg);
DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
++register_use_count[reg.liftoff_code()];
}
// Returns whether this was the last use.
- bool dec_used(LiftoffRegister reg) {
+ void dec_used(LiftoffRegister reg) {
DCHECK(is_used(reg));
+ if (reg.is_pair()) {
+ dec_used(reg.low());
+ dec_used(reg.high());
+ return;
+ }
int code = reg.liftoff_code();
DCHECK_LT(0, register_use_count[code]);
- if (--register_use_count[code] != 0) return false;
- used_registers.clear(reg);
- return true;
+ if (--register_use_count[code] == 0) used_registers.clear(reg);
}
bool is_used(LiftoffRegister reg) const {
+ if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
bool used = used_registers.has(reg);
DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
return used;
@@ -239,6 +263,12 @@ class LiftoffAssembler : public TurboAssembler {
// Get an unused register for class {rc}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ if (kNeedI64RegPair && rc == kGpRegPair) {
+ LiftoffRegList candidates = kGpCacheRegList;
+ LiftoffRegister low = pinned.set(GetUnusedRegister(candidates, pinned));
+ LiftoffRegister high = GetUnusedRegister(candidates, pinned);
+ return LiftoffRegister::ForPair(low, high);
+ }
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return GetUnusedRegister(candidates, pinned);
@@ -270,18 +300,36 @@ class LiftoffAssembler : public TurboAssembler {
void SpillLocals();
void SpillAllRegisters();
+ // Call this method whenever spilling something, so that the number of used
+ // spill slots can be tracked and the stack frame will be allocated large
+ // enough.
+ void RecordUsedSpillSlot(uint32_t index) {
+ if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
+ }
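
The frame must be large enough to cover the highest spill slot actually touched; tracking the running maximum replaces the old fixed {kMaxValueStackHeight} limit. A minimal sketch of the bookkeeping (hypothetical stand-alone type, not V8 code):

    #include <algorithm>
    #include <cstdint>
    struct FrameTracker {
      uint32_t num_used_spill_slots = 0;
      void RecordUsedSpillSlot(uint32_t index) {
        num_used_spill_slots = std::max(num_used_spill_slots, index + 1);
      }
    };
    // After recording slots 0, 5, 2 the frame needs 6 spill slots;
    // GetTotalFrameSlotCount() then adds num_locals on top.
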
+
// Load parameters into the right registers / stack slots for the call.
- void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ // Move {*target} into another register if needed and update {*target} to that
+ // register, or {no_reg} if target was spilled to the stack.
+ void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
+ Register* target = nullptr,
+ LiftoffRegister* explicit_context = nullptr);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
+
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
- inline void ReserveStackSpace(uint32_t bytes);
+ // This function emits machine code to prepare the stack frame, before the
+ // size of the stack frame is known. It returns an offset in the machine code
+ // which can later be patched (via {PatchPrepareStackFrame}) when the size of
+ // the frame is known.
+ inline uint32_t PrepareStackFrame();
+ inline void PatchPrepareStackFrame(uint32_t offset, uint32_t stack_slots);
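
Frame construction is now two-phase: {PrepareStackFrame} emits a placeholder frame-setup instruction and returns its code offset, and once compilation is done and the spill count is known, {PatchPrepareStackFrame} rewrites the immediate in place. Intended usage, sketched with a hypothetical {assembler} variable:

    uint32_t offset = assembler->PrepareStackFrame();  // e.g. "sub esp, 0"
    // ... compile the function body, recording used spill slots ...
    assembler->PatchPrepareStackFrame(offset,
                                      assembler->GetTotalFrameSlotCount());
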
- inline void LoadConstant(LiftoffRegister, WasmValue);
+ inline void LoadConstant(LiftoffRegister, WasmValue,
+ RelocInfo::Mode rmode = RelocInfo::NONE);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
inline void FillContextInto(Register dst);
@@ -291,16 +339,18 @@ class LiftoffAssembler : public TurboAssembler {
inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister src, StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc = nullptr);
- inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx);
- inline void MoveStackValue(uint32_t dst_index, uint32_t src_index);
+ inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
+ ValueType);
+ inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
- inline void MoveToReturnRegister(LiftoffRegister);
- // TODO(clemensh): Pass the type to {Move}, to emit more efficient code.
- inline void Move(LiftoffRegister dst, LiftoffRegister src);
+ inline void MoveToReturnRegister(LiftoffRegister src, ValueType);
+ inline void Move(Register dst, Register src, ValueType);
+ inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
- inline void Spill(uint32_t index, LiftoffRegister);
+ inline void Spill(uint32_t index, LiftoffRegister, ValueType);
inline void Spill(uint32_t index, WasmValue);
- inline void Fill(LiftoffRegister, uint32_t index);
+ inline void Fill(LiftoffRegister, uint32_t index, ValueType);
+ inline void FillI64Half(Register, uint32_t half_index);
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
@@ -309,29 +359,49 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
- inline void emit_i32_shl(Register dst, Register lhs, Register rhs);
- inline void emit_i32_sar(Register dst, Register lhs, Register rhs);
- inline void emit_i32_shr(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
+ inline void emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
+ inline void emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned = {});
// i32 unops.
- inline bool emit_i32_eqz(Register dst, Register src);
inline bool emit_i32_clz(Register dst, Register src);
inline bool emit_i32_ctz(Register dst, Register src);
inline bool emit_i32_popcnt(Register dst, Register src);
inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs);
+ // f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
+ // f32 unops.
+ inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
+
+ // f64 binops.
+ inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+ inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
+
+ // f64 unops.
+ inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
- inline void emit_i32_test(Register);
- inline void emit_i32_compare(Register, Register);
inline void emit_jump(Label*);
- inline void emit_cond_jump(Condition, Label*);
+ inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
+ Register rhs = no_reg);
+ // Set {dst} to 1 if condition holds, 0 otherwise.
+ inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
+ Register rhs = no_reg);
+ inline void emit_f32_set_cond(Condition, Register dst, DoubleRegister lhs,
+ DoubleRegister rhs);
inline void StackCheck(Label* ool_code);
@@ -340,8 +410,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void AssertUnreachable(AbortReason reason);
// Push a value to the stack (will become a caller frame slot).
- inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index);
- inline void PushCallerFrameSlot(LiftoffRegister reg);
+ inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index,
+ RegPairHalf half);
+ inline void PushCallerFrameSlot(LiftoffRegister reg, ValueType type);
inline void PushRegisters(LiftoffRegList);
inline void PopRegisters(LiftoffRegList);
@@ -358,8 +429,11 @@ class LiftoffAssembler : public TurboAssembler {
inline void CallC(ExternalReference ext_ref, uint32_t num_params);
inline void CallNativeWasmCode(Address addr);
-
inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
+ // Indirect call: If {target == no_reg}, then pop the target from the stack.
+ inline void CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target);
// Reserve space in the current frame, store address to space in {addr}.
inline void AllocateStackSlot(Register addr, uint32_t size);
@@ -372,7 +446,9 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t num_locals() const { return num_locals_; }
void set_num_locals(uint32_t num_locals);
- uint32_t GetTotalFrameSlotCount() const;
+ uint32_t GetTotalFrameSlotCount() const {
+ return num_locals_ + num_used_spill_slots_;
+ }
ValueType local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
@@ -389,6 +465,9 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
+ bool did_bailout() { return bailout_reason_ != nullptr; }
+ const char* bailout_reason() const { return bailout_reason_; }
+
private:
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
@@ -399,9 +478,15 @@ class LiftoffAssembler : public TurboAssembler {
static_assert(sizeof(ValueType) == 1,
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
+ uint32_t num_used_spill_slots_ = 0;
+ const char* bailout_reason_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
+
+ void bailout(const char* reason) {
+ if (bailout_reason_ == nullptr) bailout_reason_ = reason;
+ }
};
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 255ee0347e..c6adb90f82 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -20,7 +20,7 @@ namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
-constexpr auto kI32Const = LiftoffAssembler::VarState::kI32Const;
+constexpr auto KIntConst = LiftoffAssembler::VarState::KIntConst;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
@@ -41,6 +41,8 @@ class MovableLabel {
Label* get() { return label_.get(); }
MovableLabel() : MovableLabel(new Label()) {}
+ operator bool() const { return label_ != nullptr; }
+
static MovableLabel None() { return MovableLabel(nullptr); }
private:
@@ -53,6 +55,8 @@ class MovableLabel {
public:
Label* get() { return &label_; }
+ operator bool() const { return true; }
+
static MovableLabel None() { return MovableLabel(); }
private:
@@ -60,6 +64,25 @@ class MovableLabel {
};
#endif
+wasm::WasmValue WasmPtrValue(uintptr_t ptr) {
+ using int_t = std::conditional<kPointerSize == 8, uint64_t, uint32_t>::type;
+ static_assert(sizeof(int_t) == sizeof(uintptr_t), "weird uintptr_t");
+ return wasm::WasmValue(static_cast<int_t>(ptr));
+}
+
+wasm::WasmValue WasmPtrValue(void* ptr) {
+ return WasmPtrValue(reinterpret_cast<uintptr_t>(ptr));
+}
+
+compiler::CallDescriptor* GetLoweredCallDescriptor(
+ Zone* zone, compiler::CallDescriptor* call_desc) {
+ return kPointerSize == 4 ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
+ : call_desc;
+}
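
On 32-bit targets every i64 parameter or return value is lowered to a pair of i32s, so the lowered call descriptor gains one extra slot per i64. An illustrative mapping (not compiler output):

    wasm signature:   (i64, f32) -> i64
    lowered on ia32:  (i32 low, i32 high, f32) -> (i32 low, i32 high)

This is why {PrepareCall} walks the descriptor inputs backwards half by half, and {FinishCall} may combine two return registers via {LiftoffRegister::ForPair}.
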
+
+constexpr ValueType kTypesArr_ilfd[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64};
+constexpr Vector<const ValueType> kTypes_ilfd = ArrayVector(kTypesArr_ilfd);
+
class LiftoffCompiler {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
@@ -106,32 +129,30 @@ class LiftoffCompiler {
};
LiftoffCompiler(LiftoffAssembler* liftoff_asm,
- compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env,
+ compiler::CallDescriptor* call_descriptor,
+ compiler::ModuleEnv* env,
compiler::RuntimeExceptionSupport runtime_exception_support,
SourcePositionTableBuilder* source_position_table_builder,
std::vector<trap_handler::ProtectedInstructionData>*
protected_instructions,
Zone* compilation_zone, std::unique_ptr<Zone>* codegen_zone)
: asm_(liftoff_asm),
- call_desc_(call_desc),
+ descriptor_(
+ GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
- min_size_(env_->module->initial_pages * wasm::kWasmPageSize),
- max_size_((env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages) *
+ min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
+ max_size_(uint64_t{env_->module->has_maximum_pages
+ ? env_->module->maximum_pages
+ : wasm::kV8MaxWasmMemoryPages} *
wasm::kWasmPageSize),
runtime_exception_support_(runtime_exception_support),
source_position_table_builder_(source_position_table_builder),
protected_instructions_(protected_instructions),
compilation_zone_(compilation_zone),
codegen_zone_(codegen_zone),
- safepoint_table_builder_(compilation_zone_) {
- // Check for overflow in max_size_.
- DCHECK_EQ(max_size_, uint64_t{env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages} *
- wasm::kWasmPageSize);
- }
+ safepoint_table_builder_(compilation_zone_) {}
+
+ ~LiftoffCompiler() { BindUnboundLabels(nullptr); }
bool ok() const { return ok_; }
@@ -142,6 +163,26 @@ class LiftoffCompiler {
BindUnboundLabels(decoder);
}
+ bool DidAssemblerBailout(Decoder* decoder) {
+ if (decoder->failed() || !asm_->did_bailout()) return false;
+ unsupported(decoder, asm_->bailout_reason());
+ return true;
+ }
+
+ bool CheckSupportedType(Decoder* decoder,
+ Vector<const ValueType> supported_types,
+ ValueType type, const char* context) {
+ char buffer[128];
+ // Check supported types.
+ for (ValueType supported : supported_types) {
+ if (type == supported) return true;
+ }
+ SNPrintF(ArrayVector(buffer), "%s %s", WasmOpcodes::TypeName(type),
+ context);
+ unsupported(decoder, buffer);
+ return false;
+ }
+
int GetSafepointTableOffset() const {
return safepoint_table_builder_.GetCodeOffset();
}
@@ -150,7 +191,8 @@ class LiftoffCompiler {
#ifdef DEBUG
// Bind all labels now, otherwise their destructor will fire a DCHECK error
// if they were referenced before.
- for (uint32_t i = 0, e = decoder->control_depth(); i < e; ++i) {
+ uint32_t control_depth = decoder ? decoder->control_depth() : 0;
+ for (uint32_t i = 0; i < control_depth; ++i) {
Control* c = decoder->control_at(i);
Label* label = c->label.get();
if (!label->is_bound()) __ bind(label);
@@ -165,14 +207,6 @@ class LiftoffCompiler {
#endif
}
- void CheckStackSizeLimit(Decoder* decoder) {
- DCHECK_GE(__ cache_state()->stack_height(), __ num_locals());
- int stack_height = __ cache_state()->stack_height() - __ num_locals();
- if (stack_height > LiftoffAssembler::kMaxValueStackHeight) {
- unsupported(decoder, "value stack grows too large");
- }
- }
-
void StartFunction(Decoder* decoder) {
int num_locals = decoder->NumLocals();
__ set_num_locals(num_locals);
@@ -181,37 +215,48 @@ class LiftoffCompiler {
}
}
- void ProcessParameter(uint32_t param_idx, uint32_t input_location) {
- ValueType type = __ local_type(param_idx);
- RegClass rc = reg_class_for(type);
- compiler::LinkageLocation param_loc =
- call_desc_->GetInputLocation(input_location);
- if (param_loc.IsRegister()) {
- DCHECK(!param_loc.IsAnyRegister());
- int reg_code = param_loc.AsRegister();
- LiftoffRegister reg =
- rc == kGpReg ? LiftoffRegister(Register::from_code(reg_code))
- : LiftoffRegister(DoubleRegister::from_code(reg_code));
- LiftoffRegList cache_regs =
- rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
- if (cache_regs.has(reg)) {
- // This is a cache register, just use it.
- __ PushRegister(type, reg);
- return;
+ // Returns the number of inputs processed (1 or 2).
+ uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
+ const int num_lowered_params = 1 + (kNeedI64RegPair && type == kWasmI64);
+ // Initialize to anything, will be set in the loop and used afterwards.
+ LiftoffRegister reg = LiftoffRegister::from_code(kGpReg, 0);
+ RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
+ LiftoffRegList pinned;
+ for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
+ compiler::LinkageLocation param_loc =
+ descriptor_->GetInputLocation(input_idx + pair_idx);
+ // Initialize to anything, will be set in both arms of the if.
+ LiftoffRegister in_reg = LiftoffRegister::from_code(kGpReg, 0);
+ if (param_loc.IsRegister()) {
+ DCHECK(!param_loc.IsAnyRegister());
+ int reg_code = param_loc.AsRegister();
+ RegList cache_regs = rc == kGpReg ? kLiftoffAssemblerGpCacheRegs
+ : kLiftoffAssemblerFpCacheRegs;
+ if (cache_regs & (1 << reg_code)) {
+ // This is a cache register, just use it.
+ in_reg = LiftoffRegister::from_code(rc, reg_code);
+ } else {
+ // Move to a cache register (spill one if necessary).
+ // Note that we cannot create a {LiftoffRegister} for reg_code, since
+ // {LiftoffRegister} can only store cache regs.
+ in_reg = __ GetUnusedRegister(rc, pinned);
+ if (rc == kGpReg) {
+ __ Move(in_reg.gp(), Register::from_code(reg_code), type);
+ } else {
+ __ Move(in_reg.fp(), DoubleRegister::from_code(reg_code), type);
+ }
+ }
+ } else if (param_loc.IsCallerFrameSlot()) {
+ in_reg = __ GetUnusedRegister(rc, pinned);
+ ValueType lowered_type = num_lowered_params == 1 ? type : kWasmI32;
+ __ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
+ lowered_type);
}
- // Move to a cache register.
- LiftoffRegister cache_reg = __ GetUnusedRegister(rc);
- __ Move(cache_reg, reg);
- __ PushRegister(type, reg);
- return;
+ reg = pair_idx == 0 ? in_reg : LiftoffRegister::ForPair(reg, in_reg);
+ pinned.set(reg);
}
- if (param_loc.IsCallerFrameSlot()) {
- LiftoffRegister tmp_reg = __ GetUnusedRegister(rc);
- __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
- __ PushRegister(type, tmp_reg);
- return;
- }
- UNREACHABLE();
+ __ PushRegister(type, reg);
+ return num_lowered_params;
}
void StackCheck(wasm::WasmCodePosition position) {
@@ -220,69 +265,65 @@ class LiftoffCompiler {
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
__ StackCheck(ool.label.get());
- __ bind(ool.continuation.get());
+ if (ool.continuation) __ bind(ool.continuation.get());
}
void StartFunctionBody(Decoder* decoder, Control* block) {
- if (!kLiftoffAssemblerImplementedOnThisPlatform) {
- unsupported(decoder, "platform");
- return;
- }
__ EnterFrame(StackFrame::WASM_COMPILED);
__ set_has_frame(true);
- __ ReserveStackSpace(LiftoffAssembler::kStackSlotSize *
- __ GetTotalFrameSlotCount());
+ pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
+ // {PrepareStackFrame} is the first platform-specific assembler method.
+ // If this failed, we can bail out immediately, avoiding runtime overhead
+ // and potential failures because of other unimplemented methods.
+ // A platform implementing {PrepareStackFrame} must ensure that we can
+ // finish compilation without errors even if we hit unimplemented
+ // LiftoffAssembler methods.
+ if (DidAssemblerBailout(decoder)) return;
// Parameter 0 is the wasm context.
uint32_t num_params =
- static_cast<uint32_t>(call_desc_->ParameterCount()) - 1;
+ static_cast<uint32_t>(decoder->sig_->parameter_count());
for (uint32_t i = 0; i < __ num_locals(); ++i) {
- switch (__ local_type(i)) {
- case kWasmI32:
- case kWasmF32:
- // supported.
- break;
- case kWasmI64:
- unsupported(decoder, "i64 param/local");
- return;
- case kWasmF64:
- unsupported(decoder, "f64 param/local");
- return;
- default:
- unsupported(decoder, "exotic param/local");
- return;
- }
+ if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
+ return;
}
// Input 0 is the call target, the context is at 1.
constexpr int kContextParameterIndex = 1;
// Store the context parameter to a special stack slot.
compiler::LinkageLocation context_loc =
- call_desc_->GetInputLocation(kContextParameterIndex);
+ descriptor_->GetInputLocation(kContextParameterIndex);
DCHECK(context_loc.IsRegister());
DCHECK(!context_loc.IsAnyRegister());
Register context_reg = Register::from_code(context_loc.AsRegister());
__ SpillContext(context_reg);
- uint32_t param_idx = 0;
- for (; param_idx < num_params; ++param_idx) {
- constexpr int kFirstActualParameterIndex = kContextParameterIndex + 1;
- ProcessParameter(param_idx, param_idx + kFirstActualParameterIndex);
+ // Input 0 is the code target, 1 is the context. First parameter at 2.
+ uint32_t input_idx = kContextParameterIndex + 1;
+ for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
+ input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
}
+ DCHECK_EQ(input_idx, descriptor_->InputCount());
// Set to a gp register, to mark this uninitialized.
LiftoffRegister zero_double_reg(Register::from_code<0>());
DCHECK(zero_double_reg.is_gp());
- for (; param_idx < __ num_locals(); ++param_idx) {
+ for (uint32_t param_idx = num_params; param_idx < __ num_locals();
+ ++param_idx) {
ValueType type = decoder->GetLocalType(param_idx);
switch (type) {
case kWasmI32:
__ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
break;
+ case kWasmI64:
+ __ cache_state()->stack_state.emplace_back(kWasmI64, uint32_t{0});
+ break;
case kWasmF32:
+ case kWasmF64:
if (zero_double_reg.is_gp()) {
// Note: This might spill one of the registers used to hold
// parameters.
zero_double_reg = __ GetUnusedRegister(kFpReg);
- __ LoadConstant(zero_double_reg, WasmValue(0.f));
+ // Zero is represented by the bit pattern 0 for both f32 and f64.
+ __ LoadConstant(zero_double_reg, WasmValue(0.));
}
- __ PushRegister(kWasmF32, zero_double_reg);
+ __ PushRegister(type, zero_double_reg);
break;
default:
UNIMPLEMENTED();
@@ -294,9 +335,7 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(0);
- DCHECK_EQ(__ num_locals(), param_idx);
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
- CheckStackSizeLimit(decoder);
}
void GenerateOutOfLineCode(OutOfLineCode& ool) {
@@ -338,10 +377,13 @@ class LiftoffCompiler {
}
void FinishFunction(Decoder* decoder) {
+ if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(ool);
}
safepoint_table_builder_.Emit(asm_, __ GetTotalFrameSlotCount());
+ __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
+ __ GetTotalFrameSlotCount());
}
void OnFirstError(Decoder* decoder) {
@@ -391,8 +433,8 @@ class LiftoffCompiler {
// Test the condition, jump to else if zero.
Register value = __ PopToRegister(kGpReg).gp();
- __ emit_i32_test(value);
- __ emit_cond_jump(kEqual, if_block->else_state->label.get());
+ __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
+ value);
if_block->label_state.stack_base = __ cache_state()->stack_height();
// Store the state (after popping the value) for executing the else branch.
@@ -433,14 +475,15 @@ class LiftoffCompiler {
DCHECK_LE(num_args, kMaxArgs);
MachineSignature sig(kNumReturns, num_args, kReps);
- compiler::CallDescriptor* desc =
+ auto call_descriptor =
compiler::Linkage::GetSimplifiedCDescriptor(compilation_zone_, &sig);
// Before making a call, spill all cache registers.
__ SpillAllRegisters();
// Store arguments on our stack, then align the stack for calling to C.
- uint32_t num_params = static_cast<uint32_t>(desc->ParameterCount());
+ uint32_t num_params =
+ static_cast<uint32_t>(call_descriptor->ParameterCount());
__ PrepareCCall(num_params, arg_regs);
// Set parameters (in sp[0], sp[8], ...).
@@ -449,7 +492,7 @@ class LiftoffCompiler {
constexpr size_t kInputShift = 1; // Input 0 is the call target.
compiler::LinkageLocation loc =
- desc->GetInputLocation(param + kInputShift);
+ call_descriptor->GetInputLocation(param + kInputShift);
if (loc.IsRegister()) {
Register reg = Register::from_code(loc.AsRegister());
// Load address of that parameter to the register.
@@ -465,126 +508,209 @@ class LiftoffCompiler {
__ CallC(ext_ref, num_params);
// Load return value.
- compiler::LinkageLocation return_loc = desc->GetReturnLocation(0);
+ compiler::LinkageLocation return_loc =
+ call_descriptor->GetReturnLocation(0);
DCHECK(return_loc.IsRegister());
Register return_reg = Register::from_code(return_loc.AsRegister());
if (return_reg != res_reg) {
- __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg));
+ DCHECK_EQ(MachineRepresentation::kWord32,
+ sig.GetReturn(0).representation());
+ __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg), kWasmI32);
}
}
- void I32UnOp(bool (LiftoffAssembler::*emit_fn)(Register, Register),
- ExternalReference (*fallback_fn)(Isolate*)) {
+ template <ValueType type, class EmitFn>
+ void EmitUnOp(EmitFn fn) {
+ static RegClass rc = reg_class_for(type);
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetUnaryOpTargetRegister(kGpReg));
- LiftoffRegister src_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- if (!emit_fn || !(asm_->*emit_fn)(dst_reg.gp(), src_reg.gp())) {
+ LiftoffRegister dst = pinned.set(__ GetUnaryOpTargetRegister(rc));
+ LiftoffRegister src = __ PopToRegister(rc, pinned);
+ fn(dst, src);
+ __ PushRegister(type, dst);
+ }
+
+ void EmitI32UnOpWithCFallback(bool (LiftoffAssembler::*emit_fn)(Register,
+ Register),
+ ExternalReference (*fallback_fn)(Isolate*)) {
+ auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
+ if (emit_fn && (asm_->*emit_fn)(dst.gp(), src.gp())) return;
ExternalReference ext_ref = fallback_fn(asm_->isolate());
- Register args[] = {src_reg.gp()};
- GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
- }
- __ PushRegister(kWasmI32, dst_reg);
+ Register args[] = {src.gp()};
+ GenerateCCall(dst.gp(), arraysize(args), args, ext_ref);
+ };
+ EmitUnOp<kWasmI32>(emit_with_c_fallback);
}
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
-#define CASE_UNOP(opcode, type, fn, ext_ref_fn) \
- case WasmOpcode::kExpr##opcode: \
- type##UnOp(&LiftoffAssembler::emit_##fn, ext_ref_fn); \
+#define CASE_I32_UNOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasmI32>([=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.gp(), src.gp()); \
+ }); \
+ break;
+#define CASE_FLOAT_UNOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasm##type>([=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.fp(), src.fp()); \
+ }); \
break;
switch (opcode) {
- CASE_UNOP(I32Eqz, I32, i32_eqz, nullptr)
- CASE_UNOP(I32Clz, I32, i32_clz, nullptr)
- CASE_UNOP(I32Ctz, I32, i32_ctz, nullptr)
- CASE_UNOP(I32Popcnt, I32, i32_popcnt,
- &ExternalReference::wasm_word32_popcnt)
+ CASE_I32_UNOP(I32Clz, i32_clz)
+ CASE_I32_UNOP(I32Ctz, i32_ctz)
+ case kExprI32Popcnt:
+ EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
+ &ExternalReference::wasm_word32_popcnt);
+ break;
+ case kExprI32Eqz:
+ EmitUnOp<kWasmI32>([=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i32_set_cond(kEqual, dst.gp(), src.gp());
+ });
+ break;
+ CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
+ CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
-#undef CASE_UNOP
+#undef CASE_I32_UNOP
+#undef CASE_FLOAT_UNOP
}
- void I32BinOp(void (LiftoffAssembler::*emit_fn)(Register, Register,
- Register)) {
+ template <ValueType type, typename EmitFn>
+ void EmitMonomorphicBinOp(EmitFn fn) {
+ static constexpr RegClass rc = reg_class_for(type);
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
- (asm_->*emit_fn)(dst_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
- __ PushRegister(kWasmI32, dst_reg);
+ LiftoffRegister dst = pinned.set(__ GetBinaryOpTargetRegister(rc));
+ LiftoffRegister rhs = pinned.set(__ PopToRegister(rc, pinned));
+ LiftoffRegister lhs = __ PopToRegister(rc, pinned);
+ fn(dst, lhs, rhs);
+ __ PushRegister(type, dst);
}
- void I32CCallBinOp(ExternalReference ext_ref) {
+ template <ValueType result_type, RegClass src_rc, typename EmitFn>
+ void EmitBinOpWithDifferentResultType(EmitFn fn) {
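+    // Used for comparisons such as f32.eq, which pop two fp operands but
+    // push an i32 result (0 or 1) in a gp register.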
LiftoffRegList pinned;
- LiftoffRegister dst_reg = pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
- Register args[] = {lhs_reg.gp(), rhs_reg.gp()};
- GenerateCCall(dst_reg.gp(), arraysize(args), args, ext_ref);
- __ PushRegister(kWasmI32, dst_reg);
- }
-
- void F32BinOp(void (LiftoffAssembler::*emit_fn)(DoubleRegister,
- DoubleRegister,
- DoubleRegister)) {
- LiftoffRegList pinned;
- LiftoffRegister target_reg =
- pinned.set(__ GetBinaryOpTargetRegister(kFpReg));
- LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kFpReg, pinned));
- LiftoffRegister lhs_reg = __ PopToRegister(kFpReg, pinned);
- (asm_->*emit_fn)(target_reg.fp(), lhs_reg.fp(), rhs_reg.fp());
- __ PushRegister(kWasmF32, target_reg);
+ LiftoffRegister rhs = pinned.set(__ PopToRegister(src_rc, pinned));
+ LiftoffRegister lhs = pinned.set(__ PopToRegister(src_rc, pinned));
+ LiftoffRegister dst = __ GetUnusedRegister(reg_class_for(result_type));
+ fn(dst, lhs, rhs);
+ __ PushRegister(result_type, dst);
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
-#define CASE_BINOP(opcode, type, fn) \
- case WasmOpcode::kExpr##opcode: \
- return type##BinOp(&LiftoffAssembler::emit_##fn);
-#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
- case WasmOpcode::kExpr##opcode: \
- type##CCallBinOp(ExternalReference::ext_ref_fn(asm_->isolate())); \
- break;
+#define CASE_I32_BINOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
+ });
+#define CASE_FLOAT_BINOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasm##type>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
+ });
+#define CASE_I32_CMPOP(opcode, cond) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
+ });
+#define CASE_F32_CMPOP(opcode, cond) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOpWithDifferentResultType<kWasmI32, kFpReg>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
+ });
+#define CASE_SHIFTOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp(), {}); \
+ });
+#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitMonomorphicBinOp<kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ Register args[] = {lhs.gp(), rhs.gp()}; \
+ auto ext_ref = ExternalReference::ext_ref_fn(__ isolate()); \
+ GenerateCCall(dst.gp(), arraysize(args), args, ext_ref); \
+ });
switch (opcode) {
- CASE_BINOP(I32Add, I32, i32_add)
- CASE_BINOP(I32Sub, I32, i32_sub)
- CASE_BINOP(I32Mul, I32, i32_mul)
- CASE_BINOP(I32And, I32, i32_and)
- CASE_BINOP(I32Ior, I32, i32_or)
- CASE_BINOP(I32Xor, I32, i32_xor)
- CASE_BINOP(I32Shl, I32, i32_shl)
- CASE_BINOP(I32ShrS, I32, i32_sar)
- CASE_BINOP(I32ShrU, I32, i32_shr)
+ CASE_I32_BINOP(I32Add, i32_add)
+ CASE_I32_BINOP(I32Sub, i32_sub)
+ CASE_I32_BINOP(I32Mul, i32_mul)
+ CASE_I32_BINOP(I32And, i32_and)
+ CASE_I32_BINOP(I32Ior, i32_or)
+ CASE_I32_BINOP(I32Xor, i32_xor)
+ CASE_I32_CMPOP(I32Eq, kEqual)
+ CASE_I32_CMPOP(I32Ne, kUnequal)
+ CASE_I32_CMPOP(I32LtS, kSignedLessThan)
+ CASE_I32_CMPOP(I32LtU, kUnsignedLessThan)
+ CASE_I32_CMPOP(I32GtS, kSignedGreaterThan)
+ CASE_I32_CMPOP(I32GtU, kUnsignedGreaterThan)
+ CASE_I32_CMPOP(I32LeS, kSignedLessEqual)
+ CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
+ CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
+ CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
+ CASE_F32_CMPOP(F32Eq, kEqual)
+ CASE_F32_CMPOP(F32Ne, kUnequal)
+ CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
+ CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
+ CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
+ CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
+ CASE_SHIFTOP(I32Shl, i32_shl)
+ CASE_SHIFTOP(I32ShrS, i32_sar)
+ CASE_SHIFTOP(I32ShrU, i32_shr)
CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
- CASE_BINOP(F32Add, F32, f32_add)
- CASE_BINOP(F32Sub, F32, f32_sub)
- CASE_BINOP(F32Mul, F32, f32_mul)
+ CASE_FLOAT_BINOP(F32Add, F32, f32_add)
+ CASE_FLOAT_BINOP(F32Sub, F32, f32_sub)
+ CASE_FLOAT_BINOP(F32Mul, F32, f32_mul)
+ CASE_FLOAT_BINOP(F64Add, F64, f64_add)
+ CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
+ CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
-#undef CASE_BINOP
+#undef CASE_I32_BINOP
+#undef CASE_FLOAT_BINOP
+#undef CASE_I32_CMPOP
+#undef CASE_F32_CMPOP
+#undef CASE_SHIFTOP
#undef CASE_CCALL_BINOP
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
__ cache_state()->stack_state.emplace_back(kWasmI32, value);
- CheckStackSizeLimit(decoder);
}
void I64Const(Decoder* decoder, Value* result, int64_t value) {
- unsupported(decoder, "i64.const");
+    // The {VarState} stores constant values as int32_t, thus we only store a
+    // 64-bit constant in this field if it fits in an int32_t. Larger values
+    // cannot be used as immediates anyway, so we can just put them in a
+    // register immediately.
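+    // Example: -1 round-trips through int32_t and stays a constant, while
+    // int64_t{1} << 32 truncates to 0, fails the check, and is materialized
+    // in a register instead.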
+ int32_t value_i32 = static_cast<int32_t>(value);
+ if (value_i32 == value) {
+ __ cache_state()->stack_state.emplace_back(kWasmI64, value_i32);
+ } else {
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
+ __ LoadConstant(reg, WasmValue(value));
+ __ PushRegister(kWasmI64, reg);
+ }
}
void F32Const(Decoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
- CheckStackSizeLimit(decoder);
}
void F64Const(Decoder* decoder, Value* result, double value) {
- unsupported(decoder, "f64.const");
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ __ LoadConstant(reg, WasmValue(value));
+ __ PushRegister(kWasmF64, reg);
}
void Drop(Decoder* decoder, const Value& value) {
@@ -603,11 +729,11 @@ class LiftoffCompiler {
if (values.size() > 1) return unsupported(decoder, "multi-return");
RegClass rc = reg_class_for(values[0].type);
LiftoffRegister reg = __ PopToRegister(rc);
- __ MoveToReturnRegister(reg);
+ __ MoveToReturnRegister(reg, values[0].type);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
- static_cast<uint32_t>(call_desc_->StackParameterCount()));
+ static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
void GetLocal(Decoder* decoder, Value* result,
@@ -618,37 +744,36 @@ class LiftoffCompiler {
case kRegister:
__ PushRegister(slot.type(), slot.reg());
break;
- case kI32Const:
+ case KIntConst:
__ cache_state()->stack_state.emplace_back(operand.type,
slot.i32_const());
break;
case kStack: {
auto rc = reg_class_for(operand.type);
LiftoffRegister reg = __ GetUnusedRegister(rc);
- __ Fill(reg, operand.index);
+ __ Fill(reg, operand.index, operand.type);
__ PushRegister(slot.type(), reg);
break;
}
}
- CheckStackSizeLimit(decoder);
}
void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
+ ValueType type = dst_slot.type();
if (dst_slot.is_reg()) {
LiftoffRegister slot_reg = dst_slot.reg();
if (state.get_use_count(slot_reg) == 1) {
- __ Fill(dst_slot.reg(), state.stack_height() - 1);
+ __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
return;
}
state.dec_used(slot_reg);
}
- ValueType type = dst_slot.type();
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
- __ Fill(dst_reg, __ cache_state()->stack_height() - 1);
+ __ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
dst_slot = LiftoffAssembler::VarState(type, dst_reg);
__ cache_state()->inc_used(dst_reg);
}
@@ -663,7 +788,7 @@ class LiftoffCompiler {
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg());
break;
- case kI32Const:
+ case KIntConst:
__ DropStackSlot(&target_slot);
target_slot = source_slot;
break;
@@ -701,7 +826,6 @@ class LiftoffCompiler {
return unsupported(decoder, "global > kPointerSize");
__ Load(value, addr, no_reg, global->offset, type, pinned);
__ PushRegister(global->type, value);
- CheckStackSizeLimit(decoder);
}
void SetGlobal(Decoder* decoder, const Value& value,
@@ -742,16 +866,76 @@ class LiftoffCompiler {
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister(kGpReg).gp();
- __ emit_i32_test(value);
- __ emit_cond_jump(kEqual, &cont_false);
+ __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
Br(target);
__ bind(&cont_false);
}
+ // Generate a branch table case, potentially reusing previously generated
+ // stack transfer code.
+ void GenerateBrCase(Decoder* decoder, uint32_t br_depth,
+ std::map<uint32_t, MovableLabel>& br_targets) {
+ MovableLabel& label = br_targets[br_depth];
+ if (label.get()->is_bound()) {
+ __ jmp(label.get());
+ } else {
+ __ bind(label.get());
+ Br(decoder->control_at(br_depth));
+ }
+ }
+
+ // Generate a branch table for input in [min, max).
+ // TODO(wasm): Generate a real branch table (like TF TableSwitch).
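+  // The table is emitted as a binary search over the key: e.g. for [0, 4)
+  // the key is first compared against 2, then against 1 or 3, so every case
+  // is reached after at most ceil(log2(table size)) comparisons.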
+ void GenerateBrTable(Decoder* decoder, LiftoffRegister tmp,
+ LiftoffRegister value, uint32_t min, uint32_t max,
+ BranchTableIterator<validate>& table_iterator,
+ std::map<uint32_t, MovableLabel>& br_targets) {
+ DCHECK_LT(min, max);
+ // Check base case.
+ if (max == min + 1) {
+ DCHECK_EQ(min, table_iterator.cur_index());
+ GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ return;
+ }
+
+ uint32_t split = min + (max - min) / 2;
+ Label upper_half;
+ __ LoadConstant(tmp, WasmValue(split));
+ __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
+ tmp.gp());
+ // Emit br table for lower half:
+ GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
+ br_targets);
+ __ bind(&upper_half);
+ // Emit br table for upper half:
+ GenerateBrTable(decoder, tmp, value, split, max, table_iterator,
+ br_targets);
+ }
+
void BrTable(Decoder* decoder, const BranchTableOperand<validate>& operand,
const Value& key) {
- unsupported(decoder, "br_table");
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(kGpReg));
+ BranchTableIterator<validate> table_iterator(decoder, operand);
+ std::map<uint32_t, MovableLabel> br_targets;
+
+ if (operand.table_count > 0) {
+ LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
+ __ LoadConstant(tmp, WasmValue(uint32_t{operand.table_count}));
+ Label case_default;
+ __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
+ value.gp(), tmp.gp());
+
+ GenerateBrTable(decoder, tmp, value, 0, operand.table_count,
+ table_iterator, br_targets);
+
+ __ bind(&case_default);
+ }
+
+ // Generate the default case.
+ GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ DCHECK(!table_iterator.has_next());
}
void Else(Decoder* decoder, Control* if_block) {
@@ -760,28 +944,45 @@ class LiftoffCompiler {
__ cache_state()->Steal(if_block->else_state->state);
}
- Label* AddOutOfLineTrap(wasm::WasmCodePosition position, uint32_t pc = 0) {
+ Label* AddOutOfLineTrap(wasm::WasmCodePosition position,
+ Builtins::Name builtin, uint32_t pc = 0) {
DCHECK(!FLAG_wasm_no_bounds_checks);
- // The pc is needed exactly if trap handlers are enabled.
- DCHECK_EQ(pc != 0, env_->use_trap_handler);
+    // The pc is only needed for the memory-OOB trap when the trap handler is
+    // enabled. Other callers should not even compute it.
+ DCHECK_EQ(pc != 0, builtin == Builtins::kThrowWasmTrapMemOutOfBounds &&
+ env_->use_trap_handler);
- out_of_line_code_.push_back(OutOfLineCode::Trap(
- Builtins::kThrowWasmTrapMemOutOfBounds, position, pc));
+ out_of_line_code_.push_back(OutOfLineCode::Trap(builtin, position, pc));
return out_of_line_code_.back().label.get();
}
- void BoundsCheckMem(uint32_t access_size, uint32_t offset, Register index,
- wasm::WasmCodePosition position, LiftoffRegList pinned) {
- DCHECK(!env_->use_trap_handler);
- if (FLAG_wasm_no_bounds_checks) return;
+  // Returns true if the memory access is statically known to be out of bounds
+  // (in which case a jump to the trap has already been emitted); returns
+  // false otherwise.
+ bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
+ Register index, LiftoffRegList pinned) {
+ const bool statically_oob =
+ access_size > max_size_ || offset > max_size_ - access_size;
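+    // E.g. a 4-byte access at offset {max_size_ - 2} extends two bytes past
+    // the largest possible memory, so it is rejected here without emitting
+    // any dynamic bounds check.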
+
+ if (!statically_oob &&
+ (FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
+ return false;
+ }
- Label* trap_label = AddOutOfLineTrap(position);
+ Label* trap_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapMemOutOfBounds);
- if (access_size > max_size_ || offset > max_size_ - access_size) {
- // The access will be out of bounds, even for the largest memory.
+ if (statically_oob) {
__ emit_jump(trap_label);
- return;
+ Control* current_block = decoder->control_at(0);
+ if (current_block->reachable()) {
+ current_block->reachability = kSpecOnlyReachable;
+ }
+ return true;
}
+
+ DCHECK(!env_->use_trap_handler);
+ DCHECK(!FLAG_wasm_no_bounds_checks);
+
uint32_t end_offset = offset + access_size - 1;
// If the end offset is larger than the smallest memory, dynamically check
@@ -793,8 +994,8 @@ class LiftoffCompiler {
__ LoadFromContext(mem_size.gp(), offsetof(WasmContext, mem_size), 4);
__ LoadConstant(end_offset_reg, WasmValue(end_offset));
if (end_offset >= min_size_) {
- __ emit_i32_compare(end_offset_reg.gp(), mem_size.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32,
+ end_offset_reg.gp(), mem_size.gp());
}
// Just reuse the end_offset register for computing the effective size.
@@ -802,8 +1003,9 @@ class LiftoffCompiler {
__ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
end_offset_reg.gp());
- __ emit_i32_compare(index, effective_size_reg.gp());
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index,
+ effective_size_reg.gp());
+ return false;
}
void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
@@ -842,22 +1044,23 @@ class LiftoffCompiler {
}
void GenerateRuntimeCall(int num_args, Register* args) {
- compiler::CallDescriptor* desc =
- compiler::Linkage::GetRuntimeCallDescriptor(
- compilation_zone_, Runtime::kWasmTraceMemory, num_args,
- compiler::Operator::kNoProperties,
- compiler::CallDescriptor::kNoFlags);
+ auto call_descriptor = compiler::Linkage::GetRuntimeCallDescriptor(
+ compilation_zone_, Runtime::kWasmTraceMemory, num_args,
+ compiler::Operator::kNoProperties, compiler::CallDescriptor::kNoFlags);
// Currently, only one argument is supported. More arguments require some
// caution for the parallel register moves (reuse StackTransferRecipe).
DCHECK_EQ(1, num_args);
constexpr size_t kInputShift = 1; // Input 0 is the call target.
- compiler::LinkageLocation param_loc = desc->GetInputLocation(kInputShift);
+ compiler::LinkageLocation param_loc =
+ call_descriptor->GetInputLocation(kInputShift);
if (param_loc.IsRegister()) {
Register reg = Register::from_code(param_loc.AsRegister());
- __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]));
+ __ Move(LiftoffRegister(reg), LiftoffRegister(args[0]),
+ LiftoffAssembler::kWasmIntPtr);
} else {
DCHECK(param_loc.IsCallerFrameSlot());
- __ PushCallerFrameSlot(LiftoffRegister(args[0]));
+ __ PushCallerFrameSlot(LiftoffRegister(args[0]),
+ LiftoffAssembler::kWasmIntPtr);
}
// Allocate the codegen zone if not done before.
@@ -873,14 +1076,11 @@ class LiftoffCompiler {
const MemoryAccessOperand<validate>& operand,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
- if (value_type != kWasmI32 && value_type != kWasmF32)
- return unsupported(decoder, "unsupported load type");
+ if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "load")) return;
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister(kGpReg)).gp();
- if (!env_->use_trap_handler) {
- // Emit an explicit bounds check.
- BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
- pinned);
+ if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
+ return;
}
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
@@ -890,10 +1090,11 @@ class LiftoffCompiler {
__ Load(value, addr, index, operand.offset, type, pinned,
&protected_load_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(), protected_load_pc);
+ AddOutOfLineTrap(decoder->position(),
+ Builtins::kThrowWasmTrapMemOutOfBounds,
+ protected_load_pc);
}
__ PushRegister(value_type, value);
- CheckStackSizeLimit(decoder);
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -905,16 +1106,13 @@ class LiftoffCompiler {
const MemoryAccessOperand<validate>& operand,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
- if (value_type != kWasmI32 && value_type != kWasmF32)
- return unsupported(decoder, "unsupported store type");
+ if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "store")) return;
RegClass rc = reg_class_for(value_type);
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(rc));
Register index = pinned.set(__ PopToRegister(kGpReg, pinned)).gp();
- if (!env_->use_trap_handler) {
- // Emit an explicit bounds check.
- BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
- pinned);
+ if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
+ return;
}
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
@@ -922,7 +1120,9 @@ class LiftoffCompiler {
__ Store(addr, index, operand.offset, value, type, pinned,
&protected_store_pc);
if (env_->use_trap_handler) {
- AddOutOfLineTrap(decoder->position(), protected_store_pc);
+ AddOutOfLineTrap(decoder->position(),
+ Builtins::kThrowWasmTrapMemOutOfBounds,
+ protected_store_pc);
}
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(true, type.mem_rep(), index, operand.offset,
@@ -942,11 +1142,17 @@ class LiftoffCompiler {
const Value args[], Value returns[]) {
if (operand.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
+ if (operand.sig->return_count() == 1 &&
+ !CheckSupportedType(decoder, kTypes_ilfd, operand.sig->GetReturn(0),
+ "return"))
+ return;
- compiler::CallDescriptor* call_desc =
+ auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
+ call_descriptor =
+ GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
- __ PrepareCall(operand.sig, call_desc);
+ __ PrepareCall(operand.sig, call_descriptor);
source_position_table_builder_->AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
@@ -965,14 +1171,166 @@ class LiftoffCompiler {
safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ FinishCall(operand.sig, call_desc);
+ __ FinishCall(operand.sig, call_descriptor);
}
- void CallIndirect(Decoder* decoder, const Value& index,
+ void CallIndirect(Decoder* decoder, const Value& index_val,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
- unsupported(decoder, "call_indirect");
+ if (operand.sig->return_count() > 1) {
+ return unsupported(decoder, "multi-return");
+ }
+ if (operand.sig->return_count() == 1 &&
+ !CheckSupportedType(decoder, kTypes_ilfd, operand.sig->GetReturn(0),
+ "return")) {
+ return;
+ }
+
+ // Assume only one table for now.
+ uint32_t table_index = 0;
+
+ // Pop the index.
+ LiftoffRegister index = __ PopToRegister(kGpReg);
+    // If that register is still in use after popping, move the value to a
+    // fresh register, since the original register is about to be modified.
+ if (__ cache_state()->is_used(index)) {
+ LiftoffRegister new_index =
+ __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(index));
+ __ Move(new_index, index, kWasmI32);
+ index = new_index;
+ }
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ // Get three temporary registers.
+ LiftoffRegister table = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp_const =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ LiftoffRegister* explicit_context = nullptr;
+
+ // Bounds check against the table size.
+ Label* invalid_func_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncInvalid);
+
+ static constexpr LoadType kPointerLoadType =
+ kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
+ static constexpr int kFixedArrayOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag;
+
+ uint32_t canonical_sig_num = env_->module->signature_ids[operand.sig_index];
+ DCHECK_GE(canonical_sig_num, 0);
+ DCHECK_GE(kMaxInt, canonical_sig_num);
+
+ if (WASM_CONTEXT_TABLES) {
+ // Compare against table size stored in {wasm_context->table_size}.
+ __ LoadFromContext(tmp_const.gp(), offsetof(WasmContext, table_size),
+ sizeof(uint32_t));
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
+ index.gp(), tmp_const.gp());
+ // Load the table from {wasm_context->table}
+ __ LoadFromContext(table.gp(), offsetof(WasmContext, table),
+ kPointerSize);
+      // Load the signature from {wasm_context->table[$index].sig_id}, i.e.
+      // wasm_context.table + $index * sizeof(IndirectFunctionTableEntry)
+      //                    + offsetof(IndirectFunctionTableEntry, sig_id).
+ __ LoadConstant(
+ tmp_const,
+ WasmValue(static_cast<uint32_t>(sizeof(IndirectFunctionTableEntry))));
+ __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
+ __ Load(scratch, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, sig_id), LoadType::kI32Load,
+ pinned);
+
+ __ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
+
+ Label* sig_mismatch_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label,
+ LiftoffAssembler::kWasmIntPtr, scratch.gp(),
+ tmp_const.gp());
+
+ // Load the target address from {wasm_context->table[$index].target}
+ __ Load(scratch, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, target), kPointerLoadType,
+ pinned);
+
+ // Load the context from {wasm_context->table[$index].context}
+ // TODO(wasm): directly allocate the correct context register to avoid
+ // any potential moves.
+ __ Load(tmp_const, table.gp(), index.gp(),
+ offsetof(IndirectFunctionTableEntry, context), kPointerLoadType,
+ pinned);
+ explicit_context = &tmp_const;
+ } else {
+ // Compare against table size, which is a patchable constant.
+ uint32_t table_size =
+ env_->module->function_tables[table_index].initial_size;
+
+ __ LoadConstant(tmp_const, WasmValue(table_size),
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
+ index.gp(), tmp_const.gp());
+
+ wasm::GlobalHandleAddress function_table_handle_address =
+ env_->function_tables[table_index];
+ __ LoadConstant(table, WasmPtrValue(function_table_handle_address),
+ RelocInfo::WASM_GLOBAL_HANDLE);
+ __ Load(table, table.gp(), no_reg, 0, kPointerLoadType, pinned);
+
+ // Load signature from the table and check.
+ // The table is a FixedArray; signatures are encoded as SMIs.
+ // [sig1, code1, sig2, code2, sig3, code3, ...]
+ static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
+ static_assert(compiler::kFunctionTableSignatureOffset == 0,
+ "consistency");
+ static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
+ __ LoadConstant(tmp_const, WasmValue(kPointerSizeLog2 + 1));
+ // Shift index such that it's the offset of the signature in the
+ // FixedArray.
+ __ emit_i32_shl(index.gp(), index.gp(), tmp_const.gp(), pinned);
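+      // Each entry occupies two pointers, so {index << (kPointerSizeLog2 + 1)}
+      // equals {index * 2 * kPointerSize}, the byte offset of this entry's
+      // signature slot within the FixedArray payload.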
+
+ // Load the signature.
+ __ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset,
+ kPointerLoadType, pinned);
+
+ __ LoadConstant(tmp_const, WasmPtrValue(Smi::FromInt(canonical_sig_num)));
+
+ Label* sig_mismatch_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label,
+ LiftoffAssembler::kWasmIntPtr, scratch.gp(),
+ tmp_const.gp());
+
+ // Load code object.
+ __ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset + kPointerSize,
+ kPointerLoadType, pinned);
+
+ // Move the pointer from the Code object to the instruction start.
+ __ LoadConstant(tmp_const,
+ WasmPtrValue(Code::kHeaderSize - kHeapObjectTag));
+ __ emit_ptrsize_add(scratch.gp(), scratch.gp(), tmp_const.gp());
+ }
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+
+ auto call_descriptor =
+ compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
+ call_descriptor =
+ GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+
+ Register target = scratch.gp();
+ __ PrepareCall(operand.sig, call_descriptor, &target, explicit_context);
+ __ CallIndirect(operand.sig, call_descriptor, target);
+
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ __ FinishCall(operand.sig, call_descriptor);
}
+
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
@@ -1009,11 +1367,11 @@ class LiftoffCompiler {
private:
LiftoffAssembler* const asm_;
- compiler::CallDescriptor* const call_desc_;
+ compiler::CallDescriptor* const descriptor_;
compiler::ModuleEnv* const env_;
// {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
- const uint32_t min_size_;
- const uint32_t max_size_;
+ const uint64_t min_size_;
+ const uint64_t max_size_;
const compiler::RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
std::vector<OutOfLineCode> out_of_line_code_;
@@ -1027,6 +1385,9 @@ class LiftoffCompiler {
// code generation (in FinishCompilation).
std::unique_ptr<Zone>* codegen_zone_;
SafepointTableBuilder safepoint_table_builder_;
+  // The pc offset of the instructions that reserve the stack frame. Needed to
+  // patch in the actually required stack size at the end of compilation.
+ uint32_t pc_offset_stack_frame_construction_ = 0;
void TraceCacheState(Decoder* decoder) const {
#ifdef DEBUG
@@ -1061,11 +1422,11 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
Zone zone(isolate_->allocator(), "LiftoffCompilationZone");
const wasm::WasmModule* module = env_ ? env_->module : nullptr;
- auto* call_desc = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
+ auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, counters()->liftoff_compile_time());
wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
- decoder(&zone, module, func_body_, &liftoff_.asm_, call_desc, env_,
+ decoder(&zone, module, func_body_, &liftoff_.asm_, call_descriptor, env_,
runtime_exception_support_,
&liftoff_.source_position_table_builder_,
protected_instructions_.get(), &zone, &liftoff_.codegen_zone_);
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index bb5ef5be4a..eedbf54a17 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -8,8 +8,6 @@
#include <iosfwd>
#include <memory>
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-opcodes.h"
@@ -18,24 +16,29 @@ namespace v8 {
namespace internal {
namespace wasm {
-enum RegClass { kNoReg, kGpReg, kFpReg };
+static constexpr bool kNeedI64RegPair = kPointerSize == 4;
+
+enum RegClass : uint8_t {
+ kGpReg,
+ kFpReg,
+ // {kGpRegPair} equals {kNoReg} if {kNeedI64RegPair} is false.
+ kGpRegPair,
+ kNoReg = kGpRegPair + kNeedI64RegPair
+};
+
+enum RegPairHalf : uint8_t { kLowWord, kHighWord };
// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
- return type == kWasmI32 || type == kWasmI64 // int types
- ? kGpReg
- : type == kWasmF32 || type == kWasmF64 // float types
- ? kFpReg
- : kNoReg; // other (unsupported) types
+ return kNeedI64RegPair && type == kWasmI64 // i64 on 32 bit
+ ? kGpRegPair
+ : type == kWasmI32 || type == kWasmI64 // int types
+ ? kGpReg
+ : type == kWasmF32 || type == kWasmF64 // float types
+ ? kFpReg
+ : kNoReg; // other (unsupported) types
}
-// RegForClass<rc>: Register for rc==kGpReg, DoubleRegister for rc==kFpReg, void
-// for all other values of rc.
-template <RegClass rc>
-using RegForClass = typename std::conditional<
- rc == kGpReg, Register,
- typename std::conditional<rc == kFpReg, DoubleRegister, void>::type>::type;
-
// Maximum code of a gp cache register.
static constexpr int kMaxGpRegCode =
8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
@@ -47,14 +50,28 @@ static constexpr int kMaxFpRegCode =
// LiftoffRegister encodes both gp and fp in a unified index space.
// [0 .. kMaxGpRegCode] encodes gp registers,
// [kMaxGpRegCode+1 .. kMaxGpRegCode+kMaxFpRegCode+1] encodes fp registers.
+// I64 values on 32 bit platforms are stored in two registers, both encoded in
+// the same LiftoffRegister value.
static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
static constexpr int kAfterMaxLiftoffFpRegCode =
kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
static constexpr int kAfterMaxLiftoffRegCode = kAfterMaxLiftoffFpRegCode;
-static_assert(kAfterMaxLiftoffRegCode < 256,
- "liftoff register codes can be stored in one uint8_t");
+static constexpr int kBitsPerLiftoffRegCode =
+ 32 - base::bits::CountLeadingZeros<uint32_t>(kAfterMaxLiftoffRegCode - 1);
+static constexpr int kBitsPerGpRegCode =
+ 32 - base::bits::CountLeadingZeros<uint32_t>(kMaxGpRegCode);
+static constexpr int kBitsPerGpRegPair = 1 + 2 * kBitsPerGpRegCode;
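+// E.g. with 8 gp cache registers (kMaxGpRegCode == 7), kBitsPerGpRegCode is
+// 32 - 29 = 3, and a register pair needs 1 + 2 * 3 = 7 bits: two register
+// codes plus one tag bit marking the value as a pair.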
class LiftoffRegister {
+ static constexpr int needed_bits =
+ Max(kNeedI64RegPair ? kBitsPerGpRegPair : 0, kBitsPerLiftoffRegCode);
+ using storage_t = std::conditional<
+ needed_bits <= 8, uint8_t,
+ std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;
+ static_assert(8 * sizeof(storage_t) >= needed_bits &&
+ 8 * sizeof(storage_t) < 2 * needed_bits,
+ "right type has been chosen");
+
public:
explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
DCHECK_EQ(reg, gp());
@@ -67,6 +84,7 @@ class LiftoffRegister {
static LiftoffRegister from_liftoff_code(int code) {
DCHECK_LE(0, code);
DCHECK_GT(kAfterMaxLiftoffRegCode, code);
+ DCHECK_EQ(code, static_cast<storage_t>(code));
return LiftoffRegister(code);
}
@@ -81,12 +99,40 @@ class LiftoffRegister {
}
}
+ static LiftoffRegister ForPair(LiftoffRegister low, LiftoffRegister high) {
+ DCHECK(kNeedI64RegPair);
+ DCHECK_NE(low, high);
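+    // E.g. with kBitsPerGpRegCode == 4, pairing gp2 (low) with gp5 (high)
+    // yields 2 | 5 << 4 | 1 << 8 == 0x152; the top bit tags this as a pair.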
+ storage_t combined_code = low.gp().code() |
+ high.gp().code() << kBitsPerGpRegCode |
+ 1 << (2 * kBitsPerGpRegCode);
+ return LiftoffRegister(combined_code);
+ }
+
+ constexpr bool is_pair() const {
+ return kNeedI64RegPair && (code_ & (1 << (2 * kBitsPerGpRegCode))) != 0;
+ }
constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
constexpr bool is_fp() const {
return code_ >= kAfterMaxLiftoffGpRegCode &&
code_ < kAfterMaxLiftoffFpRegCode;
}
+ LiftoffRegister low() const { return LiftoffRegister(low_gp()); }
+
+ LiftoffRegister high() const { return LiftoffRegister(high_gp()); }
+
+ Register low_gp() const {
+ DCHECK(is_pair());
+ static constexpr storage_t kCodeMask = (1 << kBitsPerGpRegCode) - 1;
+ return Register::from_code(code_ & kCodeMask);
+ }
+
+ Register high_gp() const {
+ DCHECK(is_pair());
+ static constexpr storage_t kCodeMask = (1 << kBitsPerGpRegCode) - 1;
+ return Register::from_code((code_ >> kBitsPerGpRegCode) & kCodeMask);
+ }
+
Register gp() const {
DCHECK(is_gp());
return Register::from_code(code_);
@@ -97,31 +143,46 @@ class LiftoffRegister {
return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
}
- int liftoff_code() const { return code_; }
+ uint32_t liftoff_code() const {
+ DCHECK(is_gp() || is_fp());
+ return code_;
+ }
RegClass reg_class() const {
- DCHECK(is_gp() || is_fp());
- return is_gp() ? kGpReg : kFpReg;
+ return is_pair() ? kGpRegPair : is_gp() ? kGpReg : kFpReg;
}
bool operator==(const LiftoffRegister other) const {
+ DCHECK_EQ(is_pair(), other.is_pair());
return code_ == other.code_;
}
bool operator!=(const LiftoffRegister other) const {
+ DCHECK_EQ(is_pair(), other.is_pair());
return code_ != other.code_;
}
+ bool overlaps(const LiftoffRegister other) const {
+ if (is_pair()) return low().overlaps(other) || high().overlaps(other);
+ if (other.is_pair()) return *this == other.low() || *this == other.high();
+ return *this == other;
+ }
private:
- uint8_t code_;
+ storage_t code_;
- explicit constexpr LiftoffRegister(uint8_t code) : code_(code) {}
+ explicit constexpr LiftoffRegister(storage_t code) : code_(code) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegister),
"LiftoffRegister can efficiently be passed by value");
inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
- return reg.is_gp() ? os << "gp" << reg.gp().code()
- : os << "fp" << reg.fp().code();
+ if (reg.is_pair()) {
+ return os << "<gp" << reg.low_gp().code() << "+" << reg.high_gp().code()
+ << ">";
+ } else if (reg.is_gp()) {
+ return os << "gp" << reg.gp().code();
+ } else {
+ return os << "fp" << reg.fp().code();
+ }
}
class LiftoffRegList {
@@ -144,16 +205,30 @@ class LiftoffRegList {
}
LiftoffRegister set(LiftoffRegister reg) {
- regs_ |= storage_t{1} << reg.liftoff_code();
+ if (reg.is_pair()) {
+ regs_ |= storage_t{1} << reg.low().liftoff_code();
+ regs_ |= storage_t{1} << reg.high().liftoff_code();
+ } else {
+ regs_ |= storage_t{1} << reg.liftoff_code();
+ }
return reg;
}
LiftoffRegister clear(LiftoffRegister reg) {
- regs_ &= ~(storage_t{1} << reg.liftoff_code());
+ if (reg.is_pair()) {
+ regs_ &= ~(storage_t{1} << reg.low().liftoff_code());
+ regs_ &= ~(storage_t{1} << reg.high().liftoff_code());
+ } else {
+ regs_ &= ~(storage_t{1} << reg.liftoff_code());
+ }
return reg;
}
bool has(LiftoffRegister reg) const {
+ if (reg.is_pair()) {
+ DCHECK_EQ(has(reg.low()), has(reg.high()));
+ reg = reg.low();
+ }
return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
}
@@ -211,7 +286,7 @@ class LiftoffRegList {
template <typename... Regs>
static LiftoffRegList ForRegs(Regs... regs) {
std::array<LiftoffRegister, sizeof...(regs)> regs_arr{
- LiftoffRegister(regs)...};
+ {LiftoffRegister(regs)...}};
LiftoffRegList list;
for (LiftoffRegister reg : regs_arr) list.set(reg);
return list;
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 50ab1e82c8..fda98aea62 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -2,180 +2,542 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
+#ifndef V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
+#define V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("mips " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+namespace liftoff {
+
+// sp-8 holds the stack marker, sp-16 the wasm context, and the first stack
+// slot is located at sp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
+inline MemOperand GetStackSlot(uint32_t index) {
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
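+// E.g. with 8-byte stack slots, slot 0 lives at sp-24 and slot 1 at sp-32,
+// directly below the stack marker (sp-8) and the wasm context (sp-16).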
+
+inline MemOperand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
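+// For an i64 in slot {n}, the low word is spilled at half index {2n} (the
+// same address as the full slot) and the high word at half index {2n + 1},
+// four bytes further down.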
+
+inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+
+} // namespace liftoff
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
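+  // Placeholder instruction: this nop-like addiu is later rewritten by
+  // {PatchPrepareStackFrame} with the real (negative) frame size, once the
+  // number of spill slots is known.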
+ addiu(sp, sp, 0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+ DCHECK_LE(bytes, kMaxInt);
+  // We cannot run out of space, so just pass any size big enough to keep the
+  // assembler from trying to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.addiu(sp, sp, -bytes);
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type()) {
+ case kWasmI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kWasmI64: {
+ DCHECK(RelocInfo::IsNone(rmode));
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
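+      // E.g. 0x0000000100000002 splits into low_word 2 and high_word 1, each
+      // materialized into one gp register of the i64 pair.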
+ TurboAssembler::li(reg.low_gp(), Operand(low_word));
+ TurboAssembler::li(reg.high_gp(), Operand(high_word));
+ break;
+ }
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ DCHECK_LE(offset, kMaxInt);
+ lw(dst, liftoff::GetContextOperand());
+ DCHECK_EQ(4, size);
+ lw(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ sw(context, liftoff::GetContextOperand());
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ lw(dst, liftoff::GetContextOperand());
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+  // TODO(ksreten): Add a check for unaligned memory accesses.
+ Register src = no_reg;
+ if (offset_reg != no_reg) {
+ src = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(src, src_addr, offset_reg);
+ }
+ MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm)
+ : MemOperand(src_addr, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ lbu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load8U:
+ lbu(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI32Load8S:
+ lb(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load8S:
+ lb(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
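+      // The arithmetic shift by 31 replicates the sign bit of the loaded
+      // value across all 32 bits of the high word.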
+ break;
+ case LoadType::kI32Load16U:
+ TurboAssembler::Ulhu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ulhu(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI32Load16S:
+ TurboAssembler::Ulh(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ulh(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ break;
+ case LoadType::kI32Load:
+ TurboAssembler::Ulw(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
+ break;
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ TurboAssembler::Move(dst.high_gp(), dst.low_gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ break;
+ case LoadType::kI64Load: {
+ MemOperand src_op_upper = (offset_reg != no_reg)
+ ? MemOperand(src, offset_imm + 4)
+ : MemOperand(src_addr, offset_imm + 4);
+ TurboAssembler::Ulw(dst.high_gp(), src_op_upper);
+ TurboAssembler::Ulw(dst.low_gp(), src_op);
+ break;
+ }
+ case LoadType::kF32Load:
+ TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Uldc1(dst.fp(), src_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+  // TODO(ksreten): Add a check for unaligned memory accesses.
+ Register dst = no_reg;
+ if (offset_reg != no_reg) {
+ dst = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(dst, dst_addr, offset_reg);
+ }
+ MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
+ : MemOperand(dst_addr, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store8:
+ sb(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store16:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store16:
+ TurboAssembler::Ush(src.gp(), dst_op, t8);
+ break;
+ case StoreType::kI64Store32:
+ src = src.low();
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ TurboAssembler::Usw(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store: {
+ MemOperand dst_op_upper = (offset_reg != no_reg)
+ ? MemOperand(dst, offset_imm + 4)
+ : MemOperand(dst_addr, offset_imm + 4);
+ TurboAssembler::Usw(src.high_gp(), dst_op_upper);
+ TurboAssembler::Usw(src.low_gp(), dst_op);
+ break;
+ }
+ case StoreType::kF32Store:
+ TurboAssembler::Uswc1(src.fp(), dst_op, t8);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Usdc1(src.fp(), dst_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ DCHECK_NE(dst_index, src_index);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ // TODO(wasm): Extract the destination register from the CallDescriptor.
+ // TODO(wasm): Add multi-return support.
+ LiftoffRegister dst =
+ reg.is_pair()
+ ? LiftoffRegister::ForPair(LiftoffRegister(v0), LiftoffRegister(v1))
+ : reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f2);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::mov(dst, src);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ sw(reg.gp(), dst);
+ break;
+ case kWasmI64:
+ sw(reg.low_gp(), dst);
+ sw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ swc1(reg.fp(), dst);
+ break;
+ case kWasmF64:
+ TurboAssembler::Sdc1(reg.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ sw(tmp.gp(), dst);
+ break;
+ }
+ case kWasmI64: {
+ LiftoffRegister low = GetUnusedRegister(kGpReg);
+ LiftoffRegister high = GetUnusedRegister(kGpReg);
+
+ int32_t low_word = value.to_i64();
+ int32_t high_word = value.to_i64() >> 32;
+ TurboAssembler::li(low.gp(), Operand(low_word));
+ TurboAssembler::li(high.gp(), Operand(high_word));
+
+ sw(low.gp(), dst);
+ sw(high.gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ MemOperand src = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ lw(reg.gp(), src);
+ break;
+ case kWasmI64:
+ lw(reg.low_gp(), src);
+ lw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ break;
+ case kWasmF32:
+ lwc1(reg.fp(), src);
+ break;
+ case kWasmF64:
+ TurboAssembler::Ldc1(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- UNIMPLEMENTED(); \
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
+ lw(reg, liftoff::GetHalfStackSlot(half_index));
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_GP_UNOP(name) \
- bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+
+// clang-format off
+I32_BINOP(add, addu)
+I32_BINOP(sub, subu)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ emit_i32_add(dst, lhs, rhs);
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name( \
+ Register dst, Register lhs, Register rhs, LiftoffRegList pinned) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_BINOP(name) \
+
+I32_SHIFTOP(shl, sllv)
+I32_SHIFTOP(sar, srav)
+I32_SHIFTOP(shr, srlv)
+
+#undef I32_SHIFTOP
+
+#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
- }
-
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-
-#undef UNIMPLEMENTED_GP_BINOP
-#undef UNIMPLEMENTED_GP_UNOP
+ instruction(dst, lhs, rhs); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+
+FP_BINOP(f32_add, add_s)
+FP_BINOP(f32_sub, sub_s)
+FP_BINOP(f32_mul, mul_s)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+FP_BINOP(f64_add, add_d)
+FP_BINOP(f64_sub, sub_d)
+FP_BINOP(f64_mul, mul_d)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
+
+#undef FP_BINOP
-#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ }
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ Label true_label;
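+  // When dst does not alias lhs, preload dst with 1 so that only the false
+  // path needs another write; when it does alias, writing dst first would
+  // clobber the comparison operand, so 1 is set on a separate path below.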
+ if (dst != lhs) {
+ ori(dst, zero_reg, 0x1);
+ }
+
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(zero_reg));
+ }
+  // If the condition did not hold, set dst to 0.
+ TurboAssembler::mov(dst, zero_reg);
+
+ if (dst != lhs) {
+ bind(&true_label);
+ } else {
+ Label end_label;
+ TurboAssembler::Branch(&end_label);
+ bind(&true_label);
+
+ ori(dst, zero_reg, 0x1);
+ bind(&end_label);
+ }
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize);  // 16-bit immediate
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
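
The emit_i32_set_cond sequence above materializes a boolean without a
conditional move: it presets the destination to 1, branches over a clearing
move when the condition holds, and spends one extra jump when dst aliases lhs
so the preset cannot clobber the operand. A minimal C++ model of that control
flow (a sketch of the emitted logic, not V8 code):

    #include <cstdint>
    #include <iostream>

    // cond_holds stands in for the MIPS branch on (lhs cond rhs);
    // dst_aliases_lhs selects the sequence that avoids the early preset.
    int32_t set_cond(bool cond_holds, bool dst_aliases_lhs) {
      if (!dst_aliases_lhs) {
        int32_t dst = 1;             // ori dst, zero_reg, 0x1
        if (cond_holds) return dst;  // Branch(&true_label, cond, ...)
        return 0;                    // mov dst, zero_reg
      }
      // dst == lhs: decide first, then write, at the cost of an extra jump.
      if (cond_holds) return 1;  // true_label: ori dst, zero_reg, 0x1
      return 0;                  // mov dst, zero_reg; Branch(&end_label)
    }

    int main() {
      std::cout << set_cond(true, false) << set_cond(false, true) << "\n";  // 10
    }
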
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index fd63198e24..d215f4178c 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -2,180 +2,487 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
+#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
+#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("mips64 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+namespace liftoff {
+
+// sp-8 holds the stack marker, sp-16 holds the wasm context, and the first
+// stack slot is located at sp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
+inline MemOperand GetStackSlot(uint32_t index) {
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return MemOperand(sp, -kFirstStackSlotOffset - offset);
+}
+
+inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+
+} // namespace liftoff
+
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ daddiu(sp, sp, 0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+ DCHECK_LE(bytes, kMaxInt);
+  // We can't run out of space; just pass anything big enough not to make the
+  // assembler try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.daddiu(sp, sp, -bytes);
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type()) {
+ case kWasmI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kWasmI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kWasmF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ DCHECK_LE(offset, kMaxInt);
+ ld(dst, liftoff::GetContextOperand());
+ DCHECK(size == 4 || size == 8);
+ if (size == 4) {
+ lw(dst, MemOperand(dst, offset));
+ } else {
+ ld(dst, MemOperand(dst, offset));
+ }
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ sd(context, liftoff::GetContextOperand());
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ ld(dst, liftoff::GetContextOperand());
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+  // TODO(ksreten): Add a check for unaligned memory access.
+ MemOperand src_op(src_addr, offset_imm);
+ if (offset_reg != no_reg) {
+ Register src = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(src, src_addr, offset_reg);
+ src_op = MemOperand(src, offset_imm);
+ }
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ lbu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ lb(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ulhu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ulh(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ulwu(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ulw(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Uld(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Uldc1(dst.fp(), src_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+  // TODO(ksreten): Add a check for unaligned memory access.
+ Register dst = no_reg;
+ if (offset_reg != no_reg) {
+ dst = GetUnusedRegister(kGpReg, pinned).gp();
+ emit_ptrsize_add(dst, dst_addr, offset_reg);
+ }
+ MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
+ : MemOperand(dst_addr, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ sb(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::Ush(src.gp(), dst_op, t8);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::Usw(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::Usd(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::Uswc1(src.fp(), dst_op, t8);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Usdc1(src.fp(), dst_op, t8);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ DCHECK_NE(dst_index, src_index);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ LiftoffRegister dst = reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f2);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ sw(reg.gp(), dst);
+ break;
+ case kWasmI64:
+ sd(reg.gp(), dst);
+ break;
+ case kWasmF32:
+ swc1(reg.fp(), dst);
+ break;
+ case kWasmF64:
+ TurboAssembler::Sdc1(reg.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ RecordUsedSpillSlot(index);
+ MemOperand dst = liftoff::GetStackSlot(index);
+ switch (value.type()) {
+ case kWasmI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ sw(tmp.gp(), dst);
+ break;
+ }
+ case kWasmI64: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ sd(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ MemOperand src = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ lw(reg.gp(), src);
+ break;
+ case kWasmI64:
+ ld(reg.gp(), src);
+ break;
+ case kWasmF32:
+ lwc1(reg.fp(), src);
+ break;
+ case kWasmF64:
+ TurboAssembler::Ldc1(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- UNIMPLEMENTED(); \
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_GP_UNOP(name) \
- bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+
+// clang-format off
+I32_BINOP(add, addu)
+I32_BINOP(sub, subu)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
+ Register rhs) {
+ TurboAssembler::Daddu(dst, lhs, rhs);
+}
+
+bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz(dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name( \
+ Register dst, Register lhs, Register rhs, LiftoffRegList pinned) { \
+ instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_BINOP(name) \
+
+I32_SHIFTOP(shl, sllv)
+I32_SHIFTOP(sar, srav)
+I32_SHIFTOP(shr, srlv)
+
+#undef I32_SHIFTOP
+
+#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
- }
-
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-
-#undef UNIMPLEMENTED_GP_BINOP
-#undef UNIMPLEMENTED_GP_UNOP
+ instruction(dst, lhs, rhs); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+
+FP_BINOP(f32_add, add_s)
+FP_BINOP(f32_sub, sub_s)
+FP_BINOP(f32_mul, mul_s)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+FP_BINOP(f64_add, add_d)
+FP_BINOP(f64_sub, sub_d)
+FP_BINOP(f64_mul, mul_d)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
+
+#undef FP_BINOP
#undef UNIMPLEMENTED_FP_BINOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ }
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ Label true_label;
+ if (dst != lhs) {
+ ori(dst, zero_reg, 0x1);
+ }
+
+ if (rhs != no_reg) {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(rhs));
+ } else {
+ TurboAssembler::Branch(&true_label, cond, lhs, Operand(zero_reg));
+ }
+  // Otherwise (condition false), set the result to 0.
+ TurboAssembler::mov(dst, zero_reg);
+
+ if (dst != lhs) {
+ bind(&true_label);
+ } else {
+ Label end_label;
+ TurboAssembler::Branch(&end_label);
+ bind(&true_label);
+
+ ori(dst, zero_reg, 0x1);
+ bind(&end_label);
+ }
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()), 0);
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize);  // 16-bit immediate
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
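
The liftoff helpers at the top of this port fix the frame layout: the stack
marker lives at sp-8, the wasm context at sp-16, and spill slot i at
sp-(24 + 8*i). A self-contained sketch of that offset arithmetic (assuming the
8-byte kStackSlotSize implied by the i64/f64 spills):

    #include <cstdint>
    #include <cstdio>

    constexpr int32_t kStackSlotSize = 8;        // assumed: i64/f64 fit one slot
    constexpr int32_t kConstantStackSpace = 16;  // stack marker + wasm context
    constexpr int32_t kFirstStackSlotOffset =
        kConstantStackSpace + kStackSlotSize;

    // Mirrors liftoff::GetStackSlot: sp-relative offset of spill slot |index|.
    int32_t StackSlotOffset(uint32_t index) {
      return -kFirstStackSlotOffset -
             static_cast<int32_t>(index) * kStackSlotSize;
    }

    int main() {
      // Slot 0 at sp-24, slot 1 at sp-32, slot 2 at sp-40.
      std::printf("%d %d %d\n", StackSlotOffset(0), StackSlotOffset(1),
                  StackSlotOffset(2));
    }
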
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 2d62d88dec..efbb6896d6 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
+#ifndef V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
+#define V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("ppc " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_PPC_LIFTOFF_ASSEMBLER_PPC_H_
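
Every operation in this PPC port is stubbed out through BAILOUT(reason), which
expands to bailout("ppc " reason). Unlike the old UNIMPLEMENTED() stubs, a
bailout does not crash: it flags the function as unsupported so compilation can
fall back to the optimizing pipeline. A minimal sketch of that
record-and-continue pattern (illustrative names, not the real LiftoffAssembler
interface):

    #include <iostream>
    #include <string>

    // Stand-in: remember the first unsupported operation so the caller can
    // give up on baseline compilation with a useful reason.
    class StubAssembler {
     public:
      void bailout(const char* reason) {
        if (bailout_reason_.empty()) bailout_reason_ = reason;
      }
      bool did_bailout() const { return !bailout_reason_.empty(); }
      const std::string& bailout_reason() const { return bailout_reason_; }

     private:
      std::string bailout_reason_;
    };

    int main() {
      StubAssembler masm;
      masm.bailout("ppc PrepareStackFrame");  // first reason is kept
      masm.bailout("ppc Load");               // later reasons are ignored
      if (masm.did_bailout()) std::cout << masm.bailout_reason() << "\n";
    }
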
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index eebb8e4720..62145fadca 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -2,86 +2,125 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
+#ifndef V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
+#define V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
#include "src/wasm/baseline/liftoff-assembler.h"
+#define BAILOUT(reason) bailout("s390 " reason)
+
namespace v8 {
namespace internal {
namespace wasm {
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) { UNIMPLEMENTED(); }
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ BAILOUT("PrepareStackFrame");
+ return 0;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ BAILOUT("PatchPrepareStackFrame");
+}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
- UNIMPLEMENTED();
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ BAILOUT("LoadConstant");
}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {
- UNIMPLEMENTED();
+ BAILOUT("LoadFromContext");
}
-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+ BAILOUT("SpillContext");
+}
-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+ BAILOUT("FillContextInto");
+}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- UNIMPLEMENTED();
+ BAILOUT("Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
- UNIMPLEMENTED();
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ BAILOUT("LoadCallerFrameSlot");
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
+ BAILOUT("MoveStackValue");
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("MoveToReturnRegister");
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ BAILOUT("Move Register");
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ BAILOUT("Move DoubleRegister");
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- UNIMPLEMENTED();
+ BAILOUT("Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
- UNIMPLEMENTED();
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
+ BAILOUT("Fill");
+}
+
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ BAILOUT("FillI64Half");
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp binop"); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- UNIMPLEMENTED(); \
+ BAILOUT("gp unop"); \
+ return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- UNIMPLEMENTED(); \
+ BAILOUT("fp binop"); \
+ }
+#define UNIMPLEMENTED_FP_UNOP(name) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ BAILOUT("fp unop"); \
+ }
+#define UNIMPLEMENTED_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
+ LiftoffRegList pinned) { \
+ BAILOUT("shiftop"); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -90,10 +129,9 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_GP_BINOP(i32_shl)
-UNIMPLEMENTED_GP_BINOP(i32_sar)
-UNIMPLEMENTED_GP_BINOP(i32_shr)
-UNIMPLEMENTED_GP_UNOP(i32_eqz)
+UNIMPLEMENTED_SHIFTOP(i32_shl)
+UNIMPLEMENTED_SHIFTOP(i32_sar)
+UNIMPLEMENTED_SHIFTOP(i32_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
@@ -101,81 +139,115 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_UNOP(f64_neg)
#undef UNIMPLEMENTED_GP_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
+#undef UNIMPLEMENTED_FP_UNOP
+#undef UNIMPLEMENTED_SHIFTOP
-void LiftoffAssembler::emit_i32_test(Register reg) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ BAILOUT("emit_cond_jump");
}
-void LiftoffAssembler::emit_jump(Label* label) { UNIMPLEMENTED(); }
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ BAILOUT("emit_i32_set_cond");
+}
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
- UNIMPLEMENTED();
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { UNIMPLEMENTED(); }
+void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
-void LiftoffAssembler::CallTrapCallbackForTesting() { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ BAILOUT("CallTrapCallbackForTesting");
+}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- UNIMPLEMENTED();
+ BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
- UNIMPLEMENTED();
+ uint32_t src_index,
+ RegPairHalf half) {
+ BAILOUT("PushCallerFrameSlot");
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- UNIMPLEMENTED();
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ BAILOUT("PushCallerFrameSlot reg");
}
-void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ BAILOUT("PushRegisters");
+}
-void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ BAILOUT("PopRegisters");
+}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- UNIMPLEMENTED();
+ BAILOUT("DropStackSlotsAndRet");
}
void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- UNIMPLEMENTED();
+ BAILOUT("PrepareCCall");
}
void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallRegParamAddr");
}
void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
uint32_t param_idx,
uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("SetCCallStackParamAddr");
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- UNIMPLEMENTED();
+ BAILOUT("CallC");
}
-void LiftoffAssembler::CallNativeWasmCode(Address addr) { UNIMPLEMENTED(); }
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ BAILOUT("CallNativeWasmCode");
+}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- UNIMPLEMENTED();
+ BAILOUT("CallRuntime");
+}
+
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ BAILOUT("CallIndirect");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- UNIMPLEMENTED();
+ BAILOUT("AllocateStackSlot");
}
-void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { UNIMPLEMENTED(); }
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ BAILOUT("DeallocateStackSlot");
+}
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_H_
+#undef BAILOUT
+
+#endif // V8_WASM_BASELINE_S390_LIFTOFF_ASSEMBLER_S390_H_
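
Like the PPC port, s390 now routes i32_shl/i32_sar/i32_shr through a
shift-specific stub that also carries the caller's pinned-register list.
Whatever instructions a real port eventually emits, wasm i32 shifts take the
count modulo 32; a sketch of the three required behaviors:

    #include <cstdint>
    #include <cstdio>

    // Wasm i32 shifts use only the low five bits of the shift count.
    int32_t i32_shl(int32_t lhs, int32_t rhs) {
      return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (rhs & 31));
    }
    int32_t i32_sar(int32_t lhs, int32_t rhs) { return lhs >> (rhs & 31); }
    uint32_t i32_shr(uint32_t lhs, uint32_t rhs) { return lhs >> (rhs & 31); }

    int main() {
      std::printf("%d %d 0x%x\n",
                  i32_shl(1, 33),            // 2: count 33 wraps to 1
                  i32_sar(-8, 1),            // -4: arithmetic shift keeps sign
                  i32_shr(0x80000000u, 1));  // 0x40000000: logical shift does not
    }
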
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 2b3b750fc4..c1f316072d 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
-#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
+#ifndef V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
+#define V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -16,12 +16,20 @@ namespace wasm {
namespace liftoff {
+// rbp-8 holds the stack marker, rbp-16 holds the wasm context, and the first
+// stack slot is located at rbp-24.
+constexpr int32_t kConstantStackSpace = 16;
+constexpr int32_t kFirstStackSlotOffset =
+ kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
+
inline Operand GetStackSlot(uint32_t index) {
- // rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
- // is located at rbp-24.
- constexpr int32_t kFirstStackSlotOffset = -24;
- return Operand(
- rbp, kFirstStackSlotOffset - index * LiftoffAssembler::kStackSlotSize);
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize;
+ return Operand(rbp, -kFirstStackSlotOffset - offset);
+}
+
+inline Operand GetHalfStackSlot(uint32_t half_index) {
+ int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+ return Operand(rbp, -kFirstStackSlotOffset - offset);
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
@@ -31,25 +39,58 @@ inline Operand GetContextOperand() { return Operand(rbp, -16); }
// stack for a call to C.
static constexpr Register kCCallLastArgAddrReg = rax;
+inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
+ uint32_t offset_imm, LiftoffRegList pinned) {
+ // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
+ // immediate value (in 31 bits, interpreted as signed value).
+ // If the offset is bigger, we always trap and this code is not reached.
+ DCHECK(is_uint31(offset_imm));
+ if (offset == no_reg) return Operand(addr, offset_imm);
+ return Operand(addr, offset, times_1, offset_imm);
+}
+
} // namespace liftoff
-void LiftoffAssembler::ReserveStackSpace(uint32_t bytes) {
+uint32_t LiftoffAssembler::PrepareStackFrame() {
+ uint32_t offset = static_cast<uint32_t>(pc_offset());
+ sub_sp_32(0);
+ return offset;
+}
+
+void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
+ uint32_t stack_slots) {
+ uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
- subp(rsp, Immediate(bytes));
+  // We can't run out of space; just pass anything big enough not to make the
+  // assembler try to grow the buffer.
+ constexpr int kAvailableSpace = 64;
+ Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
+ patching_assembler.sub_sp_32(bytes);
}
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
switch (value.type()) {
case kWasmI32:
- if (value.to_i32() == 0) {
+ if (value.to_i32() == 0 && RelocInfo::IsNone(rmode)) {
xorl(reg.gp(), reg.gp());
} else {
- movl(reg.gp(), Immediate(value.to_i32()));
+ movl(reg.gp(), Immediate(value.to_i32(), rmode));
+ }
+ break;
+ case kWasmI64:
+ if (RelocInfo::IsNone(rmode)) {
+ TurboAssembler::Set(reg.gp(), value.to_i64());
+ } else {
+ movq(reg.gp(), value.to_i64(), rmode);
}
break;
case kWasmF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
+ case kWasmF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
default:
UNREACHABLE();
}
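
The new liftoff::GetMemOp above relies on a wasm invariant: memories are
smaller than 2GB, so every in-bounds offset_imm fits in 31 bits and can be
folded directly into the x64 addressing mode as a signed 32-bit displacement;
anything larger traps before this code runs. A small sketch of that
encodability check:

    #include <cstdint>
    #include <cstdio>

    // Matches the DCHECK(is_uint31(offset_imm)) guard: a 31-bit value stays
    // non-negative when reinterpreted as a signed 32-bit displacement.
    bool is_uint31(uint64_t value) { return value < (uint64_t{1} << 31); }

    int main() {
      std::printf("%d %d\n",
                  is_uint31(0x7FFFFFFFu),   // 1: largest encodable offset
                  is_uint31(0x80000000u));  // 0: would have trapped already
    }
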
@@ -79,42 +120,46 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
- Operand src_op = offset_reg == no_reg
- ? Operand(src_addr, offset_imm)
- : Operand(src_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register src = GetUnusedRegister(kGpReg, pinned).gp();
- movl(src, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(src, src, offset_reg);
- }
- src_op = Operand(src_addr, src, times_1, 0);
- }
+ Operand src_op =
+ liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, pinned);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
movzxbl(dst.gp(), src_op);
break;
case LoadType::kI32Load8S:
movsxbl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load8S:
+ movsxbq(dst.gp(), src_op);
+ break;
case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
movzxwl(dst.gp(), src_op);
break;
case LoadType::kI32Load16S:
movsxwl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load16S:
+ movsxwq(dst.gp(), src_op);
+ break;
case LoadType::kI32Load:
+ case LoadType::kI64Load32U:
movl(dst.gp(), src_op);
break;
+ case LoadType::kI64Load32S:
+ movsxlq(dst.gp(), src_op);
+ break;
case LoadType::kI64Load:
movq(dst.gp(), src_op);
break;
case LoadType::kF32Load:
Movss(dst.fp(), src_op);
break;
+ case LoadType::kF64Load:
+ Movsd(dst.fp(), src_op);
+ break;
default:
UNREACHABLE();
}
@@ -124,28 +169,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
- Operand dst_op = offset_reg == no_reg
- ? Operand(dst_addr, offset_imm)
- : Operand(dst_addr, offset_reg, times_1, offset_imm);
- if (offset_imm > kMaxInt) {
- // The immediate can not be encoded in the operand. Load it to a register
- // first.
- Register dst = GetUnusedRegister(kGpReg, pinned).gp();
- movl(dst, Immediate(offset_imm));
- if (offset_reg != no_reg) {
- emit_ptrsize_add(dst, dst, offset_reg);
- }
- dst_op = Operand(dst_addr, dst, times_1, 0);
- }
+ Operand dst_op =
+ liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, pinned);
if (protected_store_pc) *protected_store_pc = pc_offset();
switch (type.value()) {
case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
movb(dst_op, src.gp());
break;
case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
movw(dst_op, src.gp());
break;
case StoreType::kI32Store:
+ case StoreType::kI64Store32:
movl(dst_op, src.gp());
break;
case StoreType::kI64Store:
@@ -154,72 +191,118 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kF32Store:
Movss(dst_op, src.fp());
break;
+ case StoreType::kF64Store:
+ Movsd(dst_op, src.fp());
+ break;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
- uint32_t caller_slot_idx) {
+ uint32_t caller_slot_idx,
+ ValueType type) {
Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- movq(dst.gp(), src);
- } else {
- Movsd(dst.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ movl(dst.gp(), src);
+ break;
+ case kWasmI64:
+ movq(dst.gp(), src);
+ break;
+ case kWasmF32:
+ Movss(dst.fp(), src);
+ break;
+ case kWasmF64:
+ Movsd(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
+void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
+ ValueType type) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index);
- Spill(dst_index, reg);
+ Fill(reg, src_index, type);
+ Spill(dst_index, reg, type);
} else {
pushq(liftoff::GetStackSlot(src_index));
popq(liftoff::GetStackSlot(dst_index));
}
}
-void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
+void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
+ ValueType type) {
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
LiftoffRegister dst =
reg.is_gp() ? LiftoffRegister(rax) : LiftoffRegister(xmm1);
- if (reg != dst) Move(dst, reg);
+ if (reg != dst) Move(dst, reg, type);
}
-void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
- // The caller should check that the registers are not equal. For most
- // occurences, this is already guaranteed, so no need to check within this
- // method.
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
DCHECK_NE(dst, src);
- DCHECK_EQ(dst.reg_class(), src.reg_class());
- // TODO(clemensh): Handle different sizes here.
- if (dst.is_gp()) {
- movq(dst.gp(), src.gp());
+ if (type == kWasmI32) {
+ movl(dst, src);
} else {
- Movsd(dst.fp(), src.fp());
+ DCHECK_EQ(kWasmI64, type);
+ movq(dst, src);
}
}
-void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
- Operand dst = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- movq(dst, reg.gp());
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ if (type == kWasmF32) {
+ Movss(dst, src);
} else {
- Movsd(dst, reg.fp());
+ DCHECK_EQ(kWasmF64, type);
+ Movsd(dst, src);
+ }
+}
+
+void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
+ ValueType type) {
+ RecordUsedSpillSlot(index);
+ Operand dst = liftoff::GetStackSlot(index);
+ switch (type) {
+ case kWasmI32:
+ movl(dst, reg.gp());
+ break;
+ case kWasmI64:
+ movq(dst, reg.gp());
+ break;
+ case kWasmF32:
+ Movss(dst, reg.fp());
+ break;
+ case kWasmF64:
+ Movsd(dst, reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
+ RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32:
movl(dst, Immediate(value.to_i32()));
break;
+ case kWasmI64: {
+ // We could use movq, but this would require a temporary register. For
+ // simplicity (and to avoid potentially having to spill another register),
+ // we use two movl instructions.
+ int32_t low_word = static_cast<int32_t>(value.to_i64());
+ int32_t high_word = static_cast<int32_t>(value.to_i64() >> 32);
+ movl(dst, Immediate(low_word));
+ movl(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+ break;
+ }
case kWasmF32:
movl(dst, Immediate(value.to_f32_boxed().get_bits()));
break;
@@ -228,16 +311,31 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
+ ValueType type) {
Operand src = liftoff::GetStackSlot(index);
- // TODO(clemensh): Handle different sizes here.
- if (reg.is_gp()) {
- movq(reg.gp(), src);
- } else {
- Movsd(reg.fp(), src);
+ switch (type) {
+ case kWasmI32:
+ movl(reg.gp(), src);
+ break;
+ case kWasmI64:
+ movq(reg.gp(), src);
+ break;
+ case kWasmF32:
+ Movss(reg.fp(), src);
+ break;
+ case kWasmF64:
+ Movsd(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
}
}
+void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
+ UNREACHABLE();
+}
+
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
leal(dst, Operand(lhs, rhs, times_1, 0));
@@ -279,7 +377,8 @@ COMMUTATIVE_I32_BINOP(xor, xor)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register lhs, Register rhs,
- void (Assembler::*emit_shift)(Register)) {
+ void (Assembler::*emit_shift)(Register),
+ LiftoffRegList pinned) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
assm->movl(kScratchRegister, lhs);
@@ -293,9 +392,10 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
// register. If lhs is rcx, lhs is now the scratch register.
bool use_scratch = false;
if (rhs != rcx) {
- use_scratch =
- lhs == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
- if (use_scratch) assm->movl(kScratchRegister, rcx);
+ use_scratch = lhs == rcx ||
+ assm->cache_state()->is_used(LiftoffRegister(rcx)) ||
+ pinned.has(LiftoffRegister(rcx));
+ if (use_scratch) assm->movq(kScratchRegister, rcx);
if (lhs == rcx) lhs = kScratchRegister;
assm->movl(rcx, rhs);
}
@@ -305,27 +405,23 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
(assm->*emit_shift)(dst);
// Restore rcx if needed.
- if (use_scratch) assm->movl(rcx, kScratchRegister);
+ if (use_scratch) assm->movq(rcx, kScratchRegister);
}
} // namespace liftoff
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl);
+void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl, pinned);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl);
+void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl, pinned);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl);
-}
-
-bool LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- testl(src, src);
- setcc(zero, dst);
- movzxbl(dst, dst);
- return true;
+void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+ LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl, pinned);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -419,18 +515,128 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
-void LiftoffAssembler::emit_i32_test(Register reg) { testl(reg, reg); }
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorps(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorps(dst, src);
+ }
+}
+
+void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vaddsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ addsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ addsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movsd(kScratchDoubleReg, rhs);
+ movsd(dst, lhs);
+ subsd(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ subsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmulsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ mulsd(dst, lhs);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ mulsd(dst, rhs);
+ }
+}
-void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
- cmpl(lhs, rhs);
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit);
+ Xorpd(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit);
+ Xorpd(dst, src);
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
-void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
+ ValueType type, Register lhs,
+ Register rhs) {
+ if (rhs != no_reg) {
+ switch (type) {
+ case kWasmI32:
+ cmpl(lhs, rhs);
+ break;
+ case kWasmI64:
+ cmpq(lhs, rhs);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(type, kWasmI32);
+ testl(lhs, lhs);
+ }
+
j(cond, label);
}
+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
+ Register lhs, Register rhs) {
+ if (rhs != no_reg) {
+ cmpl(lhs, rhs);
+ } else {
+ testl(lhs, lhs);
+ }
+
+ setcc(cond, dst);
+ movzxbl(dst, dst);
+}
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label cont;
+ Label not_nan;
+
+ Ucomiss(lhs, rhs);
+  // If PF is one, one of the operands was NaN; this needs special handling.
+ j(parity_odd, &not_nan, Label::kNear);
+ // Return 1 for f32.ne, 0 for all other cases.
+ if (cond == not_equal) {
+ movl(dst, Immediate(1));
+ } else {
+ xorl(dst, dst);
+ }
+ jmp(&cont, Label::kNear);
+ bind(&not_nan);
+
+ setcc(cond, dst);
+ movzxbl(dst, dst);
+ bind(&cont);
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code) {
Register limit = GetUnusedRegister(kGpReg).gp();
LoadAddress(limit, ExternalReference::address_of_stack_limit(isolate()));
@@ -449,26 +655,37 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
- uint32_t src_index) {
+ uint32_t src_index, RegPairHalf) {
switch (src.loc()) {
case VarState::kStack:
pushq(liftoff::GetStackSlot(src_index));
break;
case VarState::kRegister:
- PushCallerFrameSlot(src.reg());
+ PushCallerFrameSlot(src.reg(), src.type());
break;
- case VarState::kI32Const:
+ case VarState::KIntConst:
pushq(Immediate(src.i32_const()));
break;
}
}
-void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg) {
- if (reg.is_gp()) {
- pushq(reg.gp());
- } else {
- subp(rsp, Immediate(kPointerSize));
- Movsd(Operand(rsp, 0), reg.fp());
+void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ case kWasmI64:
+ pushq(reg.gp());
+ break;
+ case kWasmF32:
+ subp(rsp, Immediate(kPointerSize));
+ Movss(Operand(rsp, 0), reg.fp());
+ break;
+ case kWasmF64:
+ subp(rsp, Immediate(kPointerSize));
+ Movsd(Operand(rsp, 0), reg.fp());
+ break;
+ default:
+ UNREACHABLE();
}
}
@@ -552,6 +769,16 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
CallRuntimeDelayed(zone, fid);
}
+void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ popq(kScratchRegister);
+ target = kScratchRegister;
+ }
+ call(target);
+}
+
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
subp(rsp, Immediate(size));
movp(addr, rsp);
@@ -565,4 +792,4 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
+#endif // V8_WASM_BASELINE_X64_LIFTOFF_ASSEMBLER_X64_H_
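
The emit_f32_set_cond implementation in this file special-cases NaN because
Ucomiss sets the parity flag for unordered operands, and wasm requires every
f32 comparison with a NaN input to evaluate to false except f32.ne, which must
be true. C++'s built-in float comparisons exhibit exactly the results the
emitted code reproduces:

    #include <cmath>
    #include <cstdio>

    int main() {
      float nan = std::nanf("");
      std::printf("%d %d %d\n",
                  nan == nan,    // 0: f32.eq is false on NaN
                  nan != nan,    // 1: f32.ne is the single true case
                  nan < 1.0f);   // 0: ordered comparisons are false
    }
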
diff --git a/deps/v8/src/wasm/compilation-manager.cc b/deps/v8/src/wasm/compilation-manager.cc
index a19a228f1f..4779a9f423 100644
--- a/deps/v8/src/wasm/compilation-manager.cc
+++ b/deps/v8/src/wasm/compilation-manager.cc
@@ -4,6 +4,7 @@
#include "src/wasm/compilation-manager.h"
#include "src/base/template-utils.h"
+#include "src/wasm/module-compiler.h"
#include "src/objects-inl.h"
@@ -46,6 +47,15 @@ std::shared_ptr<AsyncCompileJob> CompilationManager::RemoveJob(
void CompilationManager::TearDown() { jobs_.clear(); }
+void CompilationManager::AbortAllJobs() {
+ // Iterate over a copy of {jobs_}, because {job->Abort} modifies {jobs_}.
+ std::vector<AsyncCompileJob*> copy;
+
+ for (auto entry : jobs_) copy.push_back(entry.first);
+
+ for (auto job : copy) job->Abort();
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
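    Editor's note: AbortAllJobs snapshots the job pointers before aborting because, as the
    comment says, job->Abort() erases from jobs_, and erasing from a container while
    range-iterating it invalidates the iterator. A self-contained sketch of the idiom
    (hypothetical Job type, not V8's):

        #include <map>
        #include <vector>

        struct Job;
        std::map<Job*, int> jobs_;

        struct Job {
          void Abort() { jobs_.erase(this); }  // Mutates the container we came from.
        };

        void AbortAllJobs() {
          std::vector<Job*> copy;
          for (auto& entry : jobs_) copy.push_back(entry.first);  // snapshot the keys
          for (Job* job : copy) job->Abort();  // safe: no live iterator into jobs_
        }

        int main() {
          Job a, b;
          jobs_ = {{&a, 0}, {&b, 1}};
          AbortAllJobs();
          return static_cast<int>(jobs_.size());  // 0
        }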
diff --git a/deps/v8/src/wasm/compilation-manager.h b/deps/v8/src/wasm/compilation-manager.h
index e359b11c26..279f3e872e 100644
--- a/deps/v8/src/wasm/compilation-manager.h
+++ b/deps/v8/src/wasm/compilation-manager.h
@@ -9,12 +9,13 @@
#include "src/handles.h"
#include "src/isolate.h"
-#include "src/wasm/module-compiler.h"
namespace v8 {
namespace internal {
namespace wasm {
+class AsyncCompileJob;
+
// The CompilationManager manages a list of active WebAssembly compile jobs. The
// manager owns the memory of the compile jobs and can trigger the abortion of
// compile jobs. If the isolate tears down, the CompilationManager makes sure
@@ -29,11 +30,17 @@ class CompilationManager {
std::shared_ptr<StreamingDecoder> StartStreamingCompilation(
Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise);
- // Removes {job} from the list of active compile jobs.
+ // Remove {job} from the list of active compile jobs.
std::shared_ptr<AsyncCompileJob> RemoveJob(AsyncCompileJob* job);
+ // Cancel all AsyncCompileJobs and delete their state immediately.
void TearDown();
+ // Cancel all AsyncCompileJobs so that they are not processed any further,
+ // but delay the deletion of their state until all tasks accessing the
+ // AsyncCompileJob finish their execution.
+ void AbortAllJobs();
+
private:
AsyncCompileJob* CreateAsyncCompileJob(Isolate* isolate,
std::unique_ptr<byte[]> bytes_copy,
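    Editor's note: the header change above swaps an #include of module-compiler.h for a
    forward declaration. That suffices because compilation-manager.h only names
    AsyncCompileJob through pointers and shared_ptr, which do not need the complete type;
    the full definition is pulled in by the .cc file instead, breaking the include
    dependency. A simplified sketch of the pattern (hypothetical members):

        // compilation_manager.h (sketch)
        #include <memory>

        class AsyncCompileJob;  // forward declaration: size/layout not needed here

        class CompilationManager {
         public:
          std::shared_ptr<AsyncCompileJob> RemoveJob(AsyncCompileJob* job);
          void AbortAllJobs();
        };

        // compilation_manager.cc would #include the full AsyncCompileJob definition,
        // since only there are its members actually accessed.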
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 04d918b0a4..98aad07fcb 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -37,6 +37,12 @@ struct WasmException;
return true; \
}())
+#define RET_ON_PROTOTYPE_OPCODE(flag) \
+ DCHECK(!this->module_ || !this->module_->is_asm_js()); \
+ if (!FLAG_experimental_wasm_##flag) { \
+ this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
+ }
+
#define CHECK_PROTOTYPE_OPCODE(flag) \
DCHECK(!this->module_ || !this->module_->is_asm_js()); \
if (!FLAG_experimental_wasm_##flag) { \
@@ -50,25 +56,25 @@ struct WasmException;
#define ATOMIC_OP_LIST(V) \
V(I32AtomicLoad, Uint32) \
- V(I32AtomicAdd, Uint32) \
- V(I32AtomicSub, Uint32) \
- V(I32AtomicAnd, Uint32) \
- V(I32AtomicOr, Uint32) \
- V(I32AtomicXor, Uint32) \
- V(I32AtomicExchange, Uint32) \
V(I32AtomicLoad8U, Uint8) \
- V(I32AtomicAdd8U, Uint8) \
- V(I32AtomicSub8U, Uint8) \
- V(I32AtomicAnd8U, Uint8) \
- V(I32AtomicOr8U, Uint8) \
- V(I32AtomicXor8U, Uint8) \
- V(I32AtomicExchange8U, Uint8) \
V(I32AtomicLoad16U, Uint16) \
+ V(I32AtomicAdd, Uint32) \
+ V(I32AtomicAdd8U, Uint8) \
V(I32AtomicAdd16U, Uint16) \
+ V(I32AtomicSub, Uint32) \
+ V(I32AtomicSub8U, Uint8) \
V(I32AtomicSub16U, Uint16) \
+ V(I32AtomicAnd, Uint32) \
+ V(I32AtomicAnd8U, Uint8) \
V(I32AtomicAnd16U, Uint16) \
+ V(I32AtomicOr, Uint32) \
+ V(I32AtomicOr8U, Uint8) \
V(I32AtomicOr16U, Uint16) \
+ V(I32AtomicXor, Uint32) \
+ V(I32AtomicXor8U, Uint8) \
V(I32AtomicXor16U, Uint16) \
+ V(I32AtomicExchange, Uint32) \
+ V(I32AtomicExchange8U, Uint8) \
V(I32AtomicExchange16U, Uint16) \
V(I32AtomicCompareExchange, Uint32) \
V(I32AtomicCompareExchange8U, Uint8) \
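    Editor's note: ATOMIC_OP_LIST is an X-macro — every V(Name, Type) row expands through
    whatever macro the includer binds to V — so the reshuffle above (grouping rows by
    operation instead of by access width) changes only source layout, never the generated
    code. A toy example of how such a list is consumed:

        #include <cstdio>

        #define ATOMIC_LIST(V) /* toy stand-in, two rows */ \
          V(Add, Uint32)                                     \
          V(Sub, Uint32)

        // Bind V to a generator; each row becomes one handler function.
        #define DECLARE(Name, Type) \
          void Handle##Name() { std::printf("I32Atomic" #Name " as " #Type "\n"); }
        ATOMIC_LIST(DECLARE)
        #undef DECLARE

        int main() {
          HandleAdd();  // generated from the V(Add, Uint32) row
          HandleSub();
        }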
@@ -246,12 +252,12 @@ struct BreakDepthOperand {
template <Decoder::ValidateFlag validate>
struct CallIndirectOperand {
uint32_t table_index;
- uint32_t index;
+ uint32_t sig_index;
FunctionSig* sig = nullptr;
unsigned length = 0;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len = 0;
- index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
+ sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
if (!VALIDATE(decoder->ok())) return;
table_index = decoder->read_u8<validate>(pc + 1 + len, "table index");
if (!VALIDATE(table_index == 0)) {
@@ -648,7 +654,8 @@ class WasmDecoder : public Decoder {
uint32_t count = decoder->consume_u32v("local count");
if (decoder->failed()) return false;
- if ((count + type_list->size()) > kV8MaxWasmFunctionLocals) {
+ DCHECK_LE(type_list->size(), kV8MaxWasmFunctionLocals);
+ if (count > kV8MaxWasmFunctionLocals - type_list->size()) {
decoder->error(decoder->pc() - 1, "local count too large");
return false;
}
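    Editor's note: the rewritten local-count check above is the overflow-safe form of a
    budget test. With unsigned arithmetic, count + size can wrap (on targets where the sum
    is computed in 32 bits) and slip under the old comparison; subtracting on the right
    cannot wrap because the new DCHECK pins size() <= kV8MaxWasmFunctionLocals. A sketch
    with a stand-in limit:

        #include <cassert>
        #include <cstdint>

        constexpr uint32_t kMaxLocals = 50000;  // stand-in for kV8MaxWasmFunctionLocals

        bool TooManyOld(uint32_t count, uint32_t existing) {
          return (count + existing) > kMaxLocals;  // can wrap: 0xFFFFFFFF + 2 == 1
        }

        bool TooManyNew(uint32_t count, uint32_t existing) {
          assert(existing <= kMaxLocals);          // mirrors the DCHECK_LE above
          return count > kMaxLocals - existing;    // no wrap possible
        }

        int main() {
          // The old form wrongly accepts a huge count; the new form rejects it.
          return TooManyOld(0xFFFFFFFFu, 2u) == false && TooManyNew(0xFFFFFFFFu, 2u);
        }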
@@ -674,7 +681,7 @@ class WasmDecoder : public Decoder {
type = kWasmS128;
break;
}
- // else fall through to default.
+ V8_FALLTHROUGH;
default:
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
@@ -789,10 +796,10 @@ class WasmDecoder : public Decoder {
inline bool Complete(const byte* pc, CallIndirectOperand<validate>& operand) {
if (!VALIDATE(module_ != nullptr &&
- operand.index < module_->signatures.size())) {
+ operand.sig_index < module_->signatures.size())) {
return false;
}
- operand.sig = module_->signatures[operand.index];
+ operand.sig = module_->signatures[operand.sig_index];
return true;
}
@@ -802,7 +809,7 @@ class WasmDecoder : public Decoder {
return false;
}
if (!Complete(pc, operand)) {
- errorf(pc + 1, "invalid signature index: #%u", operand.index);
+ errorf(pc + 1, "invalid signature index: #%u", operand.sig_index);
return false;
}
return true;
@@ -1097,6 +1104,7 @@ class WasmDecoder : public Decoder {
}
}
}
+ V8_FALLTHROUGH;
}
default:
V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x (%s)", opcode,
@@ -1534,8 +1542,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_, operand, control_.size())) break;
Control* c = control_at(operand.depth);
if (!TypeCheckBreak(c)) break;
- CALL_INTERFACE_IF_REACHABLE(Br, c);
- BreakTo(c);
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
+ }
len = 1 + operand.length;
EndControl();
break;
@@ -1543,28 +1553,38 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprBrIf: {
BreakDepthOperand<validate> operand(this, this->pc_);
auto cond = Pop(0, kWasmI32);
+ if (this->failed()) break;
if (!this->Validate(this->pc_, operand, control_.size())) break;
Control* c = control_at(operand.depth);
if (!TypeCheckBreak(c)) break;
- CALL_INTERFACE_IF_REACHABLE(BrIf, cond, c);
- BreakTo(c);
+ if (control_.back().reachable()) {
+ CALL_INTERFACE(BrIf, cond, c);
+ c->br_merge()->reached = true;
+ }
len = 1 + operand.length;
break;
}
case kExprBrTable: {
BranchTableOperand<validate> operand(this, this->pc_);
BranchTableIterator<validate> iterator(this, operand);
- if (!this->Validate(this->pc_, operand, control_.size())) break;
auto key = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, operand, control_.size())) break;
uint32_t br_arity = 0;
+ std::vector<bool> br_targets(control_.size());
while (iterator.has_next()) {
const uint32_t i = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
if (!VALIDATE(target < control_.size())) {
- this->error(pos, "improper branch in br_table");
+ this->errorf(pos,
+ "improper branch in br_table target %u (depth %u)",
+ i, target);
break;
}
+ // Avoid redundant break target checks.
+ if (br_targets[target]) continue;
+ br_targets[target] = true;
// Check that label types match up.
Control* c = control_at(target);
uint32_t arity = c->br_merge()->arity;
@@ -1572,15 +1592,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
br_arity = arity;
} else if (!VALIDATE(br_arity == arity)) {
this->errorf(pos,
- "inconsistent arity in br_table target %d"
+ "inconsistent arity in br_table target %u"
" (previous was %u, this one %u)",
i, br_arity, arity);
}
if (!TypeCheckBreak(c)) break;
- BreakTo(c);
}
+ if (this->failed()) break;
+
-    CALL_INTERFACE_IF_REACHABLE(BrTable, operand, key);
+    if (control_.back().reachable()) {
+      CALL_INTERFACE(BrTable, operand, key);
+ for (uint32_t depth = control_depth(); depth-- > 0;) {
+ if (!br_targets[depth]) continue;
+ control_at(depth)->br_merge()->reached = true;
+ }
+ }
len = 1 + iterator.length();
EndControl();
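    Editor's note: the new br_targets vector memoizes which depths have already been
    validated. A br_table may list the same target thousands of times, and re-running the
    arity/type check per entry would be wasted work; one bit per control depth keeps
    validation linear. A condensed sketch (per-target checks elided):

        #include <cstdint>
        #include <vector>

        // Validate br_table entries against `depths` control levels, checking each
        // distinct target only once.
        bool ValidateBrTable(const std::vector<uint32_t>& entries, size_t depths) {
          std::vector<bool> br_targets(depths);
          for (uint32_t target : entries) {
            if (target >= depths) return false;  // "improper branch in br_table"
            if (br_targets[target]) continue;    // already type-checked this depth
            br_targets[target] = true;
            // ... TypeCheckBreak / arity-consistency checks would run here ...
          }
          return true;
        }

        int main() {
          return ValidateBrTable({0, 0, 1, 0}, 2) ? 0 : 1;  // duplicates are cheap
        }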
@@ -2249,10 +2276,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
- inline void BreakTo(Control* c) {
- if (control_.back().reachable()) c->br_merge()->reached = true;
- }
-
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
if (!TypeCheckFallThru(c)) return;
@@ -2344,6 +2367,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
inline void BuildSimpleOperator(WasmOpcode opcode, FunctionSig* sig) {
+ if (WasmOpcodes::IsSignExtensionOpcode(opcode)) {
+ RET_ON_PROTOTYPE_OPCODE(se);
+ }
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 57ee78f91c..217a5ff3b1 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -369,13 +369,13 @@ class WasmGraphBuildingInterface {
void CallDirect(Decoder* decoder,
const CallFunctionOperand<validate>& operand,
const Value args[], Value returns[]) {
- DoCall(decoder, nullptr, operand, args, returns, false);
+ DoCall(decoder, nullptr, operand.sig, operand.index, args, returns);
}
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
- DoCall(decoder, index.node, operand, args, returns, true);
+ DoCall(decoder, index.node, operand.sig, operand.sig_index, args, returns);
}
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
@@ -782,30 +782,29 @@ class WasmGraphBuildingInterface {
return result;
}
- template <typename Operand>
void DoCall(WasmFullDecoder<validate, WasmGraphBuildingInterface>* decoder,
- TFNode* index_node, const Operand& operand, const Value args[],
- Value returns[], bool is_indirect) {
- int param_count = static_cast<int>(operand.sig->parameter_count());
+ TFNode* index_node, FunctionSig* sig, uint32_t index,
+ const Value args[], Value returns[]) {
+ int param_count = static_cast<int>(sig->parameter_count());
TFNode** arg_nodes = builder_->Buffer(param_count + 1);
TFNode** return_nodes = nullptr;
arg_nodes[0] = index_node;
for (int i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
- if (is_indirect) {
- builder_->CallIndirect(operand.index, arg_nodes, &return_nodes,
+ if (index_node) {
+ builder_->CallIndirect(index, arg_nodes, &return_nodes,
decoder->position());
} else {
- builder_->CallDirect(operand.index, arg_nodes, &return_nodes,
+ builder_->CallDirect(index, arg_nodes, &return_nodes,
decoder->position());
}
- int return_count = static_cast<int>(operand.sig->return_count());
+ int return_count = static_cast<int>(sig->return_count());
for (int i = 0; i < return_count; ++i) {
returns[i].node = return_nodes[i];
}
// The invoked function could have used grow_memory, so we need to
- // reload mem_size and mem_start
+ // reload mem_size and mem_start.
LoadContextIntoSsa(ssa_env_);
}
};
@@ -1002,7 +1001,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
case kExprCallIndirect: {
CallIndirectOperand<Decoder::kNoValidate> operand(&i, i.pc());
- os << " // sig #" << operand.index;
+ os << " // sig #" << operand.sig_index;
if (decoder.Complete(i.pc(), operand)) {
os << ": " << *operand.sig;
}
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 4a2e610b99..0a09feddf2 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -207,18 +207,12 @@ class ModuleCompiler {
compiler::ModuleEnv* module_env,
ErrorThrower* thrower);
- static MaybeHandle<WasmModuleObject> CompileToModuleObject(
- Isolate* isolate, ErrorThrower* thrower,
- std::unique_ptr<WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
-
- private:
MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes);
+ private:
Isolate* isolate_;
WasmModule* module_;
const std::shared_ptr<Counters> async_counters_;
@@ -268,7 +262,7 @@ class JSToWasmWrapperCache {
target->builtin_index() == Builtins::kIllegal ||
target->builtin_index() == Builtins::kWasmCompileLazy) {
it.rinfo()->set_target_address(
- isolate, wasm_code.GetCode()->instruction_start());
+ wasm_code.GetCode()->instruction_start());
break;
}
}
@@ -277,9 +271,9 @@ class JSToWasmWrapperCache {
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
DCHECK(!it.done());
it.rinfo()->set_js_to_wasm_address(
- isolate, wasm_code.is_null()
- ? nullptr
- : wasm_code.GetWasmCode()->instructions().start());
+ wasm_code.is_null()
+ ? nullptr
+ : wasm_code.GetWasmCode()->instructions().start());
}
return code;
}
@@ -308,11 +302,12 @@ class InstanceBuilder {
InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory,
- WeakCallbackInfo<void>::Callback instance_finalizer_callback);
+ MaybeHandle<JSArrayBuffer> memory);
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> Build();
+ // Run the start function, if any.
+ bool ExecuteStartFunction();
private:
// Represents the initialized state of a table.
@@ -340,8 +335,8 @@ class InstanceBuilder {
Handle<WasmCompiledModule> compiled_module_;
std::vector<TableInstance> table_instances_;
std::vector<Handle<JSFunction>> js_wrappers_;
+ Handle<WasmExportedFunction> start_function_;
JSToWasmWrapperCache js_to_wasm_cache_;
- WeakCallbackInfo<void>::Callback instance_finalizer_callback_;
std::vector<SanitizedImport> sanitized_imports_;
const std::shared_ptr<Counters>& async_counters() const {
@@ -424,91 +419,6 @@ class InstanceBuilder {
Handle<WasmInstanceObject> instance);
};
-// TODO(titzer): move to wasm-objects.cc
-void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
- DisallowHeapAllocation no_gc;
- JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
- Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- // If a link to shared memory instances exists, update the list of memory
- // instances before the instance is destroyed.
- WasmCompiledModule* compiled_module = owner->compiled_module();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- if (FLAG_wasm_jit_to_native) {
- if (native_module) {
- TRACE("Finalizing %zu {\n", native_module->instance_id);
- } else {
- TRACE("Finalized already cleaned up compiled module\n");
- }
- } else {
- TRACE("Finalizing %d {\n", compiled_module->instance_id());
-
- if (compiled_module->use_trap_handler()) {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- DisallowHeapAllocation no_gc;
- FixedArray* code_table = compiled_module->code_table();
- for (int i = 0; i < code_table->length(); ++i) {
- Code* code = Code::cast(code_table->get(i));
- int index = code->trap_handler_index()->value();
- if (index >= 0) {
- trap_handler::ReleaseHandlerData(index);
- code->set_trap_handler_index(
- Smi::FromInt(trap_handler::kInvalidIndex));
- }
- }
- }
- }
- WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
-
- // Since the order of finalizers is not guaranteed, it can be the case
- // that {instance->compiled_module()->module()}, which is a
- // {Managed<WasmModule>} has been collected earlier in this GC cycle.
- // Weak references to this instance won't be cleared until
- // the next GC cycle, so we need to manually break some links (such as
- // the weak references from {WasmMemoryObject::instances}.
- if (owner->has_memory_object()) {
- Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
- Handle<WasmInstanceObject> instance(owner, isolate);
- WasmMemoryObject::RemoveInstance(isolate, memory, instance);
- }
-
- // weak_wasm_module may have been cleared, meaning the module object
- // was GC-ed. We still want to maintain the links between instances, to
- // release the WasmCompiledModule corresponding to the WasmModuleInstance
- // being finalized here.
- WasmModuleObject* wasm_module = nullptr;
- if (!weak_wasm_module->cleared()) {
- wasm_module = WasmModuleObject::cast(weak_wasm_module->value());
- WasmCompiledModule* current_template = wasm_module->compiled_module();
-
- TRACE("chain before {\n");
- TRACE_CHAIN(current_template);
- TRACE("}\n");
-
- DCHECK(!current_template->has_prev_instance());
- if (current_template == compiled_module) {
- if (!compiled_module->has_next_instance()) {
- WasmCompiledModule::Reset(isolate, compiled_module);
- } else {
- WasmModuleObject::cast(wasm_module)
- ->set_compiled_module(compiled_module->next_instance());
- }
- }
- }
-
- compiled_module->RemoveFromChain();
-
- if (wasm_module != nullptr) {
- TRACE("chain after {\n");
- TRACE_CHAIN(wasm_module->compiled_module());
- TRACE("}\n");
- }
- compiled_module->reset_weak_owning_instance();
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
- TRACE("}\n");
-}
-
// This is used in ProcessImports.
// When importing other modules' exports, we need to ask
// the exporter for a WasmToWasm wrapper. To do that, we need to
@@ -517,8 +427,9 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
class SetOfNativeModuleModificationScopes final {
public:
void Add(NativeModule* module) {
- module->SetExecutable(false);
- native_modules_.insert(module);
+ if (native_modules_.insert(module).second) {
+ module->SetExecutable(false);
+ }
}
~SetOfNativeModuleModificationScopes() {
@@ -531,138 +442,28 @@ class SetOfNativeModuleModificationScopes final {
std::unordered_set<NativeModule*> native_modules_;
};
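    Editor's note: the Add() fix above leans on the return value of unordered_set::insert,
    a pair whose .second member is true only when the element was actually inserted.
    SetExecutable(false) is therefore issued once per module rather than on every repeated
    Add(), which matters because the destructor flips each module back exactly once. Sketch:

        #include <unordered_set>

        struct NativeModule {
          int executable_toggles = 0;
          void SetExecutable(bool) { ++executable_toggles; }
        };

        std::unordered_set<NativeModule*> native_modules_;

        void Add(NativeModule* module) {
          if (native_modules_.insert(module).second) {  // true only on first insert
            module->SetExecutable(false);
          }
        }

        int main() {
          NativeModule m;
          Add(&m);
          Add(&m);                      // duplicate: no second SetExecutable call
          return m.executable_toggles;  // 1
        }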
-} // namespace
-
-MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kAsmJsOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
+void EnsureWasmContextTable(WasmContext* wasm_context, int table_size) {
+ if (wasm_context->table) return;
+ wasm_context->table_size = table_size;
+ wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
+ calloc(table_size, sizeof(IndirectFunctionTableEntry)));
+ for (int i = 0; i < table_size; i++) {
+ wasm_context->table[i].sig_id = kInvalidSigIndex;
}
-
- // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
- // in {CompileToModuleObject}.
- return ModuleCompiler::CompileToModuleObject(
- isolate, thrower, std::move(result.val), bytes, asm_js_script,
- asm_js_offset_table_bytes);
}
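    Editor's note: EnsureWasmContextTable is a one-shot lazy allocator. calloc hands back
    zero-filled entries, but a zeroed sig_id would alias signature index 0, so every slot
    is stamped with kInvalidSigIndex so unfilled entries can never pass an indirect-call
    signature check. A freestanding sketch (field names simplified):

        #include <cstdint>
        #include <cstdlib>

        constexpr int32_t kInvalidSigIndex = -1;  // stand-in sentinel

        struct IndirectFunctionTableEntry {
          int32_t sig_id;
          void* context;
          void* target;
        };

        struct WasmContext {
          IndirectFunctionTableEntry* table = nullptr;
          int table_size = 0;
        };

        void EnsureTable(WasmContext* ctx, int table_size) {
          if (ctx->table) return;  // idempotent: the first caller sizes the table
          ctx->table_size = table_size;
          ctx->table = static_cast<IndirectFunctionTableEntry*>(
              std::calloc(table_size, sizeof(IndirectFunctionTableEntry)));
          for (int i = 0; i < table_size; i++) {
            ctx->table[i].sig_id = kInvalidSigIndex;  // never matches a real signature
          }
        }

        int main() {
          WasmContext ctx;
          EnsureTable(&ctx, 4);
          int32_t sentinel = ctx.table[0].sig_id;  // -1
          std::free(ctx.table);
          return sentinel == kInvalidSigIndex ? 0 : 1;
        }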
-MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
- ErrorThrower* thrower,
- const ModuleWireBytes& bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kWasmOrigin);
- if (result.failed()) {
- thrower->CompileFailed("Wasm decoding failed", result);
- return {};
- }
-
- // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
- // in {CompileToModuleObject}.
- return ModuleCompiler::CompileToModuleObject(
- isolate, thrower, std::move(result.val), bytes, Handle<Script>(),
- Vector<const byte>());
-}
+} // namespace
-MaybeHandle<WasmInstanceObject> SyncInstantiate(
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory) {
- InstanceBuilder builder(isolate, thrower, module_object, imports, memory,
- &InstanceFinalizer);
- return builder.Build();
-}
-
-MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory) {
- MaybeHandle<WasmModuleObject> module = SyncCompile(isolate, thrower, bytes);
- DCHECK_EQ(thrower->error(), module.is_null());
- if (module.is_null()) return {};
-
- return SyncInstantiate(isolate, thrower, module.ToHandleChecked(), imports,
- memory);
-}
-
-void RejectPromise(Isolate* isolate, Handle<Context> context,
- ErrorThrower& thrower, Handle<JSPromise> promise) {
- Local<Promise::Resolver> resolver =
- Utils::PromiseToLocal(promise).As<Promise::Resolver>();
- auto maybe = resolver->Reject(Utils::ToLocal(context),
- Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-void ResolvePromise(Isolate* isolate, Handle<Context> context,
- Handle<JSPromise> promise, Handle<Object> result) {
- Local<Promise::Resolver> resolver =
- Utils::PromiseToLocal(promise).As<Promise::Resolver>();
- auto maybe =
- resolver->Resolve(Utils::ToLocal(context), Utils::ToLocal(result));
- CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
-}
-
-void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports) {
- ErrorThrower thrower(isolate, nullptr);
- MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
- isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
+ InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
+ auto instance = builder.Build();
+ if (!instance.is_null() && builder.ExecuteStartFunction()) {
+ return instance;
}
- ResolvePromise(isolate, handle(isolate->context()), promise,
- instance_object.ToHandleChecked());
-}
-
-void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes, bool is_shared) {
- if (!FLAG_wasm_async_compilation) {
- // Asynchronous compilation disabled; fall back on synchronous compilation.
- ErrorThrower thrower(isolate, "WasmCompile");
- MaybeHandle<WasmModuleObject> module_object;
- if (is_shared) {
- // Make a copy of the wire bytes to avoid concurrent modification.
- std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- i::wasm::ModuleWireBytes bytes_copy(copy.get(),
- copy.get() + bytes.length());
- module_object = SyncCompile(isolate, &thrower, bytes_copy);
- } else {
- // The wire bytes are not shared, OK to use them directly.
- module_object = SyncCompile(isolate, &thrower, bytes);
- }
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), thrower, promise);
- return;
- }
- Handle<WasmModuleObject> module = module_object.ToHandleChecked();
- ResolvePromise(isolate, handle(isolate->context()), promise, module);
- return;
- }
-
- if (FLAG_wasm_test_streaming) {
- std::shared_ptr<StreamingDecoder> streaming_decoder =
- isolate->wasm_engine()
- ->compilation_manager()
- ->StartStreamingCompilation(isolate, handle(isolate->context()),
- promise);
- streaming_decoder->OnBytesReceived(bytes.module_bytes());
- streaming_decoder->Finish();
- return;
- }
- // Make a copy of the wire bytes in case the user program changes them
- // during asynchronous compilation.
- std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
- memcpy(copy.get(), bytes.start(), bytes.length());
- isolate->wasm_engine()->compilation_manager()->StartAsyncCompileJob(
- isolate, std::move(copy), bytes.length(), handle(isolate->context()),
- promise);
+ return {};
}
Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
@@ -845,6 +646,7 @@ Address CompileLazy(Isolate* isolate) {
int func_index = static_cast<int>(result->index());
if (!exp_deopt_data_entry.is_null() && exp_deopt_data_entry->IsFixedArray()) {
+ int patched = 0;
Handle<FixedArray> exp_deopt_data =
Handle<FixedArray>::cast(exp_deopt_data_entry);
@@ -854,22 +656,36 @@ Address CompileLazy(Isolate* isolate) {
// See EnsureExportedLazyDeoptData: exp_deopt_data[0...(len-1)] are pairs
// of <export_table, index> followed by undefined values. Use this
// information here to patch all export tables.
+ Address target = result->instructions().start();
Handle<Foreign> foreign_holder =
- isolate->factory()->NewForeign(result->instructions().start(), TENURED);
+ isolate->factory()->NewForeign(target, TENURED);
for (int idx = 0, end = exp_deopt_data->length(); idx < end; idx += 2) {
if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
DisallowHeapAllocation no_gc;
int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
- exp_table->set(compiler::FunctionTableCodeOffset(exp_index),
- *foreign_holder);
+
+ if (WASM_CONTEXT_TABLES) {
+ // TODO(titzer): patching of function tables for lazy compilation
+ // only works for a single instance.
+ instance->wasm_context()->get()->table[exp_index].target = target;
+ } else {
+ int table_index = compiler::FunctionTableCodeOffset(exp_index);
+ DCHECK_EQ(Foreign::cast(exp_table->get(table_index))->foreign_address(),
+ lazy_stub_or_copy->instructions().start());
+
+ exp_table->set(table_index, *foreign_holder);
+ ++patched;
+ }
}
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
// After processing, remove the list of exported entries, such that we don't
// do the patching redundantly.
compiled_module->lazy_compile_data()->set(
func_index, isolate->heap()->undefined_value());
+ if (!WASM_CONTEXT_TABLES) {
+ DCHECK_LT(0, patched);
+ USE(patched);
+ }
}
return result->instructions().start();
@@ -880,8 +696,7 @@ compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
DisallowHeapAllocation no_gc;
WasmModule* module = compiled_module->shared()->module();
if (FLAG_wasm_jit_to_native) {
- NativeModule* native_module = compiled_module->GetNativeModule();
- compiler::ModuleEnv result(module, native_module->function_tables(),
+ compiler::ModuleEnv result(module, std::vector<Address>{},
std::vector<Handle<Code>>{},
BUILTIN_CODE(isolate, WasmCompileLazy),
compiled_module->use_trap_handler());
@@ -911,6 +726,20 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
compilation_timer.Start();
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
+
+ // TODO(wasm): Refactor this to only get the name if it is really needed for
+ // tracing / debugging.
+ std::string func_name;
+ {
+ WasmName name = Vector<const char>::cast(
+ compiled_module->shared()->GetRawFunctionName(func_index));
+ // Copy to std::string, because the underlying string object might move on
+ // the heap.
+ func_name.assign(name.start(), static_cast<size_t>(name.length()));
+ }
+
+ TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index);
+
if (FLAG_wasm_jit_to_native) {
wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
static_cast<uint32_t>(func_index));
@@ -937,16 +766,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
FunctionBody body{func->sig, func->code.offset(),
module_start + func->code.offset(),
module_start + func->code.end_offset()};
- // TODO(wasm): Refactor this to only get the name if it is really needed for
- // tracing / debugging.
- std::string func_name;
- {
- WasmName name = Vector<const char>::cast(
- compiled_module->shared()->GetRawFunctionName(func_index));
- // Copy to std::string, because the underlying string object might move on
- // the heap.
- func_name.assign(name.start(), static_cast<size_t>(name.length()));
- }
+
ErrorThrower thrower(isolate, "WasmLazyCompile");
compiler::WasmCompilationUnit unit(isolate, &module_env,
compiled_module->GetNativeModule(), body,
@@ -989,7 +809,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
if (!code_wrapper.IsCodeObject()) {
const wasm::WasmCode* wasm_code = code_wrapper.GetWasmCode();
- Assembler::FlushICache(isolate, wasm_code->instructions().start(),
+ Assembler::FlushICache(wasm_code->instructions().start(),
wasm_code->instructions().size());
counters->wasm_generated_code_size()->Increment(
static_cast<int>(wasm_code->instructions().size()));
@@ -997,8 +817,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
static_cast<int>(wasm_code->reloc_info().size()));
} else {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
@@ -1062,8 +881,12 @@ const WasmCode* WasmExtractWasmToWasmCallee(const WasmCodeManager* code_manager,
wasm_to_wasm->constant_pool(), \
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); \
DCHECK(!it.done()); \
- it.rinfo()->set_js_to_wasm_address(isolate, \
- new_target->instructions().start()); \
+ DCHECK_EQ(WasmCode::kLazyStub, \
+ isolate->wasm_engine() \
+ ->code_manager() \
+ ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address()) \
+ ->kind()); \
+ it.rinfo()->set_js_to_wasm_address(new_target->instructions().start()); \
it.next(); \
DCHECK(it.done()); \
} while (0)
@@ -1077,7 +900,7 @@ void PatchWasmToWasmWrapper(Isolate* isolate, Code* wasm_to_wasm,
DCHECK_EQ(Builtins::kWasmCompileLazy,
Code::GetCodeFromTargetAddress(it.rinfo()->target_address())
->builtin_index());
- it.rinfo()->set_target_address(isolate, new_target->instruction_start());
+ it.rinfo()->set_target_address(new_target->instruction_start());
#ifdef DEBUG
it.next();
DCHECK(it.done());
@@ -1169,8 +992,6 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
DCHECK(!non_compiled_functions.empty() || !wasm_to_wasm_callee.is_null());
}
- TRACE_LAZY("Compiling function %d.\n", func_to_return_idx);
-
// TODO(clemensh): compile all functions in non_compiled_functions in
// background, wait for func_to_return_idx.
CompileFunction(isolate, instance, func_to_return_idx);
@@ -1224,8 +1045,7 @@ Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
continue;
}
DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
- it.rinfo()->set_target_address(isolate,
- callee_compiled->instruction_start());
+ it.rinfo()->set_target_address(callee_compiled->instruction_start());
++patched;
}
DCHECK_EQ(non_compiled_functions.size(), idx);
@@ -1251,6 +1071,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
CompileFunction(isolate, instance, exported_func_index);
{
DisallowHeapAllocation no_gc;
+ int patched = 0;
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
RelocIterator it(*js_to_wasm_caller,
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
@@ -1263,10 +1084,21 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
DCHECK_NOT_NULL(callee_compiled);
if (current_callee->kind() == WasmCode::kWasmToWasmWrapper) {
WasmPatchWasmToWasmWrapper(isolate, current_callee, callee_compiled);
+ ++patched;
} else {
+ DCHECK_EQ(WasmCode::kLazyStub,
+ isolate->wasm_engine()
+ ->code_manager()
+ ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address())
+ ->kind());
it.rinfo()->set_js_to_wasm_address(
- isolate, callee_compiled->instructions().start());
+ callee_compiled->instructions().start());
+ ++patched;
}
+ DCHECK_LT(0, patched);
+ TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
+ USE(patched);
+
#ifdef DEBUG
it.next();
DCHECK(it.done());
@@ -1313,6 +1145,7 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
->module()
->functions[caller_func_index]
.code.offset();
+ int num_non_compiled_functions = 0;
for (RelocIterator it(wasm_caller->instructions(),
wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
@@ -1333,6 +1166,8 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
non_compiled_functions.push_back(Nothing<uint32_t>());
continue;
}
+ ++num_non_compiled_functions;
+
uint32_t called_func_index =
ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
DCHECK_LT(called_func_index,
@@ -1344,6 +1179,10 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
maybe_func_to_return_idx = Just(called_func_index);
}
}
+
+ TRACE_LAZY("Found %d non-compiled functions in caller.\n",
+ num_non_compiled_functions);
+ USE(num_non_compiled_functions);
}
uint32_t func_to_return_idx = 0;
@@ -1365,10 +1204,12 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
const WasmCode* ret = CompileFunction(isolate, instance, func_to_return_idx);
DCHECK_NOT_NULL(ret);
+ int patched = 0;
if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
// We can finish it all here by compiling the target wasm function and
// patching the wasm_to_wasm caller.
WasmPatchWasmToWasmWrapper(isolate, last_callee, ret);
+ ++patched;
} else {
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
@@ -1376,7 +1217,6 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
// Now patch the code object with all functions which are now compiled. This
// will pick up any other compiled functions, not only {ret}.
size_t idx = 0;
- size_t patched = 0;
for (RelocIterator
it(wasm_caller->instructions(), wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
@@ -1388,13 +1228,22 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
const WasmCode* callee_compiled =
compiled_module->GetNativeModule()->GetCode(lookup);
if (callee_compiled->kind() != WasmCode::kFunction) continue;
+ DCHECK_EQ(WasmCode::kLazyStub,
+ isolate->wasm_engine()
+ ->code_manager()
+ ->GetCodeFromStartAddress(it.rinfo()->wasm_call_address())
+ ->kind());
it.rinfo()->set_wasm_call_address(
- isolate, callee_compiled->instructions().start());
+ callee_compiled->instructions().start());
++patched;
}
DCHECK_EQ(non_compiled_functions.size(), idx);
- TRACE_LAZY("Patched %zu location(s) in the caller.\n", patched);
}
+
+ DCHECK_LT(0, patched);
+ TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
+ USE(patched);
+
return ret;
}
@@ -1679,8 +1528,7 @@ void ModuleCompiler::ValidateSequentially(const ModuleWireBytes& wire_bytes,
}
}
-// static
-MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObject(
+MaybeHandle<WasmModuleObject> CompileToModuleObject(
Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
@@ -1703,21 +1551,20 @@ bool compile_lazy(const WasmModule* module) {
(FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
}
-void FlushICache(Isolate* isolate, const wasm::NativeModule* native_module) {
+void FlushICache(const wasm::NativeModule* native_module) {
for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
const wasm::WasmCode* code = native_module->GetCode(i);
if (code == nullptr) continue;
- Assembler::FlushICache(isolate, code->instructions().start(),
+ Assembler::FlushICache(code->instructions().start(),
code->instructions().size());
}
}
-void FlushICache(Isolate* isolate, Handle<FixedArray> functions) {
+void FlushICache(Handle<FixedArray> functions) {
for (int i = 0, e = functions->length(); i < e; ++i) {
if (!functions->get(i)->IsCode()) continue;
Code* code = Code::cast(functions->get(i));
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
}
}
@@ -1811,7 +1658,8 @@ WasmCodeWrapper EnsureExportedLazyDeoptData(Isolate* isolate,
return WasmCodeWrapper(code);
}
// Clone the lazy builtin into the native module.
- return WasmCodeWrapper(native_module->CloneLazyBuiltinInto(func_index));
+ return WasmCodeWrapper(
+ native_module->CloneLazyBuiltinInto(code, func_index));
}
}
@@ -1825,7 +1673,7 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<FixedArray> code_table, wasm::NativeModule* native_module,
uint32_t func_index, Handle<FixedArray> export_table, int export_index,
- std::unordered_map<uint32_t, uint32_t>* table_export_count) {
+ std::unordered_map<uint32_t, uint32_t>* num_table_exports) {
if (!FLAG_wasm_jit_to_native) {
Handle<Code> code =
EnsureExportedLazyDeoptData(isolate, instance, code_table,
@@ -1845,10 +1693,10 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
// [#4: export table
// #5: export table index]
// ...
- // table_export_count counts down and determines the index for the new
+ // num_table_exports counts down and determines the index for the new
// export table entry.
- auto table_export_entry = table_export_count->find(func_index);
- DCHECK(table_export_entry != table_export_count->end());
+ auto table_export_entry = num_table_exports->find(func_index);
+ DCHECK(table_export_entry != num_table_exports->end());
DCHECK_LT(0, table_export_entry->second);
uint32_t this_idx = 2 * table_export_entry->second;
--table_export_entry->second;
@@ -1881,10 +1729,10 @@ WasmCodeWrapper EnsureTableExportLazyDeoptData(
// [#2: export table
// #3: export table index]
// ...
- // table_export_count counts down and determines the index for the new
+ // num_table_exports counts down and determines the index for the new
// export table entry.
- auto table_export_entry = table_export_count->find(func_index);
- DCHECK(table_export_entry != table_export_count->end());
+ auto table_export_entry = num_table_exports->find(func_index);
+ DCHECK(table_export_entry != num_table_exports->end());
DCHECK_LT(0, table_export_entry->second);
--table_export_entry->second;
uint32_t this_idx = 2 * table_export_entry->second;
@@ -2192,19 +2040,17 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
return result;
}
-InstanceBuilder::InstanceBuilder(
- Isolate* isolate, ErrorThrower* thrower,
- Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> ffi,
- MaybeHandle<JSArrayBuffer> memory,
- WeakCallbackInfo<void>::Callback instance_finalizer_callback)
+InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> ffi,
+ MaybeHandle<JSArrayBuffer> memory)
: isolate_(isolate),
module_(module_object->compiled_module()->shared()->module()),
async_counters_(isolate->async_counters()),
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
- memory_(memory),
- instance_finalizer_callback_(instance_finalizer_callback) {
+ memory_(memory) {
sanitized_imports_.reserve(module_->import_table.size());
}
@@ -2222,12 +2068,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (thrower_->error()) return {};
// TODO(6792): No longer needed once WebAssembly code is off heap.
- // Use base::Optional to be able to close the scope before executing the start
- // function.
- base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
- base::in_place_t(), isolate_->heap());
+ CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
// From here on, we expect the build pipeline to run without exiting to JS.
- // Exception is when we run the startup function.
DisallowJavascriptExecution no_js(isolate_);
// Record build time into correct bucket, then build instance.
TimedHistogramScope wasm_instantiate_module_time_scope(
@@ -2238,14 +2080,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Reuse the compiled module (if no owner), otherwise clone.
//--------------------------------------------------------------------------
- // TODO(mtrofin): remove code_table and old_code_table
+ // TODO(mtrofin): remove code_table
// when FLAG_wasm_jit_to_native is not needed
Handle<FixedArray> code_table;
Handle<FixedArray> wrapper_table;
- // We keep around a copy of the old code table, because we'll be replacing
- // imports for the new instance, and then we need the old imports to be
- // able to relocate.
- Handle<FixedArray> old_code_table;
MaybeHandle<WasmInstanceObject> owner;
// native_module is the one we're building now, old_module
// is the one we clone from. They point to the same place if
@@ -2284,7 +2122,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
} else {
TRACE("Cloning from %d\n", original->instance_id());
- old_code_table = handle(original->code_table(), isolate_);
compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
code_table = handle(compiled_module_->code_table(), isolate_);
wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
@@ -2345,7 +2182,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
compiled_module_->GetNativeModule()->instance_id);
} else {
code_table = handle(compiled_module_->code_table(), isolate_);
- old_code_table = factory->CopyFixedArray(code_table);
TRACE("Reusing existing instance %d\n",
compiled_module_->instance_id());
}
@@ -2549,11 +2385,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
if (FLAG_wasm_jit_to_native) {
- FlushICache(isolate_, native_module);
+ FlushICache(native_module);
} else {
- FlushICache(isolate_, code_table);
+ FlushICache(code_table);
}
- FlushICache(isolate_, wrapper_table);
+ FlushICache(wrapper_table);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
@@ -2570,8 +2406,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Insert the compiled module into the weak list of compiled modules.
//--------------------------------------------------------------------------
{
- Handle<Object> global_handle =
- isolate_->global_handles()->Create(*instance);
Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
if (!owner.is_null()) {
// Publish the new instance to the instances chain.
@@ -2580,9 +2414,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
module_object_->set_compiled_module(*compiled_module_);
compiled_module_->set_weak_owning_instance(*link_to_owning_instance);
- GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
- instance_finalizer_callback_,
- v8::WeakCallbackType::kFinalizer);
+ WasmInstanceObject::InstallFinalizer(isolate_, instance);
}
//--------------------------------------------------------------------------
@@ -2607,41 +2439,20 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
//--------------------------------------------------------------------------
- // Execute the start function if one was specified.
+ // Create a wrapper for the start function.
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
- HandleScope scope(isolate_);
int start_index = module_->start_function_index;
- WasmCodeWrapper startup_code = EnsureExportedLazyDeoptData(
+ WasmCodeWrapper start_code = EnsureExportedLazyDeoptData(
isolate_, instance, code_table, native_module, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
- isolate_, module_, startup_code, start_index,
+ isolate_, module_, start_code, start_index,
compiled_module_->use_trap_handler());
- Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
+ start_function_ = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
- RecordStats(startup_code, counters());
- // Call the JS function.
- Handle<Object> undefined = factory->undefined_value();
- // Close the modification scopes, so we can execute the start function.
- modification_scope.reset();
- native_module_modification_scope.reset();
- {
- // We're OK with JS execution here. The instance is fully setup.
- AllowJavascriptExecution allow_js(isolate_);
- MaybeHandle<Object> retval =
- Execution::Call(isolate_, startup_fct, undefined, 0, nullptr);
-
- if (retval.is_null()) {
- DCHECK(isolate_->has_pending_exception());
- // It's unfortunate that the new instance is already linked in the
- // chain. However, we need to set up everything before executing the
- // startup unction, such that stack trace information can be generated
- // correctly already in the start function.
- return {};
- }
- }
+ RecordStats(start_code, counters());
}
DCHECK(!isolate_->has_pending_exception());
@@ -2655,6 +2466,22 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
return instance;
}
+bool InstanceBuilder::ExecuteStartFunction() {
+ if (start_function_.is_null()) return true; // No start function.
+
+ HandleScope scope(isolate_);
+ // Call the JS function.
+ Handle<Object> undefined = isolate_->factory()->undefined_value();
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate_, start_function_, undefined, 0, nullptr);
+
+ if (retval.is_null()) {
+ DCHECK(isolate_->has_pending_exception());
+ return false;
+ }
+ return true;
+}
+
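    Editor's note: the Build()/ExecuteStartFunction() split restores a clean phase
    separation — Build() runs under DisallowJavascriptExecution and a code-space
    modification scope, while the start function may re-enter JS, so the caller
    (InstantiateToInstanceObject above) invokes it only after Build() returns. A generic
    sketch of the two-phase contract (hypothetical types, not V8's):

        #include <functional>
        #include <optional>

        struct Instance {};

        // Phase 1 must not run user code; phase 2 may. A null start hook means the
        // module has no start function and instantiation trivially succeeds.
        std::optional<Instance> Instantiate(std::function<bool(Instance&)> start) {
          Instance instance;                // Build(): no JS execution in here
          if (start && !start(instance)) {
            return std::nullopt;            // start function trapped or threw
          }
          return instance;
        }

        int main() {
          auto ok = Instantiate([](Instance&) { return true; });
          return ok ? 0 : 1;
        }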
// Look up an import value in the {ffi_} object.
MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
Handle<String> module_name,
@@ -2939,6 +2766,11 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
i += kFunctionTableEntrySize) {
table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
}
+ WasmContext* wasm_context = nullptr;
+ if (WASM_CONTEXT_TABLES) {
+ wasm_context = instance->wasm_context()->get();
+ EnsureWasmContextTable(wasm_context, imported_cur_size);
+ }
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
for (int i = 0; i < imported_cur_size; ++i) {
@@ -2956,7 +2788,7 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
auto target = Handle<WasmExportedFunction>::cast(val);
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
FunctionSig* sig = nullptr;
Handle<Code> code =
MakeWasmToWasmWrapper(isolate_, target, nullptr, &sig,
@@ -2968,34 +2800,17 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
table_instance.function_table->set(
compiler::FunctionTableCodeOffset(i), *code);
} else {
- const wasm::WasmCode* exported_code =
- target->GetWasmCode().GetWasmCode();
- wasm::NativeModule* exporting_module = exported_code->owner();
Handle<WasmInstanceObject> imported_instance =
handle(target->instance());
- imported_wasm_instances.Set(imported_instance, imported_instance);
+ const wasm::WasmCode* exported_code =
+ target->GetWasmCode().GetWasmCode();
FunctionSig* sig = imported_instance->module()
->functions[exported_code->index()]
.sig;
- wasm::WasmCode* wrapper_code =
- exporting_module->GetExportedWrapper(exported_code->index());
- if (wrapper_code == nullptr) {
- WasmContext* other_context =
- imported_instance->wasm_context()->get();
- Handle<Code> wrapper = compiler::CompileWasmToWasmWrapper(
- isolate_, target->GetWasmCode(), sig,
- reinterpret_cast<Address>(other_context));
- set_of_native_module_scopes.Add(exporting_module);
- wrapper_code = exporting_module->AddExportedWrapper(
- wrapper, exported_code->index());
- }
- int sig_index = module_->signature_map.Find(sig);
- Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
- wrapper_code->instructions().start(), TENURED);
- table_instance.function_table->set(
- compiler::FunctionTableSigOffset(i), Smi::FromInt(sig_index));
- table_instance.function_table->set(
- compiler::FunctionTableCodeOffset(i), *foreign_holder);
+ auto& entry = wasm_context->table[i];
+ entry.context = imported_instance->wasm_context()->get();
+ entry.sig_id = module_->signature_map.Find(sig);
+ entry.target = exported_code->instructions().start();
}
}
@@ -3187,6 +3002,20 @@ void InstanceBuilder::ProcessExports(
// Fill the table to cache the exported JSFunction wrappers.
js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
Handle<JSFunction>::null());
+
+ // If an imported WebAssembly function gets exported, the exported function
+  // has to be identical to the imported function. Therefore we put all
+ // imported WebAssembly functions into the js_wrappers_ list.
+ for (int index = 0, end = static_cast<int>(module_->import_table.size());
+ index < end; ++index) {
+ WasmImport& import = module_->import_table[index];
+ if (import.kind == kExternalFunction) {
+ Handle<Object> value = sanitized_imports_[index].value;
+ if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ js_wrappers_[import.index] = Handle<JSFunction>::cast(value);
+ }
+ }
+ }
}
Handle<JSObject> exports_object;
@@ -3345,12 +3174,6 @@ void InstanceBuilder::InitializeTables(
Handle<WasmInstanceObject> instance,
CodeSpecialization* code_specialization) {
size_t function_table_count = module_->function_tables.size();
- std::vector<GlobalHandleAddress> new_function_tables(function_table_count);
-
- wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
- std::vector<GlobalHandleAddress> empty;
- std::vector<GlobalHandleAddress>& old_function_tables =
- FLAG_wasm_jit_to_native ? native_module->function_tables() : empty;
Handle<FixedArray> old_function_tables_gc =
FLAG_wasm_jit_to_native
@@ -3372,9 +3195,7 @@ void InstanceBuilder::InitializeTables(
instance->set_function_tables(*rooted_function_tables);
- if (FLAG_wasm_jit_to_native) {
- DCHECK_EQ(old_function_tables.size(), new_function_tables.size());
- } else {
+ if (!FLAG_wasm_jit_to_native) {
DCHECK_EQ(old_function_tables_gc->length(),
new_function_tables_gc->length());
}
@@ -3386,6 +3207,11 @@ void InstanceBuilder::InitializeTables(
int num_table_entries = static_cast<int>(table.initial_size);
int table_size = compiler::kFunctionTableEntrySize * num_table_entries;
+ if (WASM_CONTEXT_TABLES) {
+ WasmContext* wasm_context = instance->wasm_context()->get();
+ EnsureWasmContextTable(wasm_context, num_table_entries);
+ }
+
if (table_instance.function_table.is_null()) {
// Create a new dispatch table if necessary.
table_instance.function_table =
@@ -3427,24 +3253,18 @@ void InstanceBuilder::InitializeTables(
GlobalHandleAddress new_func_table_addr = global_func_table.address();
GlobalHandleAddress old_func_table_addr;
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
WasmCompiledModule::SetTableValue(isolate_, new_function_tables_gc,
int_index, new_func_table_addr);
old_func_table_addr =
WasmCompiledModule::GetTableValue(*old_function_tables_gc, int_index);
- } else {
- new_function_tables[int_index] = new_func_table_addr;
-
- old_func_table_addr = old_function_tables[int_index];
+ code_specialization->RelocatePointer(old_func_table_addr,
+ new_func_table_addr);
}
- code_specialization->RelocatePointer(old_func_table_addr,
- new_func_table_addr);
}
- if (FLAG_wasm_jit_to_native) {
- native_module->function_tables() = new_function_tables;
- } else {
+ if (!WASM_CONTEXT_TABLES) {
compiled_module_->set_function_tables(*new_function_tables_gc);
}
}
@@ -3499,10 +3319,12 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
int table_index = static_cast<int>(i + base);
- uint32_t sig_index = module_->signature_ids[function->sig_index];
+
+ // Update the local dispatch table first.
+ uint32_t sig_id = module_->signature_ids[function->sig_index];
table_instance.function_table->set(
compiler::FunctionTableSigOffset(table_index),
- Smi::FromInt(sig_index));
+ Smi::FromInt(sig_id));
WasmCodeWrapper wasm_code = EnsureTableExportLazyDeoptData(
isolate_, instance, code_table, native_module, func_index,
table_instance.function_table, table_index, &num_table_exports);
@@ -3517,7 +3339,17 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
table_instance.function_table->set(
compiler::FunctionTableCodeOffset(table_index),
*value_to_update_with);
+
+ if (WASM_CONTEXT_TABLES) {
+ WasmContext* wasm_context = instance->wasm_context()->get();
+ auto& entry = wasm_context->table[table_index];
+ entry.sig_id = sig_id;
+ entry.context = wasm_context;
+ entry.target = wasm_code.instructions().start();
+ }
+
if (!table_instance.table_object.is_null()) {
+ // Update the table object's other dispatch tables.
if (js_wrappers_[func_index].is_null()) {
// No JSFunction entry yet exists for this function. Create one.
// TODO(titzer): We compile JS->wasm wrappers for functions are
@@ -3546,31 +3378,10 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
}
table_instance.js_wrappers->set(table_index,
*js_wrappers_[func_index]);
- // When updating dispatch tables, we need to provide a wasm-to-wasm
- // wrapper for wasm_code - unless wasm_code is already a wrapper. If
- // it's a wasm-to-js wrapper, we don't need to construct a
- // wasm-to-wasm wrapper because there's no context switching required.
- // The remaining case is that it's a wasm-to-wasm wrapper, in which
- // case it's already doing "the right thing", and wrapping it again
- // would be redundant.
- if (func_index >= module_->num_imported_functions) {
- value_to_update_with = GetOrCreateIndirectCallWrapper(
- isolate_, instance, wasm_code, func_index, function->sig);
- } else {
- if (wasm_code.IsCodeObject()) {
- DCHECK(wasm_code.GetCode()->kind() == Code::WASM_TO_JS_FUNCTION ||
- wasm_code.GetCode()->kind() ==
- Code::WASM_TO_WASM_FUNCTION);
- } else {
- DCHECK(wasm_code.GetWasmCode()->kind() ==
- WasmCode::kWasmToJsWrapper ||
- wasm_code.GetWasmCode()->kind() ==
- WasmCode::kWasmToWasmWrapper);
- }
- }
- WasmTableObject::UpdateDispatchTables(table_instance.table_object,
- table_index, function->sig,
- value_to_update_with);
+ // UpdateDispatchTables() should update this instance as well.
+ WasmTableObject::UpdateDispatchTables(
+ isolate_, table_instance.table_object, table_index, function->sig,
+ instance, wasm_code, func_index);
}
}
}
@@ -3686,14 +3497,18 @@ void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
- RejectPromise(isolate_, context_, thrower, module_promise_);
+ MaybeHandle<Object> promise_result =
+ JSPromise::Reject(module_promise_, thrower.Reify());
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
- ResolvePromise(isolate_, context_, module_promise_, result);
+ MaybeHandle<Object> promise_result =
+ JSPromise::Resolve(module_promise_, result);
+ CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
}
// A closure to run a compilation step (either as foreground or background
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 3a8b1972d6..b41ca28cea 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -23,34 +23,20 @@ namespace wasm {
class ModuleCompiler;
class WasmCode;
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
+MaybeHandle<WasmModuleObject> CompileToModuleObject(
+ Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
-V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
-
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
+MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory);
-V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncCompileAndInstantiate(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports, MaybeHandle<JSArrayBuffer> memory);
-
-V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes,
- bool is_shared);
-
-V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
- Handle<JSPromise> promise,
- Handle<WasmModuleObject> module_object,
- MaybeHandle<JSReceiver> imports);
-
-V8_EXPORT_PRIVATE void CompileJsToWasmWrappers(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- Counters* counters);
+V8_EXPORT_PRIVATE
+void CompileJsToWasmWrappers(Isolate* isolate,
+ Handle<WasmCompiledModule> compiled_module,
+ Counters* counters);
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
Isolate* isolate, const ModuleWireBytes& wire_bytes);
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 010f191263..109b2fc230 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -270,7 +270,7 @@ class ModuleDecoderImpl : public Decoder {
pc_ = end_; // On error, terminate section decoding loop.
}
- void DumpModule(const ModuleResult& result) {
+ void DumpModule(const Vector<const byte> module_bytes) {
std::string path;
if (FLAG_dump_wasm_module_path) {
path = FLAG_dump_wasm_module_path;
@@ -280,12 +280,13 @@ class ModuleDecoderImpl : public Decoder {
}
}
// Files are named `HASH.{ok,failed}.wasm`.
- size_t hash = base::hash_range(start_, end_);
+ size_t hash = base::hash_range(module_bytes.start(), module_bytes.end());
EmbeddedVector<char, 32> buf;
- SNPrintF(buf, "%016zx.%s.wasm", hash, result.ok() ? "ok" : "failed");
+ SNPrintF(buf, "%016zx.%s.wasm", hash, ok() ? "ok" : "failed");
std::string name(buf.start());
if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
- if (fwrite(start_, end_ - start_, 1, wasm_file) != 1) {
+ if (fwrite(module_bytes.start(), module_bytes.length(), 1, wasm_file) !=
+ 1) {
OFStream os(stderr);
os << "Error while dumping wasm file" << std::endl;
}
@@ -848,7 +849,6 @@ class ModuleDecoderImpl : public Decoder {
// Copy error code and location.
result.MoveErrorFrom(intermediate_result_);
}
- if (FLAG_dump_wasm_module) DumpModule(result);
return result;
}
@@ -856,6 +856,7 @@ class ModuleDecoderImpl : public Decoder {
ModuleResult DecodeModule(Isolate* isolate, bool verify_functions = true) {
StartDecoding(isolate);
uint32_t offset = 0;
+ Vector<const byte> orig_bytes(start(), end() - start());
DecodeModuleHeader(Vector<const uint8_t>(start(), end() - start()), offset);
if (failed()) {
return FinishDecoding(verify_functions);
@@ -878,6 +879,8 @@ class ModuleDecoderImpl : public Decoder {
section_iter.advance(true);
}
+ if (FLAG_dump_wasm_module) DumpModule(orig_bytes);
+
if (decoder.failed()) {
return decoder.toResult<std::unique_ptr<WasmModule>>(nullptr);
}
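The dump-file naming above keys each dump by a hash of the raw module bytes, so re-dumping the same module overwrites a previous dump instead of accumulating files. A standalone sketch of the scheme, using FNV-1a as a stand-in for V8's base::hash_range:

#include <cstdint>
#include <cstdio>
#include <vector>

// FNV-1a over the module bytes; a simple stand-in for base::hash_range.
uint64_t HashBytes(const std::vector<uint8_t>& bytes) {
  uint64_t h = 1469598103934665603ull;  // FNV offset basis
  for (uint8_t b : bytes) {
    h ^= b;
    h *= 1099511628211ull;  // FNV prime
  }
  return h;
}

int main() {
  // The 8-byte wasm header: magic "\0asm" plus version 1.
  std::vector<uint8_t> module_bytes = {0x00, 0x61, 0x73, 0x6D, 0x01, 0, 0, 0};
  bool ok = true;
  char name[64];
  // Files are named `HASH.{ok,failed}.wasm`, mirroring DumpModule().
  std::snprintf(name, sizeof(name), "%016llx.%s.wasm",
                static_cast<unsigned long long>(HashBytes(module_bytes)),
                ok ? "ok" : "failed");
  std::puts(name);
}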
diff --git a/deps/v8/src/wasm/wasm-api.cc b/deps/v8/src/wasm/wasm-api.cc
deleted file mode 100644
index 4c51dc54cd..0000000000
--- a/deps/v8/src/wasm/wasm-api.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/wasm/wasm-api.h"
-
-#include "src/isolate-inl.h"
-#include "src/isolate.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-ScheduledErrorThrower::~ScheduledErrorThrower() {
- // There should never be both a pending and a scheduled exception.
- DCHECK(!isolate()->has_scheduled_exception() ||
- !isolate()->has_pending_exception());
- // Don't throw another error if there is already a scheduled error.
- if (isolate()->has_scheduled_exception()) {
- Reset();
- } else if (isolate()->has_pending_exception()) {
- Reset();
- isolate()->OptionalRescheduleException(false);
- } else if (error()) {
- isolate()->ScheduleThrow(*Reify());
- }
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-api.h b/deps/v8/src/wasm/wasm-api.h
deleted file mode 100644
index 464cdfa6f1..0000000000
--- a/deps/v8/src/wasm/wasm-api.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_API_H_
-#define V8_WASM_API_H_
-
-#include "src/wasm/wasm-result.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// Like an ErrorThrower, but turns all pending exceptions into scheduled
-// exceptions when going out of scope. Use this in API methods.
-// Note that pending exceptions are not necessarily created by the ErrorThrower,
-// but e.g. by the wasm start function. There might also be a scheduled
-// exception, created by another API call (e.g. v8::Object::Get). But there
-// should never be both pending and scheduled exceptions.
-class V8_EXPORT_PRIVATE ScheduledErrorThrower : public ErrorThrower {
- public:
- ScheduledErrorThrower(v8::Isolate* isolate, const char* context)
- : ScheduledErrorThrower(reinterpret_cast<Isolate*>(isolate), context) {}
-
- ScheduledErrorThrower(Isolate* isolate, const char* context)
- : ErrorThrower(isolate, context) {}
-
- ~ScheduledErrorThrower();
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_API_H_
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 2b8f309733..25f61d2e12 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -30,7 +30,6 @@ namespace internal {
namespace wasm {
namespace {
-size_t native_module_ids = 0;
#if V8_TARGET_ARCH_X64
#define __ masm->
@@ -71,10 +70,11 @@ void PatchTrampolineAndStubCalls(
#else
Address new_target = old_target;
#endif
- it.rinfo()->set_target_address(nullptr, new_target, SKIP_WRITE_BARRIER,
+ it.rinfo()->set_target_address(new_target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
}
}
+
} // namespace
DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
@@ -212,18 +212,21 @@ void WasmCode::Disassemble(const char* name, Isolate* isolate,
instructions().start() + instruction_size, nullptr);
os << "\n";
- Object* source_positions_or_undef =
- owner_->compiled_module()->source_positions()->get(index());
- if (!source_positions_or_undef->IsUndefined(isolate)) {
- os << "Source positions:\n pc offset position\n";
- for (SourcePositionTableIterator it(
- ByteArray::cast(source_positions_or_undef));
- !it.done(); it.Advance()) {
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ScriptOffset()
- << (it.is_statement() ? " statement" : "") << "\n";
+ // Anonymous functions don't have source positions.
+ if (!IsAnonymous()) {
+ Object* source_positions_or_undef =
+ owner_->compiled_module()->source_positions()->get(index());
+ if (!source_positions_or_undef->IsUndefined(isolate)) {
+ os << "Source positions:\n pc offset position\n";
+ for (SourcePositionTableIterator it(
+ ByteArray::cast(source_positions_or_undef));
+ !it.done(); it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
+ }
+ os << "\n";
}
- os << "\n";
}
os << "RelocInfo (size = " << reloc_size_ << ")\n";
@@ -268,10 +271,12 @@ WasmCode::~WasmCode() {
}
}
+base::AtomicNumber<size_t> NativeModule::next_id_;
+
NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
bool can_request_more, VirtualMemory* mem,
WasmCodeManager* code_manager)
- : instance_id(native_module_ids++),
+ : instance_id(next_id_.Increment(1)),
code_table_(num_functions),
num_imported_functions_(num_imports),
free_memory_(reinterpret_cast<Address>(mem->address()),
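Replacing the unsynchronized file-local `native_module_ids` counter with a class-level atomic makes id allocation safe when native modules are created on several threads. A minimal standalone equivalent with std::atomic (note fetch_add returns the previous value, whereas base::AtomicNumber::Increment returns the new one; either way every constructor observes a unique id):

#include <atomic>
#include <cstddef>
#include <cstdio>

class Module {
 public:
  Module() : instance_id(next_id_.fetch_add(1)) {}
  const std::size_t instance_id;

 private:
  static std::atomic<std::size_t> next_id_;
};

std::atomic<std::size_t> Module::next_id_{0};

int main() {
  Module a, b;
  std::printf("%zu %zu\n", a.instance_id, b.instance_id);  // prints "0 1"
}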
@@ -296,11 +301,6 @@ void NativeModule::ResizeCodeTableForTest(size_t last_index) {
source_positions = isolate->factory()->CopyFixedArrayAndGrow(
source_positions, grow_by, TENURED);
compiled_module()->set_source_positions(*source_positions);
- Handle<FixedArray> handler_table(compiled_module()->handler_table(),
- isolate);
- handler_table = isolate->factory()->CopyFixedArrayAndGrow(handler_table,
- grow_by, TENURED);
- compiled_module()->set_handler_table(*handler_table);
}
}
@@ -318,19 +318,24 @@ WasmCode* NativeModule::AddOwnedCode(
std::unique_ptr<const byte[]> reloc_info, size_t reloc_size,
Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
// Both allocation and insertion in owned_code_ happen in the same critical
// section, thus ensuring owned_code_'s elements are rarely if ever moved.
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
Address executable_buffer = AllocateForCode(orig_instructions.size());
- if (executable_buffer == nullptr) return nullptr;
+ if (executable_buffer == nullptr) {
+ V8::FatalProcessOutOfMemory("NativeModule::AddOwnedCode");
+ UNREACHABLE();
+ }
memcpy(executable_buffer, orig_instructions.start(),
orig_instructions.size());
std::unique_ptr<WasmCode> code(new WasmCode(
{executable_buffer, orig_instructions.size()}, std::move(reloc_info),
reloc_size, this, index, kind, constant_pool_offset, stack_slots,
- safepoint_table_offset, std::move(protected_instructions), is_liftoff));
+ safepoint_table_offset, handler_table_offset,
+ std::move(protected_instructions), is_liftoff));
WasmCode* ret = code.get();
// TODO(mtrofin): We allocate in increasing address order, and
@@ -339,8 +344,8 @@ WasmCode* NativeModule::AddOwnedCode(
auto insert_before = std::upper_bound(owned_code_.begin(), owned_code_.end(),
code, owned_code_comparer_);
owned_code_.insert(insert_before, std::move(code));
- wasm_code_manager_->FlushICache(ret->instructions().start(),
- ret->instructions().size());
+ Assembler::FlushICache(ret->instructions().start(),
+ ret->instructions().size());
return ret;
}
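After memcpy'ing instructions into executable memory, the instruction cache must be made coherent before the code runs; the call above now uses the isolate-independent Assembler::FlushICache instead of the removed WasmCodeManager::FlushICache. The same effect can be sketched on GCC/Clang with the __builtin___clear_cache builtin (this sketch assumes exec_buf already points at writable, executable memory; the mmap/mprotect setup is elided):

#include <cstddef>
#include <cstring>

// Copy generated machine code into an executable buffer, then make sure
// the CPU's instruction cache observes the new bytes before executing them.
void InstallCode(char* exec_buf, const char* code, std::size_t size) {
  std::memcpy(exec_buf, code, size);
  __builtin___clear_cache(exec_buf, exec_buf + size);  // GCC/Clang builtin
}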
@@ -348,12 +353,10 @@ WasmCode* NativeModule::AddOwnedCode(
WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, kind);
- SetCodeTable(index, ret);
+ code_table_[index] = ret;
ret->index_ = Just(index);
compiled_module()->source_positions()->set(static_cast<int>(index),
code->source_position_table());
- compiled_module()->handler_table()->set(static_cast<int>(index),
- code->handler_table());
return ret;
}
@@ -364,15 +367,11 @@ WasmCode* NativeModule::AddInterpreterWrapper(Handle<Code> code,
return ret;
}
-WasmCode* NativeModule::SetLazyBuiltin(Handle<Code> code) {
- DCHECK_NULL(lazy_builtin_);
- lazy_builtin_ = AddAnonymousCode(code, WasmCode::kLazyStub);
-
+void NativeModule::SetLazyBuiltin(Handle<Code> code) {
+ WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
- SetCodeTable(i, lazy_builtin_);
+ code_table_[i] = lazy_builtin;
}
-
- return lazy_builtin_;
}
WasmCompiledModule* NativeModule::compiled_module() const {
@@ -392,13 +391,16 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
reloc_info.reset(new byte[code->relocation_size()]);
memcpy(reloc_info.get(), code->relocation_start(), code->relocation_size());
}
+ std::shared_ptr<ProtectedInstructions> protected_instructions(
+ new ProtectedInstructions(0));
WasmCode* ret = AddOwnedCode(
{code->instruction_start(),
static_cast<size_t>(code->instruction_size())},
std::move(reloc_info), static_cast<size_t>(code->relocation_size()),
Nothing<uint32_t>(), kind, code->constant_pool_offset(),
(code->has_safepoint_info() ? code->stack_slots() : 0),
- (code->has_safepoint_info() ? code->safepoint_table_offset() : 0), {});
+ (code->has_safepoint_info() ? code->safepoint_table_offset() : 0),
+ code->handler_table_offset(), protected_instructions, false);
if (ret == nullptr) return nullptr;
intptr_t delta = ret->instructions().start() - code->instruction_start();
int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
@@ -411,8 +413,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
Code* call_target =
Code::GetCodeFromTargetAddress(orig_it.rinfo()->target_address());
- it.rinfo()->set_target_address(nullptr,
- GetLocalAddressFor(handle(call_target)),
+ it.rinfo()->set_target_address(GetLocalAddressFor(handle(call_target)),
SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
@@ -427,7 +428,7 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
WasmCode* NativeModule::AddCode(
const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
- size_t safepoint_table_offset,
+ size_t safepoint_table_offset, size_t handler_table_offset,
std::unique_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
std::unique_ptr<byte[]> reloc_info;
@@ -441,11 +442,11 @@ WasmCode* NativeModule::AddCode(
{desc.buffer, static_cast<size_t>(desc.instr_size)},
std::move(reloc_info), static_cast<size_t>(desc.reloc_size), Just(index),
WasmCode::kFunction, desc.instr_size - desc.constant_pool_size,
- frame_slots, safepoint_table_offset, std::move(protected_instructions),
- is_liftoff);
+ frame_slots, safepoint_table_offset, handler_table_offset,
+ std::move(protected_instructions), is_liftoff);
if (ret == nullptr) return nullptr;
- SetCodeTable(index, ret);
+ code_table_[index] = ret;
// TODO(mtrofin): this is a copy and paste from Code::CopyFrom.
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
@@ -467,12 +468,12 @@ WasmCode* NativeModule::AddCode(
// code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(nullptr, GetLocalAddressFor(handle(code)),
+ it.rinfo()->set_target_address(GetLocalAddressFor(handle(code)),
SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
- it.rinfo()->set_target_runtime_entry(
- origin->isolate(), p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} else {
intptr_t delta = ret->instructions().start() - desc.buffer;
it.rinfo()->apply(delta);
@@ -490,8 +491,7 @@ Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
masm.GetCode(nullptr, &code_desc);
WasmCode* wasm_code = AddOwnedCode(
{code_desc.buffer, static_cast<size_t>(code_desc.instr_size)}, nullptr, 0,
- Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, {});
- if (wasm_code == nullptr) return nullptr;
+ Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, 0, {}, false);
Address ret = wasm_code->instructions().start();
trampolines_.emplace(std::make_pair(dest, ret));
return ret;
@@ -560,7 +560,7 @@ void NativeModule::Link(uint32_t index) {
if (target == nullptr) continue;
Address target_addr = target->instructions().start();
DCHECK_NOT_NULL(target);
- it.rinfo()->set_wasm_call_address(nullptr, target_addr,
+ it.rinfo()->set_wasm_call_address(target_addr,
ICacheFlushMode::SKIP_ICACHE_FLUSH);
}
}
@@ -655,29 +655,29 @@ WasmCode* NativeModule::Lookup(Address pc) {
return nullptr;
}
-WasmCode* NativeModule::CloneLazyBuiltinInto(uint32_t index) {
- DCHECK_NOT_NULL(lazy_builtin());
- WasmCode* ret = CloneCode(lazy_builtin());
- SetCodeTable(index, ret);
+WasmCode* NativeModule::CloneLazyBuiltinInto(const WasmCode* code,
+ uint32_t index) {
+ DCHECK_EQ(wasm::WasmCode::kLazyStub, code->kind());
+ WasmCode* ret = CloneCode(code);
+ code_table_[index] = ret;
ret->index_ = Just(index);
return ret;
}
-bool NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
+void NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
for (auto& pair : other->trampolines_) {
Address key = pair.first;
Address local =
GetLocalAddressFor(handle(Code::GetCodeFromTargetAddress(key)));
- if (local == nullptr) return false;
+ DCHECK_NOT_NULL(local);
trampolines_.emplace(std::make_pair(key, local));
}
for (auto& pair : other->stubs_) {
uint32_t key = pair.first;
WasmCode* clone = CloneCode(pair.second);
- if (!clone) return false;
+ DCHECK_NOT_NULL(clone);
stubs_.emplace(std::make_pair(key, clone));
}
- return true;
}
WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
@@ -692,10 +692,10 @@ WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
original_code->reloc_info().size(), original_code->index_,
original_code->kind(), original_code->constant_pool_offset_,
original_code->stack_slots(), original_code->safepoint_table_offset_,
- original_code->protected_instructions_);
- if (ret == nullptr) return nullptr;
+ original_code->handler_table_offset_,
+ original_code->protected_instructions_, original_code->is_liftoff());
if (!ret->IsAnonymous()) {
- SetCodeTable(ret->index(), ret);
+ code_table_[ret->index()] = ret;
}
intptr_t delta =
ret->instructions().start() - original_code->instructions().start();
@@ -707,10 +707,6 @@ WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
return ret;
}
-void NativeModule::SetCodeTable(uint32_t index, wasm::WasmCode* code) {
- code_table_[index] = code;
-}
-
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
wasm_code_manager_->FreeNativeModuleMemories(this);
@@ -889,11 +885,7 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
  if (!ret) return ret;
  TRACE_HEAP("%zu cloned from %zu\n", ret->instance_id, instance_id);
- if (lazy_builtin() != nullptr) {
- ret->lazy_builtin_ = ret->CloneCode(lazy_builtin());
- }
-
- if (!ret->CloneTrampolinesAndStubs(this)) return nullptr;
+ ret->CloneTrampolinesAndStubs(this);
std::unordered_map<Address, Address, AddressHasher> reverse_lookup;
for (auto& pair : trampolines_) {
@@ -917,20 +909,29 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
WasmCode* old_stub = stubs_.find(pair.first)->second;
PatchTrampolineAndStubCalls(old_stub, new_stub, reverse_lookup);
}
- if (lazy_builtin_ != nullptr) {
- PatchTrampolineAndStubCalls(lazy_builtin_, ret->lazy_builtin_,
- reverse_lookup);
- }
+ WasmCode* anonymous_lazy_builtin = nullptr;
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
const WasmCode* original_code = GetCode(i);
switch (original_code->kind()) {
case WasmCode::kLazyStub: {
- if (original_code->IsAnonymous()) {
- ret->SetCodeTable(i, ret->lazy_builtin());
- } else {
- if (!ret->CloneLazyBuiltinInto(i)) return nullptr;
+ // Use the first anonymous lazy compile stub hit in this loop as the
+ // canonical copy for all further ones by remembering it locally via
+ // the {anonymous_lazy_builtin} variable. All non-anonymous such stubs
+ // are just cloned directly via {CloneLazyBuiltinInto} below.
+ if (!original_code->IsAnonymous()) {
+ WasmCode* new_code = ret->CloneLazyBuiltinInto(original_code, i);
+ if (new_code == nullptr) return nullptr;
+ PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
+ break;
+ }
+ if (anonymous_lazy_builtin == nullptr) {
+ WasmCode* new_code = ret->CloneCode(original_code);
+ if (new_code == nullptr) return nullptr;
+ PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
+ anonymous_lazy_builtin = new_code;
}
+ ret->code_table_[i] = anonymous_lazy_builtin;
} break;
case WasmCode::kFunction: {
WasmCode* new_code = ret->CloneCode(original_code);
@@ -941,7 +942,6 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
UNREACHABLE();
}
}
- ret->specialization_data_ = specialization_data_;
return ret;
}
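The cloning loop above memoizes the first anonymous lazy-compile stub it clones and reuses that single copy for every later anonymous entry, while non-anonymous stubs still get per-entry clones. The shape of that pattern, reduced to a standalone sketch (Stub and the ownership via std::deque are illustrative, not the V8 types):

#include <cassert>
#include <deque>
#include <vector>

struct Stub { bool anonymous; };

int main() {
  std::vector<Stub> originals = {{true}, {false}, {true}};
  std::deque<Stub> clones;  // owns the cloned stubs; pointers stay stable
  std::vector<Stub*> code_table;
  Stub* canonical_anonymous = nullptr;
  for (const Stub& s : originals) {
    if (!s.anonymous) {
      clones.push_back(s);  // per-entry clone, like CloneLazyBuiltinInto
      code_table.push_back(&clones.back());
      continue;
    }
    if (canonical_anonymous == nullptr) {  // first anonymous stub: clone once
      clones.push_back(s);
      canonical_anonymous = &clones.back();
    }
    code_table.push_back(canonical_anonymous);  // later entries share it
  }
  assert(code_table[0] == code_table[2]);  // anonymous entries share one clone
}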
@@ -1009,22 +1009,17 @@ intptr_t WasmCodeManager::remaining_uncommitted() const {
return remaining_uncommitted_.Value();
}
-void WasmCodeManager::FlushICache(Address start, size_t size) {
- Assembler::FlushICache(reinterpret_cast<internal::Isolate*>(isolate_), start,
- size);
-}
-
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
- if (native_module_) {
+ if (native_module_ && (native_module_->modification_scope_depth_++) == 0) {
bool success = native_module_->SetExecutable(false);
CHECK(success);
}
}
NativeModuleModificationScope::~NativeModuleModificationScope() {
- if (native_module_) {
+ if (native_module_ && (native_module_->modification_scope_depth_--) == 1) {
bool success = native_module_->SetExecutable(true);
CHECK(success);
}
@@ -1039,8 +1034,8 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
*(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
#else
- rinfo->set_target_address(nullptr, reinterpret_cast<Address>(tag),
- SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ rinfo->set_target_address(reinterpret_cast<Address>(tag), SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
#endif
}
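The NativeModuleModificationScope change above makes the scope re-entrant: a depth counter ensures only the outermost scope toggles the module's write/execute permissions, so nested scopes become cheap no-ops. The RAII shape, as a standalone sketch with a boolean standing in for the real SetExecutable() page-permission flip:

#include <cassert>

struct Module {
  int modification_scope_depth = 0;
  bool writable = false;  // stand-in for the W^X page permission state
};

// RAII scope: only the outermost instance flips permissions.
class ModificationScope {
 public:
  explicit ModificationScope(Module* m) : m_(m) {
    if (m_->modification_scope_depth++ == 0) m_->writable = true;
  }
  ~ModificationScope() {
    if (m_->modification_scope_depth-- == 1) m_->writable = false;
  }

 private:
  Module* m_;
};

int main() {
  Module m;
  {
    ModificationScope outer(&m);
    ModificationScope inner(&m);  // nested: no extra permission flip
    assert(m.writable);
  }
  assert(!m.writable);  // restored once the outermost scope closes
}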
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 3e2a0918fb..e398f1bcfd 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_HEAP_H_
-#define V8_WASM_HEAP_H_
+#ifndef V8_WASM_WASM_CODE_MANAGER_H_
+#define V8_WASM_WASM_CODE_MANAGER_H_
#include <functional>
#include <list>
@@ -111,6 +111,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
Address constant_pool() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
+ size_t handler_table_offset() const { return handler_table_offset_; }
uint32_t stack_slots() const { return stack_slots_; }
bool is_liftoff() const { return is_liftoff_; }
@@ -120,6 +121,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
void ResetTrapHandlerIndex();
const ProtectedInstructions& protected_instructions() const {
+ // TODO(mstarzinger): Code that doesn't have trapping instructions should
+ // not be required to have this vector; make it possible for it to be null.
+ DCHECK_NOT_NULL(protected_instructions_);
return *protected_instructions_.get();
}
@@ -139,9 +143,9 @@ class V8_EXPORT_PRIVATE WasmCode final {
std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
NativeModule* owner, Maybe<uint32_t> index, Kind kind,
size_t constant_pool_offset, uint32_t stack_slots,
- size_t safepoint_table_offset,
+ size_t safepoint_table_offset, size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
- bool is_liftoff = false)
+ bool is_liftoff)
: instructions_(instructions),
reloc_info_(std::move(reloc_info)),
reloc_size_(reloc_size),
@@ -151,6 +155,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
+ handler_table_offset_(handler_table_offset),
protected_instructions_(std::move(protected_instructions)),
is_liftoff_(is_liftoff) {}
@@ -169,6 +174,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
// since there may be stack/register tagged values for large number
// conversions.
size_t safepoint_table_offset_ = 0;
+ size_t handler_table_offset_ = 0;
intptr_t trap_handler_index_ = -1;
std::shared_ptr<ProtectedInstructions> protected_instructions_;
bool is_liftoff_;
@@ -189,9 +195,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<NativeModule> Clone();
WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
- size_t safepoint_table_offset,
- std::unique_ptr<ProtectedInstructions>,
- bool is_liftoff = false);
+ size_t safepoint_table_offset, size_t handler_table_offset,
+ std::unique_ptr<ProtectedInstructions>, bool is_liftoff);
// A way to copy over JS-allocated code. This is because we compile
// certain wrappers using a different pipeline.
@@ -204,11 +209,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddInterpreterWrapper(Handle<Code> code, uint32_t index);
// When starting lazy compilation, provide the WasmLazyCompile builtin by
- // calling SetLazyBuiltin. It will initialize the code table with it, and the
- // lazy_builtin_ field. The latter is used when creating entries for exported
+ // calling SetLazyBuiltin. It will initialize the code table with it. Copies
+ // of it might be cloned later when creating entries for exported
// functions and indirect callable functions, so that they may be identified
// by the runtime.
- WasmCode* SetLazyBuiltin(Handle<Code> code);
+ void SetLazyBuiltin(Handle<Code> code);
// ExportedWrappers are WasmToWasmWrappers for functions placed on import
// tables. We construct them as-needed.
@@ -219,8 +224,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t FunctionCount() const;
WasmCode* GetCode(uint32_t index) const;
- WasmCode* lazy_builtin() const { return lazy_builtin_; }
-
// We special-case lazy cloning because we currently rely on making copies
// of the lazy builtin, to be able to identify, in the runtime, which function
// the lazy builtin is a placeholder of. If we used trampolines, we would call
@@ -229,7 +232,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
// builtin. The logic for seeking through frames would change, though.
// TODO(mtrofin): perhaps we can do exactly that - either before or after
// this change.
- WasmCode* CloneLazyBuiltinInto(uint32_t);
+ WasmCode* CloneLazyBuiltinInto(const WasmCode* code, uint32_t);
bool SetExecutable(bool executable);
@@ -239,24 +242,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
void LinkAll();
void Link(uint32_t index);
- // TODO(mtrofin): needed until we sort out exception handlers and
- // source positions, which are still on the GC-heap.
+ // TODO(mstarzinger): needed until we sort out source positions, which are
+ // still on the GC-heap.
WasmCompiledModule* compiled_module() const;
void SetCompiledModule(Handle<WasmCompiledModule>);
- // Shorthand accessors to the specialization data content.
- std::vector<wasm::GlobalHandleAddress>& function_tables() {
- return specialization_data_.function_tables;
- }
-
- std::vector<wasm::GlobalHandleAddress>& empty_function_tables() {
- return specialization_data_.empty_function_tables;
- }
-
uint32_t num_imported_functions() const { return num_imported_functions_; }
- size_t num_function_tables() const {
- return specialization_data_.empty_function_tables.size();
- }
size_t committed_memory() const { return committed_memory_; }
const size_t instance_id = 0;
@@ -266,6 +257,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
friend class WasmCodeManager;
friend class NativeModuleSerializer;
friend class NativeModuleDeserializer;
+ friend class NativeModuleModificationScope;
struct WasmCodeUniquePtrComparer {
bool operator()(const std::unique_ptr<WasmCode>& a,
@@ -276,7 +268,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
}
};
- static base::AtomicNumber<uint32_t> next_id_;
+ static base::AtomicNumber<size_t> next_id_;
NativeModule(const NativeModule&) = delete;
NativeModule& operator=(const NativeModule&) = delete;
NativeModule(uint32_t num_functions, uint32_t num_imports,
@@ -295,11 +287,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
size_t reloc_size, Maybe<uint32_t> index,
WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions>,
- bool is_liftoff = false);
- void SetCodeTable(uint32_t, wasm::WasmCode*);
+ bool is_liftoff);
WasmCode* CloneCode(const WasmCode*);
- bool CloneTrampolinesAndStubs(const NativeModule* other);
+ void CloneTrampolinesAndStubs(const NativeModule* other);
WasmCode* Lookup(Address);
Address GetLocalAddressFor(Handle<Code>);
Address CreateTrampolineTo(Handle<Code>);
@@ -319,20 +311,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
DisjointAllocationPool allocated_memory_;
std::list<VirtualMemory> owned_memory_;
WasmCodeManager* wasm_code_manager_;
- wasm::WasmCode* lazy_builtin_ = nullptr;
base::Mutex allocation_mutex_;
Handle<WasmCompiledModule> compiled_module_;
size_t committed_memory_ = 0;
bool can_request_more_memory_;
bool is_executable_ = false;
-
- // Specialization data that needs to be serialized and cloned.
- // Keeping it groupped together because it makes cloning of all these
- // elements a 1 line copy.
- struct {
- std::vector<wasm::GlobalHandleAddress> function_tables;
- std::vector<wasm::GlobalHandleAddress> empty_function_tables;
- } specialization_data_;
+ int modification_scope_depth_ = 0;
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
@@ -356,10 +340,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
WasmCode* GetCodeFromStartAddress(Address pc) const;
intptr_t remaining_uncommitted() const;
- // TODO(mtrofin): replace this API with an alternative that is Isolate-
- // independent.
- void FlushICache(Address start, size_t size);
-
private:
friend class NativeModule;
@@ -416,4 +396,5 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo);
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_CODE_MANAGER_H_
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 416d1d600a..f261f44991 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -83,32 +83,33 @@ bool IsAtWasmDirectCallTarget(RelocIterator& it) {
} // namespace
-CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone)
- : isolate_(isolate) {}
+CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone) {}
CodeSpecialization::~CodeSpecialization() {}
void CodeSpecialization::RelocateWasmContextReferences(Address new_context) {
DCHECK_NOT_NULL(new_context);
- DCHECK_NULL(new_wasm_context_address);
- new_wasm_context_address = new_context;
+ DCHECK_NULL(new_wasm_context_address_);
+ new_wasm_context_address_ = new_context;
}
void CodeSpecialization::PatchTableSize(uint32_t old_size, uint32_t new_size) {
- DCHECK(old_function_table_size == 0 && new_function_table_size == 0);
- old_function_table_size = old_size;
- new_function_table_size = new_size;
+ DCHECK(old_function_table_size_ == 0 && new_function_table_size_ == 0);
+ old_function_table_size_ = old_size;
+ new_function_table_size_ = new_size;
}
void CodeSpecialization::RelocateDirectCalls(
Handle<WasmInstanceObject> instance) {
- DCHECK(relocate_direct_calls_instance.is_null());
+ DCHECK(relocate_direct_calls_instance_.is_null());
DCHECK(!instance.is_null());
- relocate_direct_calls_instance = instance;
+ relocate_direct_calls_instance_ = instance;
}
void CodeSpecialization::RelocatePointer(Address old_ptr, Address new_ptr) {
- pointers_to_relocate.insert(std::make_pair(old_ptr, new_ptr));
+ DCHECK_EQ(0, pointers_to_relocate_.count(old_ptr));
+ DCHECK_EQ(0, pointers_to_relocate_.count(new_ptr));
+ pointers_to_relocate_.insert(std::make_pair(old_ptr, new_ptr));
}
bool CodeSpecialization::ApplyToWholeInstance(
@@ -147,14 +148,14 @@ bool CodeSpecialization::ApplyToWholeInstance(
// Patch all exported functions (JS_TO_WASM_FUNCTION).
int reloc_mode = 0;
// We need to patch WASM_CONTEXT_REFERENCE to put the correct address.
- if (new_wasm_context_address) {
+ if (new_wasm_context_address_) {
reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE);
}
// Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
- // calls, the instance registered for that (relocate_direct_calls_instance)
+ // calls, the instance registered for that (relocate_direct_calls_instance_)
// should match the instance we currently patch (instance).
- if (!relocate_direct_calls_instance.is_null()) {
- DCHECK_EQ(instance, *relocate_direct_calls_instance);
+ if (!relocate_direct_calls_instance_.is_null()) {
+ DCHECK_EQ(instance, *relocate_direct_calls_instance_);
reloc_mode |=
RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
: RelocInfo::CODE_TARGET);
@@ -170,24 +171,23 @@ bool CodeSpecialization::ApplyToWholeInstance(
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
case RelocInfo::WASM_CONTEXT_REFERENCE:
- it.rinfo()->set_wasm_context_reference(export_wrapper->GetIsolate(),
- new_wasm_context_address,
+ it.rinfo()->set_wasm_context_reference(new_wasm_context_address_,
icache_flush_mode);
break;
case RelocInfo::JS_TO_WASM_CALL: {
DCHECK(FLAG_wasm_jit_to_native);
const WasmCode* new_code = native_module->GetCode(exp.index);
- it.rinfo()->set_js_to_wasm_address(
- nullptr, new_code->instructions().start(), SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_js_to_wasm_address(new_code->instructions().start(),
+ SKIP_ICACHE_FLUSH);
} break;
case RelocInfo::CODE_TARGET: {
DCHECK(!FLAG_wasm_jit_to_native);
// Ignore calls to other builtins like ToNumber.
if (!IsAtWasmDirectCallTarget(it)) continue;
Code* new_code = Code::cast(code_table->get(exp.index));
- it.rinfo()->set_target_address(
- new_code->GetIsolate(), new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ it.rinfo()->set_target_address(new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
} break;
default:
UNREACHABLE();
@@ -210,9 +210,9 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
DCHECK_EQ(wasm::WasmCode::kFunction, code.GetWasmCode()->kind());
}
- bool patch_table_size = old_function_table_size || new_function_table_size;
- bool reloc_direct_calls = !relocate_direct_calls_instance.is_null();
- bool reloc_pointers = pointers_to_relocate.size() > 0;
+ bool patch_table_size = old_function_table_size_ || new_function_table_size_;
+ bool reloc_direct_calls = !relocate_direct_calls_instance_.is_null();
+ bool reloc_pointers = pointers_to_relocate_.size() > 0;
int reloc_mode = 0;
auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
@@ -253,7 +253,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
// bytes to find the new compiled function.
size_t offset = it.rinfo()->pc() - code.GetCode()->instruction_start();
if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(*relocate_direct_calls_instance,
+ patch_direct_calls_helper.emplace(*relocate_direct_calls_instance_,
*code.GetCode());
}
int byte_pos = AdvanceSourcePositionTableIterator(
@@ -262,10 +262,9 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
FixedArray* code_table =
- relocate_direct_calls_instance->compiled_module()->code_table();
+ relocate_direct_calls_instance_->compiled_module()->code_table();
Code* new_code = Code::cast(code_table->get(called_func_index));
- it.rinfo()->set_target_address(new_code->GetIsolate(),
- new_code->instruction_start(),
+ it.rinfo()->set_target_address(new_code->instruction_start(),
UPDATE_WRITE_BARRIER, icache_flush_mode);
changed = true;
} break;
@@ -280,7 +279,7 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
size_t offset =
it.rinfo()->pc() - code.GetWasmCode()->instructions().start();
if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(*relocate_direct_calls_instance,
+ patch_direct_calls_helper.emplace(*relocate_direct_calls_instance_,
code.GetWasmCode());
}
int byte_pos = AdvanceSourcePositionTableIterator(
@@ -289,23 +288,24 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
const WasmCode* new_code = native_module->GetCode(called_func_index);
- it.rinfo()->set_wasm_call_address(
- isolate_, new_code->instructions().start(), icache_flush_mode);
+ it.rinfo()->set_wasm_call_address(new_code->instructions().start(),
+ icache_flush_mode);
changed = true;
} break;
case RelocInfo::WASM_GLOBAL_HANDLE: {
DCHECK(reloc_pointers);
Address old_ptr = it.rinfo()->global_handle();
- if (pointers_to_relocate.count(old_ptr) == 1) {
- Address new_ptr = pointers_to_relocate[old_ptr];
- it.rinfo()->set_global_handle(isolate_, new_ptr, icache_flush_mode);
+ auto entry = pointers_to_relocate_.find(old_ptr);
+ if (entry != pointers_to_relocate_.end()) {
+ Address new_ptr = entry->second;
+ it.rinfo()->set_global_handle(new_ptr, icache_flush_mode);
changed = true;
}
} break;
case RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE:
DCHECK(patch_table_size);
it.rinfo()->update_wasm_function_table_size_reference(
- isolate_, old_function_table_size, new_function_table_size,
+ old_function_table_size_, new_function_table_size_,
icache_flush_mode);
changed = true;
break;
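The WASM_GLOBAL_HANDLE case above also replaces a `count()` probe followed by `operator[]` (two hash lookups, where `operator[]` would insert on a miss) with a single `find()`. The pattern in isolation:

#include <cassert>
#include <unordered_map>

using Address = const void*;

// One lookup instead of count() + operator[]; never inserts on a miss.
bool Relocate(const std::unordered_map<Address, Address>& map,
              Address old_ptr, Address* out) {
  auto entry = map.find(old_ptr);
  if (entry == map.end()) return false;
  *out = entry->second;
  return true;
}

int main() {
  int a = 0, b = 0;
  std::unordered_map<Address, Address> map{{&a, &b}};
  Address new_ptr = nullptr;
  assert(Relocate(map, &a, &new_ptr) && new_ptr == &b);
}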
diff --git a/deps/v8/src/wasm/wasm-code-specialization.h b/deps/v8/src/wasm/wasm-code-specialization.h
index 8f68677fbf..bed565cf05 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.h
+++ b/deps/v8/src/wasm/wasm-code-specialization.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CODE_SPECIALIZATION_H_
-#define V8_WASM_CODE_SPECIALIZATION_H_
+#ifndef V8_WASM_WASM_CODE_SPECIALIZATION_H_
+#define V8_WASM_WASM_CODE_SPECIALIZATION_H_
#include "src/assembler.h"
#include "src/identity-map.h"
@@ -47,19 +47,18 @@ class CodeSpecialization {
ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
private:
- Isolate* isolate_;
- Address new_wasm_context_address = 0;
+ Address new_wasm_context_address_ = 0;
- uint32_t old_function_table_size = 0;
- uint32_t new_function_table_size = 0;
+ uint32_t old_function_table_size_ = 0;
+ uint32_t new_function_table_size_ = 0;
- Handle<WasmInstanceObject> relocate_direct_calls_instance;
+ Handle<WasmInstanceObject> relocate_direct_calls_instance_;
- std::map<Address, Address> pointers_to_relocate;
+ std::unordered_map<Address, Address> pointers_to_relocate_;
};
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CODE_SPECIALIZATION_H_
+#endif // V8_WASM_WASM_CODE_SPECIALIZATION_H_
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.cc b/deps/v8/src/wasm/wasm-code-wrapper.cc
index 9256391543..c9eee24f3d 100644
--- a/deps/v8/src/wasm/wasm-code-wrapper.cc
+++ b/deps/v8/src/wasm/wasm-code-wrapper.cc
@@ -7,7 +7,7 @@
#include "src/objects-inl.h"
#include "src/objects/code.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -59,10 +59,17 @@ Vector<uint8_t> WasmCodeWrapper::instructions() const {
static_cast<size_t>(code->instruction_size())};
}
-Handle<WasmInstanceObject> WasmCodeWrapper::wasm_instance() const {
- return IsCodeObject()
- ? handle(WasmInstanceObject::GetOwningInstanceGC(*GetCode()))
- : handle(WasmInstanceObject::GetOwningInstance(GetWasmCode()));
+WasmInstanceObject* WasmCodeWrapper::wasm_instance() const {
+ if (IsCodeObject()) {
+ WeakCell* weak_instance =
+ WeakCell::cast(GetCode()->deoptimization_data()->get(0));
+ return WasmInstanceObject::cast(weak_instance->value());
+ }
+ return GetWasmCode()->owner()->compiled_module()->owning_instance();
+}
+
+WasmContext* WasmCodeWrapper::wasm_context() const {
+ return wasm_instance()->wasm_context()->get();
}
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.h b/deps/v8/src/wasm/wasm-code-wrapper.h
index 7d978152f1..d51bc085aa 100644
--- a/deps/v8/src/wasm/wasm-code-wrapper.h
+++ b/deps/v8/src/wasm/wasm-code-wrapper.h
@@ -1,8 +1,8 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CODE_WRAPPER_H_
-#define V8_WASM_CODE_WRAPPER_H_
+#ifndef V8_WASM_WASM_CODE_WRAPPER_H_
+#define V8_WASM_WASM_CODE_WRAPPER_H_
#include "src/handles.h"
@@ -13,6 +13,7 @@ class WasmCode;
} // namespace wasm
class Code;
+struct WasmContext;
class WasmInstanceObject;
// TODO(mtrofin): remove once we remove FLAG_wasm_jit_to_native
@@ -30,7 +31,8 @@ class WasmCodeWrapper {
Vector<uint8_t> instructions() const;
- Handle<WasmInstanceObject> wasm_instance() const;
+ WasmInstanceObject* wasm_instance() const;
+ WasmContext* wasm_context() const;
#ifdef ENABLE_DISASSEMBLER
void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
@@ -45,4 +47,4 @@ class WasmCodeWrapper {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CODE_WRAPPER_H_
+#endif // V8_WASM_WASM_CODE_WRAPPER_H_
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 5e7ce1e4f5..932501d776 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_CONSTANTS_H_
-#define V8_WASM_CONSTANTS_H_
+#ifndef V8_WASM_WASM_CONSTANTS_H_
+#define V8_WASM_WASM_CONSTANTS_H_
namespace v8 {
namespace internal {
@@ -80,4 +80,4 @@ constexpr WasmCodePosition kNoCodePosition = -1;
} // namespace internal
} // namespace v8
-#endif // V8_WASM_CONSTANTS_H_
+#endif // V8_WASM_WASM_CONSTANTS_H_
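This commit normalizes include guards across src/wasm to the full-path V8_WASM_<FILE>_H_ form and annotates each closing #endif, as the hunks above and below show. The resulting skeleton for a header at src/wasm/wasm-constants.h:

#ifndef V8_WASM_WASM_CONSTANTS_H_
#define V8_WASM_WASM_CONSTANTS_H_

// Declarations go here; the guard name encodes the full include path.

#endif  // V8_WASM_WASM_CONSTANTS_H_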
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 87995df4e6..08d436ffa4 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -438,7 +438,6 @@ class InterpreterHandle {
Handle<JSObject> GetLocalScopeObject(wasm::InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
Isolate* isolate = debug_info->GetIsolate();
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
Handle<JSObject> local_scope_object =
isolate_->factory()->NewJSObjectWithNullProto();
@@ -497,8 +496,6 @@ class InterpreterHandle {
Handle<JSArray> GetScopeDetails(Address frame_pointer, int frame_index,
Handle<WasmDebugInfo> debug_info) {
auto frame = GetInterpretedFrame(frame_pointer, frame_index);
- Isolate* isolate = debug_info->GetIsolate();
- Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
Handle<FixedArray> global_scope =
isolate_->factory()->NewFixedArray(ScopeIterator::kScopeDetailsSize);
@@ -591,8 +588,7 @@ void RedirectCallsitesInCodeGC(Code* code, CodeRelocationMapGC& map) {
Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
Handle<Code>* new_target = map.Find(target);
if (!new_target) continue;
- it.rinfo()->set_target_address(code->GetIsolate(),
- (*new_target)->instruction_start());
+ it.rinfo()->set_target_address((*new_target)->instruction_start());
}
}
@@ -606,7 +602,7 @@ void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
Address target = it.rinfo()->target_address();
auto new_target = map->find(target);
if (new_target == map->end()) continue;
- it.rinfo()->set_wasm_call_address(isolate, new_target->second);
+ it.rinfo()->set_wasm_call_address(new_target->second);
}
}
@@ -618,7 +614,7 @@ void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
Address target = it.rinfo()->js_to_wasm_address();
auto new_target = map->find(target);
if (new_target == map->end()) continue;
- it.rinfo()->set_js_to_wasm_address(isolate, new_target->second);
+ it.rinfo()->set_js_to_wasm_address(new_target->second);
}
}
@@ -685,7 +681,9 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
auto interp_handle =
Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate, *debug_info);
debug_info->set(kInterpreterHandleIndex, *interp_handle);
- return interp_handle->get()->interpreter();
+ auto ret = interp_handle->get()->interpreter();
+ ret->SetCallIndirectTestMode();
+ return ret;
}
bool WasmDebugInfo::IsWasmDebugInfo(Object* object) {
@@ -850,12 +848,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
debug_info->set_c_wasm_entries(*entries);
}
DCHECK(entries->get(index)->IsUndefined(isolate));
- Address context_address = reinterpret_cast<Address>(
- debug_info->wasm_instance()->has_memory_object()
- ? debug_info->wasm_instance()->wasm_context()
- : nullptr);
- Handle<Code> new_entry_code =
- compiler::CompileCWasmEntry(isolate, sig, context_address);
+ Handle<Code> new_entry_code = compiler::CompileCWasmEntry(isolate, sig);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("c-wasm-entry"));
Handle<SharedFunctionInfo> shared =
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 4c84b70dbd..460742d15a 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/wasm/wasm-engine.h"
+
#include "src/objects-inl.h"
#include "src/wasm/module-compiler.h"
@@ -18,6 +19,106 @@ bool WasmEngine::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
return result.ok();
}
+MaybeHandle<WasmModuleObject> WasmEngine::SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kAsmJsOrigin);
+ CHECK(!result.failed());
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes,
+ asm_js_script, asm_js_offset_table_bytes);
+}
+
+MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
+ ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
+ bytes.end(), false, kWasmOrigin);
+ if (result.failed()) {
+ thrower->CompileFailed("Wasm decoding failed", result);
+ return {};
+ }
+
+ // Transfer ownership of the WasmModule to the {WasmModuleWrapper} generated
+ // in {CompileToModuleObject}.
+ return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes,
+ Handle<Script>(), Vector<const byte>());
+}
+
+MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory) {
+ return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
+ memory);
+}
+
+void WasmEngine::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports) {
+ ErrorThrower thrower(isolate, nullptr);
+ MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+ isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
+ if (thrower.error()) {
+ MaybeHandle<Object> result = JSPromise::Reject(promise, thrower.Reify());
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+ Handle<WasmInstanceObject> instance = instance_object.ToHandleChecked();
+ MaybeHandle<Object> result = JSPromise::Resolve(promise, instance);
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+}
+
+void WasmEngine::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes, bool is_shared) {
+ if (!FLAG_wasm_async_compilation) {
+ // Asynchronous compilation disabled; fall back on synchronous compilation.
+ ErrorThrower thrower(isolate, "WasmCompile");
+ MaybeHandle<WasmModuleObject> module_object;
+ if (is_shared) {
+ // Make a copy of the wire bytes to avoid concurrent modification.
+ std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ i::wasm::ModuleWireBytes bytes_copy(copy.get(),
+ copy.get() + bytes.length());
+ module_object = SyncCompile(isolate, &thrower, bytes_copy);
+ } else {
+ // The wire bytes are not shared, so it is OK to use them directly.
+ module_object = SyncCompile(isolate, &thrower, bytes);
+ }
+ if (thrower.error()) {
+ MaybeHandle<Object> result = JSPromise::Reject(promise, thrower.Reify());
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ MaybeHandle<Object> result = JSPromise::Resolve(promise, module);
+ CHECK_EQ(result.is_null(), isolate->has_pending_exception());
+ return;
+ }
+
+ if (FLAG_wasm_test_streaming) {
+ std::shared_ptr<StreamingDecoder> streaming_decoder =
+ isolate->wasm_engine()
+ ->compilation_manager()
+ ->StartStreamingCompilation(isolate, handle(isolate->context()),
+ promise);
+ streaming_decoder->OnBytesReceived(bytes.module_bytes());
+ streaming_decoder->Finish();
+ return;
+ }
+ // Make a copy of the wire bytes in case the user program changes them
+ // during asynchronous compilation.
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ isolate->wasm_engine()->compilation_manager()->StartAsyncCompileJob(
+ isolate, std::move(copy), bytes.length(), handle(isolate->context()),
+ promise);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
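Both the shared-bytes path and the asynchronous path above defend against later mutation of the caller's buffer (for example through a SharedArrayBuffer) by compiling from a private snapshot. The core of that defensive copy, standalone:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

// Snapshot caller-owned bytes so concurrent modification cannot race
// with compilation; the compiler only ever sees the private copy.
std::unique_ptr<uint8_t[]> CopyWireBytes(const uint8_t* start,
                                         std::size_t length) {
  std::unique_ptr<uint8_t[]> copy(new uint8_t[length]);
  std::memcpy(copy.get(), start, length);
  return copy;
}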
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index bf06b47ed7..8a698c83b9 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef WASM_ENGINE_H_
-#define WASM_ENGINE_H_
+#ifndef V8_WASM_WASM_ENGINE_H_
+#define V8_WASM_WASM_ENGINE_H_
#include <memory>
@@ -14,8 +14,14 @@
namespace v8 {
namespace internal {
+class WasmModuleObject;
+class WasmInstanceObject;
+
namespace wasm {
+class ErrorThrower;
+struct ModuleWireBytes;
+
// The central data structure that represents an engine instance capable of
// loading, instantiating, and executing WASM code.
class V8_EXPORT_PRIVATE WasmEngine {
@@ -23,8 +29,44 @@ class V8_EXPORT_PRIVATE WasmEngine {
explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
: code_manager_(std::move(code_manager)) {}
+ // Synchronously validates the given bytes that represent an encoded WASM
+ // module.
bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes);
+ // Synchronously compiles the given bytes that represent a translated
+ // asm.js module.
+ MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes);
+
+ // Synchronously compiles the given bytes that represent an encoded WASM
+ // module.
+ MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
+ ErrorThrower* thrower,
+ const ModuleWireBytes& bytes);
+
+ // Synchronously instantiates the given WASM module with the given imports.
+ // If the module represents an asm.js module, then the supplied {memory}
+ // should be used as the memory of the instance.
+ MaybeHandle<WasmInstanceObject> SyncInstantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+ MaybeHandle<JSArrayBuffer> memory);
+
+ // Begin an asynchronous compilation of the given bytes that represent an
+ // encoded WASM module, placing the result in the supplied {promise}.
+ // The {is_shared} flag indicates if the bytes backing the module could
+ // be shared across threads, i.e. could be concurrently modified.
+ void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes, bool is_shared);
+
+ // Begin an asynchronous instantiation of the given WASM module, placing the
+ // result in the supplied {promise}.
+ void AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<WasmModuleObject> module_object,
+ MaybeHandle<JSReceiver> imports);
+
CompilationManager* compilation_manager() { return &compilation_manager_; }
WasmCodeManager* code_manager() const { return code_manager_.get(); }
@@ -43,4 +85,4 @@ class V8_EXPORT_PRIVATE WasmEngine {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_ENGINE_H_
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index dea620338a..d44f5b242f 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -4,8 +4,8 @@
#include <stdint.h>
-#ifndef WASM_EXTERNAL_REFS_H
-#define WASM_EXTERNAL_REFS_H
+#ifndef V8_WASM_WASM_EXTERNAL_REFS_H_
+#define V8_WASM_WASM_EXTERNAL_REFS_H_
namespace v8 {
namespace internal {
@@ -77,4 +77,5 @@ void call_trap_callback_for_testing();
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_EXTERNAL_REFS_H_
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 2f8fb0bf4a..3bcb1b5ef6 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -477,6 +477,17 @@ int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
return output;
}
+int64_t ExecuteI64SConvertSatF32(float a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64SConvertF32(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<int64_t>::min()
+ : std::numeric_limits<int64_t>::max());
+}
+
int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
int64_t output;
if (!float64_to_int64_wrapper(&a, &output)) {
@@ -485,6 +496,17 @@ int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
return output;
}
+int64_t ExecuteI64SConvertSatF64(double a) {
+ TrapReason base_trap = kTrapCount;
+ int64_t val = ExecuteI64SConvertF64(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<int64_t>::min()
+ : std::numeric_limits<int64_t>::max());
+}
+
uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
uint64_t output;
if (!float32_to_uint64_wrapper(&a, &output)) {
@@ -493,6 +515,17 @@ uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
return output;
}
+uint64_t ExecuteI64UConvertSatF32(float a) {
+ TrapReason base_trap = kTrapCount;
+ uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
+ : std::numeric_limits<uint64_t>::max());
+}
+
uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
uint64_t output;
if (!float64_to_uint64_wrapper(&a, &output)) {
@@ -501,6 +534,17 @@ uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
return output;
}
+uint64_t ExecuteI64UConvertSatF64(double a) {
+ TrapReason base_trap = kTrapCount;
+ uint64_t val = ExecuteI64UConvertF64(a, &base_trap);
+ if (base_trap == kTrapCount) {
+ return val;
+ }
+ return std::isnan(a) ? 0
+ : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
+ : std::numeric_limits<uint64_t>::max());
+}
+
inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
return static_cast<int64_t>(a);
}
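The four ExecuteI64*ConvertSat* helpers added above share one saturating semantics: where the trapping conversion would fail, NaN maps to 0 and out-of-range values clamp to the target type's minimum or maximum. A self-contained illustration of those semantics (the helper name is illustrative, not V8's):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

// Saturating double -> int64: NaN -> 0, out-of-range clamps to min/max.
int64_t I64SConvertSat(double a) {
  if (std::isnan(a)) return 0;
  if (a < static_cast<double>(std::numeric_limits<int64_t>::min()))
    return std::numeric_limits<int64_t>::min();
  if (a >= static_cast<double>(std::numeric_limits<int64_t>::max()))
    return std::numeric_limits<int64_t>::max();
  return static_cast<int64_t>(a);  // in range: ordinary truncation
}

int main() {
  assert(I64SConvertSat(std::nan("")) == 0);
  assert(I64SConvertSat(1e300) == std::numeric_limits<int64_t>::max());
  assert(I64SConvertSat(-1e300) == std::numeric_limits<int64_t>::min());
  assert(I64SConvertSat(-3.9) == -3);  // truncates toward zero
}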
@@ -924,6 +968,9 @@ class CodeMap {
// This handle is set and reset by the SetInstanceObject() /
// ClearInstanceObject() methods, which are used by the HeapObjectsScope.
Handle<WasmInstanceObject> instance_;
+ // TODO(wasm): Remove this testing wart. It is needed because interpreter
+ // entry stubs are not generated when testing the interpreter in cctests.
+ bool call_indirect_through_module_ = false;
public:
CodeMap(Isolate* isolate, const WasmModule* module,
@@ -942,6 +989,12 @@ class CodeMap {
}
}
+ bool call_indirect_through_module() { return call_indirect_through_module_; }
+
+ void set_call_indirect_through_module(bool val) {
+ call_indirect_through_module_ = val;
+ }
+
void SetInstanceObject(Handle<WasmInstanceObject> instance) {
DCHECK(instance_.is_null());
instance_ = instance;
@@ -987,12 +1040,34 @@ class CodeMap {
}
InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
+ uint32_t saved_index;
+ USE(saved_index);
if (table_index >= module_->function_tables.size()) return nullptr;
+ // Mask table index for SSCA mitigation.
+ saved_index = table_index;
+ table_index &=
+ static_cast<int32_t>((table_index - module_->function_tables.size()) &
+ ~static_cast<int32_t>(table_index)) >>
+ 31;
+ DCHECK_EQ(table_index, saved_index);
const WasmIndirectFunctionTable* table =
&module_->function_tables[table_index];
if (entry_index >= table->values.size()) return nullptr;
+ // Mask entry_index for SSCA mitigation.
+ saved_index = entry_index;
+ entry_index &= static_cast<int32_t>((entry_index - table->values.size()) &
+ ~static_cast<int32_t>(entry_index)) >>
+ 31;
+ DCHECK_EQ(entry_index, saved_index);
uint32_t index = table->values[entry_index];
if (index >= interpreter_code_.size()) return nullptr;
+ // Mask index for SSCA mitigation.
+ saved_index = index;
+ index &= static_cast<int32_t>((index - interpreter_code_.size()) &
+ ~static_cast<int32_t>(index)) >>
+ 31;
+ DCHECK_EQ(index, saved_index);
+
return GetCode(index);
}
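The masking added to GetIndirectCode() is a branchless bounds clamp used as a speculative side-channel (Spectre/SSCA) mitigation: after the ordinary bounds check, the index is ANDed with a mask that is all-ones when index < size and zero otherwise, so even a mis-speculated access stays inside the table. A standalone sketch of the trick, assuming sizes below 2^31 as the surrounding DCHECKs imply:

#include <cassert>
#include <cstdint>

// Mask is all-ones when index < size, zero otherwise; no data-dependent
// branch. (index - size) has its sign bit set iff index < size, and the
// ~index term clears the sign bit for indices >= 2^31, so the arithmetic
// shift by 31 produces the desired mask.
uint32_t ClampIndex(uint32_t index, uint32_t size) {
  return index & (static_cast<int32_t>((index - size) & ~index) >> 31);
}

int main() {
  assert(ClampIndex(3, 10) == 3);   // in bounds: unchanged
  assert(ClampIndex(10, 10) == 0);  // out of bounds: forced to entry 0
  assert(ClampIndex(12, 10) == 0);
}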
@@ -1543,9 +1618,21 @@ class ThreadImpl {
case kExprI32UConvertSatF64:
Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
return true;
+ case kExprI64SConvertSatF32:
+ Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
+ return true;
+ case kExprI64UConvertSatF32:
+ Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
+ return true;
+ case kExprI64SConvertSatF64:
+ Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
+ return true;
+ case kExprI64UConvertSatF64:
+ Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
+ return true;
default:
- V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
- code->start[pc], OpcodeName(code->start[pc]));
+ FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
+ OpcodeName(code->start[pc]));
UNREACHABLE();
}
return false;
@@ -1912,7 +1999,7 @@ class ThreadImpl {
// Assume only one table for now.
DCHECK_LE(module()->function_tables.size(), 1u);
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, operand.index);
+ CallIndirectFunction(0, entry_index, operand.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
@@ -2071,6 +2158,9 @@ class ThreadImpl {
WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
Push(WasmValue(result));
len = 1 + operand.length;
+ // Treat one grow_memory instruction like 1000 other instructions,
+ // because it is a really expensive operation.
+ if (max > 0) max = std::max(0, max - 1000);
break;
}
case kExprMemorySize: {
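Schematically, `max` in the grow_memory case above is the per-Execute() instruction budget that lets the interpreter pause between instructions. A sketch of the surcharge, not the actual interpreter loop (names illustrative):

    #include <algorithm>

    // An ordinary instruction costs one budget unit; after the change above,
    // one grow_memory additionally costs 1000 units.
    int ChargeGrowMemory(int max) {
      --max;                                       // the instruction itself
      if (max > 0) max = std::max(0, max - 1000);  // the grow_memory surcharge
      return max;
    }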
@@ -2152,8 +2242,8 @@ class ThreadImpl {
#undef EXECUTE_UNOP
default:
- V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
- code->start[pc], OpcodeName(code->start[pc]));
+ FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
+ OpcodeName(code->start[pc]));
UNREACHABLE();
}
@@ -2386,18 +2476,24 @@ class ThreadImpl {
arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer data pointer in a handle. As this is an aligned
- // pointer, to the GC it will look like a Smi.
+ // Wrap the arg_buffer data pointer and the WasmContext* in a handle. As
+ // this is an aligned pointer, to the GC it will look like a Smi.
Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
isolate);
DCHECK(!arg_buffer_obj->IsHeapObject());
+ static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
+ "code below needs adaption");
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
+ WasmContext* context = code.wasm_context();
+ Handle<Object> context_obj(reinterpret_cast<Object*>(context), isolate);
+ DCHECK(!context_obj->IsHeapObject());
args[compiler::CWasmEntryParameters::kCodeObject] =
code.IsCodeObject()
? Handle<Object>::cast(code.GetCode())
: Handle<Object>::cast(isolate->factory()->NewForeign(
code.GetWasmCode()->instructions().start(), TENURED));
+ args[compiler::CWasmEntryParameters::kWasmContext] = context_obj;
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
Handle<Object> receiver = isolate->factory()->undefined_value();
@@ -2466,13 +2562,19 @@ class ThreadImpl {
DCHECK(AllowHeapAllocation::IsAllowed());
if (code->kind() == wasm::WasmCode::kFunction) {
- DCHECK_EQ(code->owner()->compiled_module()->owning_instance(),
- codemap()->instance());
+ if (code->owner()->compiled_module()->owning_instance() !=
+ codemap()->instance()) {
+ return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
+ signature);
+ }
return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
}
+
if (code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
- } else if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper) {
+ }
+ if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper ||
+ code->kind() == wasm::WasmCode::kInterpreterStub) {
return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
signature);
}
@@ -2501,23 +2603,8 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
- bool no_func_tables = !codemap()->has_instance();
- if (FLAG_wasm_jit_to_native) {
- no_func_tables = no_func_tables || codemap()
- ->instance()
- ->compiled_module()
- ->GetNativeModule()
- ->function_tables()
- .empty();
- } else {
- no_func_tables =
- no_func_tables ||
- !codemap()->instance()->compiled_module()->has_function_tables();
- }
- if (no_func_tables) {
- // No instance. Rely on the information stored in the WasmModule.
- // TODO(wasm): This is only needed for testing. Refactor testing to use
- // the same paths as production.
+ if (codemap()->call_indirect_through_module()) {
+ // Rely on the information stored in the WasmModule.
InterpreterCode* code =
codemap()->GetIndirectCode(table_index, entry_index);
if (!code) return {ExternalCallResult::INVALID_FUNC};
@@ -2551,7 +2638,7 @@ class ThreadImpl {
DCHECK_EQ(canonical_sig_index,
module()->signature_map.Find(module()->signatures[sig_index]));
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
// Check signature.
FixedArray* fun_tables = compiled_module->function_tables();
if (table_index >= static_cast<uint32_t>(fun_tables->length())) {
@@ -2578,33 +2665,23 @@ class ThreadImpl {
target_gc = Code::cast(fun_table->get(
compiler::FunctionTableCodeOffset(static_cast<int>(entry_index))));
} else {
- // Check signature.
- std::vector<GlobalHandleAddress>& fun_tables =
- compiled_module->GetNativeModule()->function_tables();
- if (table_index >= fun_tables.size()) {
+ // The function table is stored in the wasm context.
+ // TODO(wasm): the wasm interpreter currently supports only one table.
+ CHECK_EQ(0, table_index);
+ // Bounds check against table size.
+ if (entry_index >= wasm_context_->table_size) {
return {ExternalCallResult::INVALID_FUNC};
}
- // Reconstitute the global handle to the function table, from the
- // address stored in the respective table of tables.
- FixedArray* fun_table =
- *reinterpret_cast<FixedArray**>(fun_tables[table_index]);
- // Function tables store <smi, code> pairs.
- int num_funcs_in_table =
- fun_table->length() / compiler::kFunctionTableEntrySize;
- if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- int found_sig = Smi::ToInt(fun_table->get(
- compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
- if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ // Signature check.
+ int32_t entry_sig = wasm_context_->table[entry_index].sig_id;
+ if (entry_sig != static_cast<int32_t>(canonical_sig_index)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
-
+ // Load the target address (first instruction of code).
+ Address first_instr = wasm_context_->table[entry_index].target;
+ // TODO(titzer): load the wasm context instead of relying on the
+ // target code being specialized to the target instance.
// Get code object.
- Address first_instr =
- Foreign::cast(fun_table->get(compiler::FunctionTableCodeOffset(
- static_cast<int>(entry_index))))
- ->foreign_address();
target =
isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
first_instr);
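In short, with WASM_CONTEXT_TABLES the FixedArray walk is replaced by three steps against the raw context table. Condensed from the code above (error handling and the code-manager lookup elided; a summary, not a drop-in):

    // 1. Bounds check against the table size stored in the wasm context.
    if (entry_index >= wasm_context_->table_size)
      return {ExternalCallResult::INVALID_FUNC};
    // 2. Signature check against the canonicalized signature index.
    if (wasm_context_->table[entry_index].sig_id !=
        static_cast<int32_t>(canonical_sig_index))
      return {ExternalCallResult::SIGNATURE_MISMATCH};
    // 3. Dispatch via the stored address of the callee's first instruction.
    Address first_instr = wasm_context_->table[entry_index].target;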
@@ -2897,6 +2974,10 @@ void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
internals_->codemap_.SetFunctionCode(function, start, end);
}
+void WasmInterpreter::SetCallIndirectTestMode() {
+ internals_->codemap_.set_call_indirect_through_module(true);
+}
+
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
// Create some dummy structures, to avoid special-casing the implementation
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index b0c100b5a9..88d21c37d1 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_INTERPRETER_H_
-#define V8_WASM_INTERPRETER_H_
+#ifndef V8_WASM_WASM_INTERPRETER_H_
+#define V8_WASM_WASM_INTERPRETER_H_
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
@@ -215,6 +215,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Manually adds code to the interpreter for the given function.
void SetFunctionCodeForTesting(const WasmFunction* function,
const byte* start, const byte* end);
+ void SetCallIndirectTestMode();
// Computes the control transfers for the given bytecode. Used internally in
// the interpreter, but exposed for testing.
@@ -230,4 +231,4 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_INTERPRETER_H_
+#endif // V8_WASM_WASM_INTERPRETER_H_
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index ce2bf42455..dc1f690a63 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -16,8 +16,6 @@
#include "src/objects.h"
#include "src/parsing/parse-info.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-api.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
@@ -40,6 +38,35 @@ namespace {
} \
} while (false)
+// Like an ErrorThrower, but turns all pending exceptions into scheduled
+// exceptions when going out of scope. Use this in API methods.
+// Note that pending exceptions are not necessarily created by the ErrorThrower
+// itself; they may, for example, be raised by the wasm start function. There
+// might also be a scheduled exception, created by another API call (e.g.
+// v8::Object::Get). But there should never be both a pending and a scheduled
+// exception.
+class ScheduledErrorThrower : public ErrorThrower {
+ public:
+ ScheduledErrorThrower(i::Isolate* isolate, const char* context)
+ : ErrorThrower(isolate, context) {}
+
+ ~ScheduledErrorThrower();
+};
+
+ScheduledErrorThrower::~ScheduledErrorThrower() {
+ // There should never be both a pending and a scheduled exception.
+ DCHECK(!isolate()->has_scheduled_exception() ||
+ !isolate()->has_pending_exception());
+ // Don't throw another error if there is already a scheduled error.
+ if (isolate()->has_scheduled_exception()) {
+ Reset();
+ } else if (isolate()->has_pending_exception()) {
+ Reset();
+ isolate()->OptionalRescheduleException(false);
+ } else if (error()) {
+ isolate()->ScheduleThrow(*Reify());
+ }
+}
+
i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
}
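An illustrative use of the new ScheduledErrorThrower, mirroring the call sites rewritten below (the function name here is hypothetical): any error recorded while the thrower is live becomes a scheduled exception when it leaves scope.

    void WebAssemblyExample(const v8::FunctionCallbackInfo<v8::Value>& args) {
      i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
      ScheduledErrorThrower thrower(i_isolate, "WebAssembly.example()");
      if (!args.IsConstructCall()) {
        thrower.TypeError("must be invoked with 'new'");
        return;  // ~ScheduledErrorThrower schedules the TypeError
      }
    }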
@@ -123,8 +150,7 @@ void WebAssemblyCompileStreaming(
ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(resolver->GetPromise());
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.compileStreaming()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compileStreaming()");
thrower.CompileError("Wasm code generation disallowed by embedder");
auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
CHECK_IMPLIES(!maybe.FromMaybe(false),
@@ -144,7 +170,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
thrower.CompileError("Wasm code generation disallowed by embedder");
@@ -165,7 +191,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
// Asynchronous compilation handles copying wire bytes if necessary.
- i::wasm::AsyncCompile(i_isolate, promise, bytes, is_shared);
+ i_isolate->wasm_engine()->AsyncCompile(i_isolate, promise, bytes, is_shared);
}
// WebAssembly.validate(bytes) -> bool
@@ -173,7 +199,7 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.validate()");
bool is_shared = false;
auto bytes = GetFirstArgumentAsBytes(args, &thrower, &is_shared);
@@ -209,7 +235,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (i_isolate->wasm_module_callback()(args)) return;
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Module must be invoked with 'new'");
@@ -233,10 +259,12 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes_copy);
+ module_obj =
+ i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_obj = i::wasm::SyncCompile(i_isolate, &thrower, bytes);
+ module_obj =
+ i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes);
}
if (module_obj.is_null()) return;
@@ -250,8 +278,7 @@ void WebAssemblyModuleImports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.imports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module.imports()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -264,8 +291,7 @@ void WebAssemblyModuleExports(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.exports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module.exports()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -279,8 +305,8 @@ void WebAssemblyModuleCustomSections(
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Module.customSections()");
+ ScheduledErrorThrower thrower(i_isolate,
+ "WebAssembly.Module.customSections()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
@@ -303,8 +329,7 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
i::MaybeHandle<i::Object> instance_object;
{
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly Instantiation");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
i::MaybeHandle<i::JSReceiver> maybe_imports =
GetValueAsImports(ffi, &thrower);
if (thrower.error()) return {};
@@ -312,9 +337,9 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
i::Handle<i::WasmModuleObject> module_obj =
i::Handle<i::WasmModuleObject>::cast(
Utils::OpenHandle(Object::Cast(*module)));
- instance_object =
- i::wasm::SyncInstantiate(i_isolate, &thrower, module_obj, maybe_imports,
- i::MaybeHandle<i::JSArrayBuffer>());
+ instance_object = i_isolate->wasm_engine()->SyncInstantiate(
+ i_isolate, &thrower, module_obj, maybe_imports,
+ i::MaybeHandle<i::JSArrayBuffer>());
}
DCHECK_EQ(instance_object.is_null(), i_isolate->has_scheduled_exception());
@@ -386,7 +411,7 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
if (i_isolate->wasm_instance_callback()(args)) return;
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
return;
@@ -444,8 +469,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.instantiate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
HandleScope scope(isolate);
@@ -521,7 +545,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Module()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Table must be invoked with 'new'");
return;
@@ -578,7 +602,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
if (!args.IsConstructCall()) {
thrower.TypeError("WebAssembly.Memory must be invoked with 'new'");
return;
@@ -672,8 +696,7 @@ void WebAssemblyInstanceGetExports(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Instance.exports()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance.exports()");
EXTRACT_THIS(receiver, WasmInstanceObject);
i::Handle<i::JSObject> exports_object(receiver->exports_object());
args.GetReturnValue().Set(Utils::ToLocal(exports_object));
@@ -684,8 +707,7 @@ void WebAssemblyTableGetLength(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Table.length()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.length()");
EXTRACT_THIS(receiver, WasmTableObject);
args.GetReturnValue().Set(
v8::Number::New(isolate, receiver->current_length()));
@@ -696,7 +718,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
@@ -738,7 +760,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
@@ -759,7 +781,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmTableObject);
@@ -807,8 +829,7 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Memory.grow()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.grow()");
Local<Context> context = isolate->GetCurrentContext();
EXTRACT_THIS(receiver, WasmMemoryObject);
@@ -826,7 +847,7 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
uint32_t old_size =
- old_buffer->byte_length()->Number() / i::wasm::kSpecMaxWasmMemoryPages;
+ old_buffer->byte_length()->Number() / i::wasm::kWasmPageSize;
int64_t new_size64 = old_size + delta_size;
if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
thrower.RangeError(new_size64 < old_size ? "trying to shrink memory"
@@ -849,8 +870,7 @@ void WebAssemblyMemoryGetBuffer(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- i::wasm::ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.Memory.buffer");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.buffer");
EXTRACT_THIS(receiver, WasmMemoryObject);
i::Handle<i::Object> buffer_obj(receiver->array_buffer(), i_isolate);
@@ -931,7 +951,6 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
name, isolate->strict_function_map(), LanguageMode::kStrict);
Handle<JSFunction> cons = factory->NewFunction(args);
JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
- cons->shared()->set_instance_class_name(*name);
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 926bd7647a..bdcc1f061e 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_JS_H_
-#define V8_WASM_JS_H_
+#ifndef V8_WASM_WASM_JS_H_
+#define V8_WASM_WASM_JS_H_
-#include "src/allocation.h"
-#include "src/base/hashmap.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -16,14 +15,9 @@ class WasmJs {
public:
V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
bool exposed_on_global_object);
-
- // WebAssembly.Table.
- static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
-
- // WebAssembly.Memory
- static bool IsWasmMemoryObject(Isolate* isolate, Handle<Object> value);
};
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_WASM_WASM_JS_H_
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index 184b6329ba..c1011c3f89 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -48,6 +48,8 @@ static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * kWasmPageSize;
+static_assert(kV8MaxWasmMemoryBytes <= std::numeric_limits<int32_t>::max(),
+ "max memory bytes should fit in int32_t");
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
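A worked check of the static_assert added above, assuming this version's constants kV8MaxWasmMemoryPages = 32767 and kWasmPageSize = 65536 (64 KiB):

    static_assert(32767ull * 65536ull == 2147418112ull,
                  "kV8MaxWasmMemoryBytes under the assumed constants");
    static_assert(2147418112ull <= 2147483647ull,  // INT32_MAX
                  "so the assertion above holds");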
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index fcbe60ae0e..38cd8973a6 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -24,6 +24,9 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
// platforms, we always fall back on bounds checks.
#if V8_TARGET_ARCH_64_BIT
static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
+#else
+ static constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
+#endif
size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
DCHECK_GE(old_count + num_bytes, old_count);
@@ -31,7 +34,6 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
return true;
}
allocated_address_space_ -= num_bytes;
-#endif
return false;
}
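The accounting above is optimistic: bump the counter first, roll back on overshoot, so concurrent reservations never double-count. A stand-alone sketch assuming the counter is a std::atomic<size_t> (consistent with the fetch_add call above):

    #include <atomic>
    #include <cstddef>

    constexpr size_t kLimit = 0x10000000000L;  // 1 TiB, the 64-bit limit above
    std::atomic<size_t> allocated{0};

    bool Reserve(size_t num_bytes) {
      size_t old_count = allocated.fetch_add(num_bytes);
      if (old_count + num_bytes <= kLimit) return true;
      allocated -= num_bytes;  // roll back the optimistic bump
      return false;
    }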
@@ -44,59 +46,42 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
bool require_guard_regions,
void** allocation_base,
size_t* allocation_length) {
- // TODO(eholk): Right now require_guard_regions has no effect on 32-bit
- // systems. It may be safer to fail instead, given that other code might do
- // things that would be unsafe if they expected guard pages where there
- // weren't any.
- if (require_guard_regions) {
- // TODO(eholk): On Windows we want to make sure we don't commit the guard
- // pages yet.
-
- // We always allocate the largest possible offset into the heap, so the
- // addressable memory after the guard page can be made inaccessible.
- *allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
- DCHECK_EQ(0, size % CommitPageSize());
-
- WasmAllocationTracker* const allocation_tracker =
- isolate->wasm_engine()->allocation_tracker();
-
- // Let the WasmAllocationTracker know we are going to reserve a bunch of
- // address space.
- if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
- // If we are over the address space limit, fail.
- return nullptr;
- }
-
- // The Reserve makes the whole region inaccessible by default.
- *allocation_base =
- isolate->array_buffer_allocator()->Reserve(*allocation_length);
- if (*allocation_base == nullptr) {
- allocation_tracker->ReleaseAddressSpace(*allocation_length);
- return nullptr;
- }
+ // We always allocate the largest possible offset into the heap, so the
+ // addressable memory after the guard page can be made inaccessible.
+ *allocation_length = require_guard_regions
+ ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+ : base::bits::RoundUpToPowerOfTwo32(RoundUp(
+ static_cast<uint32_t>(size), kWasmPageSize));
+ DCHECK_GE(*allocation_length, size);
+
+ WasmAllocationTracker* const allocation_tracker =
+ isolate->wasm_engine()->allocation_tracker();
+
+ // Let the WasmAllocationTracker know we are going to reserve a bunch of
+ // address space.
+ if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+ // If we are over the address space limit, fail.
+ return nullptr;
+ }
- void* memory = *allocation_base;
-
- // Make the part we care about accessible.
- isolate->array_buffer_allocator()->SetProtection(
- memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
-
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(size);
-
- return memory;
- } else {
- // TODO(titzer): use guard regions for minicage and merge with above code.
- CHECK_LE(size, kV8MaxWasmMemoryBytes);
- *allocation_length =
- base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
- void* memory =
- size == 0
- ? nullptr
- : isolate->array_buffer_allocator()->Allocate(*allocation_length);
- *allocation_base = memory;
- return memory;
+ // The Reserve makes the whole region inaccessible by default.
+ *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
+ PageAllocator::kNoAccess);
+ if (*allocation_base == nullptr) {
+ allocation_tracker->ReleaseAddressSpace(*allocation_length);
+ return nullptr;
}
+
+ void* memory = *allocation_base;
+
+ // Make the part we care about accessible.
+ CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
+ PageAllocator::kReadWrite));
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(size);
+
+ return memory;
}
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
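For the non-guard-region path above, the reservation size is rounded up to a whole number of wasm pages and then to a power of two. A worked example of that arithmetic (assuming kWasmPageSize = 65536; NextPow2 mirrors base::bits::RoundUpToPowerOfTwo32):

    #include <cstdint>

    uint32_t NextPow2(uint32_t v) {  // smallest power of two >= v, for v > 0
      v--;
      v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
      return v + 1;
    }
    // A 3-page memory: 3 * 65536 = 196608 bytes (already page-aligned);
    // NextPow2(196608) == 262144, i.e. 4 pages of reserved address space.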
@@ -150,8 +135,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
#endif
constexpr bool is_external = false;
+ // All buffers have guard regions now, but sometimes they are small.
+ constexpr bool has_guard_region = true;
return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, require_guard_regions, shared);
+ size, is_external, has_guard_region, shared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index c5d6ef5154..438014b417 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_MEMORY_H_
-#define V8_WASM_MEMORY_H_
+#ifndef V8_WASM_WASM_MEMORY_H_
+#define V8_WASM_WASM_MEMORY_H_
#include "src/flags.h"
#include "src/handles.h"
@@ -49,4 +49,4 @@ void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
} // namespace internal
} // namespace v8
-#endif // V8_WASM_MODULE_H_
+#endif // V8_WASM_WASM_MEMORY_H_
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index b6b9117ae5..909b62a16f 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -18,7 +18,6 @@
#include "src/trap-handler/trap-handler.h"
#include "src/v8.h"
#include "src/wasm/compilation-manager.h"
-#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-code-specialization.h"
@@ -157,7 +156,7 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate,
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
- WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig) {
+ WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig) {
Address new_context_address =
reinterpret_cast<Address>(owning_instance->wasm_context()->get());
if (!wasm_code.IsCodeObject()) {
@@ -173,6 +172,8 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
wasm::WasmCode* exported_wrapper =
native_module->GetExportedWrapper(wasm_code.GetWasmCode()->index());
if (exported_wrapper == nullptr) {
+ wasm::NativeModuleModificationScope native_modification_scope(
+ native_module);
Handle<Code> new_wrapper = compiler::CompileWasmToWasmWrapper(
isolate, wasm_code, sig, new_context_address);
exported_wrapper = native_module->AddExportedWrapper(
@@ -181,10 +182,11 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
Address target = exported_wrapper->instructions().start();
return isolate->factory()->NewForeign(target, TENURED);
}
+ CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
Handle<Code> code = compiler::CompileWasmToWasmWrapper(
isolate, wasm_code, sig, new_context_address);
AttachWasmFunctionInfo(isolate, code, owning_instance,
- static_cast<int>(index));
+ static_cast<int>(func_index));
return code;
}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 492c51487f..405b5f3ff4 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_MODULE_H_
-#define V8_WASM_MODULE_H_
+#ifndef V8_WASM_WASM_MODULE_H_
+#define V8_WASM_WASM_MODULE_H_
#include <memory>
@@ -275,7 +275,7 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
- WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig);
+ WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig);
void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
Handle<FixedArray> code_table);
@@ -323,4 +323,4 @@ class TruncatedUserString {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_MODULE_H_
+#endif // V8_WASM_WASM_MODULE_H_
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 0a85862174..4891ad671a 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OBJECTS_INL_H_
-#define V8_WASM_OBJECTS_INL_H_
+#ifndef V8_WASM_WASM_OBJECTS_INL_H_
+#define V8_WASM_WASM_OBJECTS_INL_H_
#include "src/heap/heap-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -158,4 +158,4 @@ void WasmCompiledModule::ReplaceCodeTableForTesting(
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OBJECTS_INL_H_
+#endif // V8_WASM_WASM_OBJECTS_INL_H_
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index c92a51716a..f06f3240f0 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -248,11 +248,44 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate,
}
void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ if (count == 0) return; // Degenerate case: nothing to do.
+
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
uint32_t old_size = functions()->length();
+ constexpr int kInvalidSigIndex = -1;
+
+ if (WASM_CONTEXT_TABLES) {
+ // If tables are stored in the WASM context, no code patching is
+ // necessary. We simply have to grow the raw tables in the WasmContext
+ // for each instance that has imported this table.
+
+ // TODO(titzer): replace the dispatch table with a weak list of all
+ // the instances that import a given table.
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // TODO(titzer): potentially racy update of WasmContext::table
+ WasmContext* wasm_context =
+ WasmInstanceObject::cast(dispatch_tables->get(i))
+ ->wasm_context()
+ ->get();
+ DCHECK_EQ(old_size, wasm_context->table_size);
+ uint32_t new_size = old_size + count;
+ wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
+ realloc(wasm_context->table,
+ new_size * sizeof(IndirectFunctionTableEntry)));
+ for (uint32_t j = old_size; j < new_size; j++) {
+ wasm_context->table[j].sig_id = kInvalidSigIndex;
+ wasm_context->table[j].context = nullptr;
+ wasm_context->table[j].target = nullptr;
+ }
+ wasm_context->table_size = new_size;
+ }
+ return;
+ }
+
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
Zone specialization_zone(isolate->allocator(), ZONE_NAME);
for (int i = 0; i < dispatch_tables->length();
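The loop above grows the raw indirect-function table behind every instance importing this table. The entry layout implied by the writes, with WasmContext and Address as in the surrounding code (the actual definition lives elsewhere in the tree; the field order here is an assumption):

    struct IndirectFunctionTableEntry {
      int32_t sig_id;        // canonical signature id; kInvalidSigIndex if empty
      WasmContext* context;  // wasm context of the instance owning the target
      Address target;        // address of the callee's first instruction
    };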
@@ -272,24 +305,7 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
*new_function_table);
// Patch the code of the respective instance.
- if (FLAG_wasm_jit_to_native) {
- DisallowHeapAllocation no_gc;
- wasm::CodeSpecialization code_specialization(isolate,
- &specialization_zone);
- WasmInstanceObject* instance =
- WasmInstanceObject::cast(dispatch_tables->get(i));
- WasmCompiledModule* compiled_module = instance->compiled_module();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- wasm::NativeModuleModificationScope native_module_modification_scope(
- native_module);
- GlobalHandleAddress old_function_table_addr =
- native_module->function_tables()[table_index];
- code_specialization.PatchTableSize(old_size, old_size + count);
- code_specialization.RelocatePointer(old_function_table_addr,
- new_function_table_addr);
- code_specialization.ApplyToWholeInstance(instance);
- native_module->function_tables()[table_index] = new_function_table_addr;
- } else {
+ if (!WASM_CONTEXT_TABLES) {
DisallowHeapAllocation no_gc;
wasm::CodeSpecialization code_specialization(isolate,
&specialization_zone);
@@ -311,70 +327,104 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
}
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
- int32_t index, Handle<JSFunction> function) {
+ int32_t table_index, Handle<JSFunction> function) {
Handle<FixedArray> array(table->functions(), isolate);
+ if (function.is_null()) {
+ ClearDispatchTables(table, table_index); // Degenerate case of null value.
+ array->set(table_index, isolate->heap()->null_value());
+ return;
+ }
+
+ // TODO(titzer): Change this to MaybeHandle<WasmExportedFunction>
+ auto exported_function = Handle<WasmExportedFunction>::cast(function);
+ auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
+ DCHECK_NOT_NULL(wasm_function);
+ DCHECK_NOT_NULL(wasm_function->sig);
+ WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
+ UpdateDispatchTables(isolate, table, table_index, wasm_function->sig,
+ handle(exported_function->instance()), wasm_code,
+ exported_function->function_index());
+ array->set(table_index, *function);
+}
+
+void WasmTableObject::UpdateDispatchTables(
+ Isolate* isolate, Handle<WasmTableObject> table, int table_index,
+ wasm::FunctionSig* sig, Handle<WasmInstanceObject> from_instance,
+ WasmCodeWrapper wasm_code, int func_index) {
+ if (WASM_CONTEXT_TABLES) {
+ // We simply need to update the WASM contexts for each instance
+ // that imports this table.
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
-
- wasm::FunctionSig* sig = nullptr;
- Handle<Object> code = Handle<Object>::null();
- Handle<Object> value = isolate->factory()->null_value();
-
- if (!function.is_null()) {
- auto exported_function = Handle<WasmExportedFunction>::cast(function);
- auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
- // The verification that {function} is an export was done
- // by the caller.
- DCHECK(wasm_function != nullptr && wasm_function->sig != nullptr);
- sig = wasm_function->sig;
- value = function;
- // TODO(titzer): Make JSToWasm wrappers just call the WASM to WASM wrapper,
- // and then we can just reuse the WASM to WASM wrapper.
- WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
- wasm::NativeModule* native_module =
- wasm_code.IsCodeObject() ? nullptr : wasm_code.GetWasmCode()->owner();
- CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
- wasm::NativeModuleModificationScope native_modification_scope(
- native_module);
- code = wasm::GetOrCreateIndirectCallWrapper(
- isolate, handle(exported_function->instance()), wasm_code,
- exported_function->function_index(), sig);
- }
- UpdateDispatchTables(table, index, sig, code);
- array->set(index, *value);
-}
-
-void WasmTableObject::UpdateDispatchTables(Handle<WasmTableObject> table,
- int index, wasm::FunctionSig* sig,
- Handle<Object> code_or_foreign) {
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ auto sig_id = to_instance->module()->signature_map.Find(sig);
+ auto& entry = to_instance->wasm_context()->get()->table[table_index];
+ entry.sig_id = sig_id;
+ entry.context = from_instance->wasm_context()->get();
+ entry.target = wasm_code.instructions().start();
+ }
+ } else {
+ // We may need to compile a new WASM->WASM wrapper for this.
+ Handle<Object> code_or_foreign = wasm::GetOrCreateIndirectCallWrapper(
+ isolate, from_instance, wasm_code, func_index, sig);
+
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ auto sig_id = to_instance->module()->signature_map.Find(sig);
+
+ FixedArray* function_table = FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
+
+ function_table->set(compiler::FunctionTableSigOffset(table_index),
+ Smi::FromInt(sig_id));
+ function_table->set(compiler::FunctionTableCodeOffset(table_index),
+ *code_or_foreign);
+ }
+ }
+}
+
+void WasmTableObject::ClearDispatchTables(Handle<WasmTableObject> table,
+ int index) {
DisallowHeapAllocation no_gc;
FixedArray* dispatch_tables = table->dispatch_tables();
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
- FixedArray* function_table = FixedArray::cast(
- dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
- Smi* sig_smi = Smi::FromInt(-1);
- Object* code = Smi::kZero;
- if (sig) {
- DCHECK(code_or_foreign->IsCode() || code_or_foreign->IsForeign());
- WasmInstanceObject* instance = WasmInstanceObject::cast(
+ if (WASM_CONTEXT_TABLES) {
+ constexpr int kInvalidSigIndex = -1; // TODO(titzer): move to header.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset));
- // Note that {SignatureMap::Find} may return {-1} if the signature is
- // not found; it will simply never match any check.
- auto sig_index = instance->module()->signature_map.Find(sig);
- sig_smi = Smi::FromInt(sig_index);
- code = *code_or_foreign;
+ DCHECK_LT(index, to_instance->wasm_context()->get()->table_size);
+ auto& entry = to_instance->wasm_context()->get()->table[index];
+ entry.sig_id = kInvalidSigIndex;
+ entry.context = nullptr;
+ entry.target = nullptr;
} else {
- DCHECK(code_or_foreign.is_null());
+ FixedArray* function_table = FixedArray::cast(
+ dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
+ function_table->set(compiler::FunctionTableSigOffset(index),
+ Smi::FromInt(-1));
+ function_table->set(compiler::FunctionTableCodeOffset(index), Smi::kZero);
}
- function_table->set(compiler::FunctionTableSigOffset(index), sig_smi);
- function_table->set(compiler::FunctionTableCodeOffset(index), code);
}
}
namespace {
-
Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
uint32_t pages, uint32_t maximum_pages,
@@ -393,20 +443,22 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
- const bool enable_guard_regions =
- old_buffer.is_null() ? use_trap_handler : old_buffer->has_guard_region();
size_t new_size =
static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
new_size > kMaxInt) {
return Handle<JSArrayBuffer>::null();
}
- if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
+ // Reusing the backing store from externalized buffers causes problems with
+ // Blink's array buffers. The connection between the two is lost, which can
+ // lead to Blink not knowing about the other reference to the buffer and
+ // freeing it too early.
+ if (!old_buffer->is_external() && old_size != 0 &&
+ ((new_size < old_buffer->allocation_length()) || old_size == new_size)) {
DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) {
- isolate->array_buffer_allocator()->SetProtection(
- old_mem_start, new_size,
- v8::ArrayBuffer::Allocator::Protection::kReadWrite);
+ CHECK(i::SetPermissions(old_mem_start, new_size,
+ PageAllocator::kReadWrite));
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
}
@@ -426,23 +478,13 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
} else {
bool free_memory = false;
Handle<JSArrayBuffer> new_buffer;
- if (pages != 0) {
- // Allocate a new buffer and memcpy the old contents.
- free_memory = true;
- new_buffer =
- wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null() || old_size == 0) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- memcpy(new_mem_start, old_mem_start, old_size);
- DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
- DCHECK(old_buffer.is_null() || !old_buffer->has_guard_region());
- } else {
- // Reuse the prior backing store, but allocate a new array buffer.
- new_buffer = wasm::SetupArrayBuffer(
- isolate, old_buffer->allocation_base(),
- old_buffer->allocation_length(), old_buffer->backing_store(),
- new_size, old_buffer->is_external(), old_buffer->has_guard_region());
- }
+ // Allocate a new buffer and memcpy the old contents.
+ free_memory = true;
+ new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
+ if (new_buffer.is_null() || old_size == 0) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ memcpy(new_mem_start, old_mem_start, old_size);
+ DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
}
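The rewritten GrowMemoryBuffer grows in place only when that is provably safe; otherwise it allocates fresh memory and copies. The decision, factored out as a simplified sketch (shared-memory and flag handling omitted):

    #include <cstddef>

    bool CanGrowInPlace(bool is_external, size_t old_size, size_t new_size,
                        size_t allocation_length) {
      // Externalized buffers must not be reused (Blink would lose track of
      // them), and an empty buffer has no backing store to grow.
      if (is_external || old_size == 0) return false;
      // Either the original reservation still covers the new size, or the
      // size does not change at all.
      return new_size < allocation_length || new_size == old_size;
    }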
@@ -667,6 +709,91 @@ void WasmInstanceObject::ValidateOrphanedInstanceForTesting(
CHECK(compiled_module->weak_wasm_module()->cleared());
}
+namespace {
+void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ DisallowHeapAllocation no_gc;
+ JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
+ WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
+ Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ // If a link to shared memory instances exists, update the list of memory
+ // instances before the instance is destroyed.
+ WasmCompiledModule* compiled_module = owner->compiled_module();
+ wasm::NativeModule* native_module = compiled_module->GetNativeModule();
+ if (FLAG_wasm_jit_to_native) {
+ if (native_module) {
+ TRACE("Finalizing %zu {\n", native_module->instance_id);
+ } else {
+ TRACE("Finalized already cleaned up compiled module\n");
+ }
+ } else {
+ TRACE("Finalizing %d {\n", compiled_module->instance_id());
+
+ if (compiled_module->use_trap_handler()) {
+ // TODO(6792): No longer needed once WebAssembly code is off heap.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ DisallowHeapAllocation no_gc;
+ FixedArray* code_table = compiled_module->code_table();
+ for (int i = 0; i < code_table->length(); ++i) {
+ Code* code = Code::cast(code_table->get(i));
+ int index = code->trap_handler_index()->value();
+ if (index >= 0) {
+ trap_handler::ReleaseHandlerData(index);
+ code->set_trap_handler_index(
+ Smi::FromInt(trap_handler::kInvalidIndex));
+ }
+ }
+ }
+ }
+ WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
+
+ // Since the order of finalizers is not guaranteed, it can be the case
+ // that {instance->compiled_module()->module()}, which is a
+ // {Managed<WasmModule>}, has been collected earlier in this GC cycle.
+ // Weak references to this instance won't be cleared until
+ // the next GC cycle, so we need to manually break some links (such as
+ // the weak references from {WasmMemoryObject::instances}).
+ if (owner->has_memory_object()) {
+ Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
+ Handle<WasmInstanceObject> instance(owner, isolate);
+ WasmMemoryObject::RemoveInstance(isolate, memory, instance);
+ }
+
+ // weak_wasm_module may have been cleared, meaning the module object
+ // was GC-ed. We still want to maintain the links between instances, to
+ // release the WasmCompiledModule corresponding to the WasmModuleInstance
+ // being finalized here.
+ WasmModuleObject* wasm_module = nullptr;
+ if (!weak_wasm_module->cleared()) {
+ wasm_module = WasmModuleObject::cast(weak_wasm_module->value());
+ WasmCompiledModule* current_template = wasm_module->compiled_module();
+
+ DCHECK(!current_template->has_prev_instance());
+ if (current_template == compiled_module) {
+ if (!compiled_module->has_next_instance()) {
+ WasmCompiledModule::Reset(isolate, compiled_module);
+ } else {
+ WasmModuleObject::cast(wasm_module)
+ ->set_compiled_module(compiled_module->next_instance());
+ }
+ }
+ }
+
+ compiled_module->RemoveFromChain();
+
+ compiled_module->reset_weak_owning_instance();
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ TRACE("}\n");
+}
+
+} // namespace
+
+void WasmInstanceObject::InstallFinalizer(Isolate* isolate,
+ Handle<WasmInstanceObject> instance) {
+ Handle<Object> global_handle = isolate->global_handles()->Create(*instance);
+ GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ InstanceFinalizer, v8::WeakCallbackType::kFinalizer);
+}
+
bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
if (!object->IsJSFunction()) return false;
Handle<JSFunction> js_function(JSFunction::cast(object));
@@ -721,8 +848,11 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
shared->set_length(arity);
shared->set_internal_formal_parameter_count(arity);
NewFunctionArgs args = NewFunctionArgs::ForWasm(
- name, export_wrapper, isolate->sloppy_function_map());
+ name, export_wrapper, isolate->sloppy_function_without_prototype_map());
Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
+ // According to the spec, exported functions should not have a [[Construct]]
+ // method.
+ DCHECK(!js_function->IsConstructor());
js_function->set_shared(*shared);
Handle<Symbol> instance_symbol(isolate->factory()->wasm_instance_symbol());
@@ -917,7 +1047,7 @@ int FindBreakpointInfoInsertPos(Isolate* isolate,
void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
int position,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = shared->GetIsolate();
Handle<FixedArray> breakpoint_infos;
if (shared->has_breakpoint_infos()) {
@@ -937,7 +1067,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
position) {
Handle<BreakPointInfo> old_info(
BreakPointInfo::cast(breakpoint_infos->get(insert_pos)), isolate);
- BreakPointInfo::SetBreakPoint(old_info, break_point_object);
+ BreakPointInfo::SetBreakPoint(old_info, break_point);
return;
}
@@ -964,7 +1094,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
// Generate new BreakpointInfo.
Handle<BreakPointInfo> breakpoint_info =
isolate->factory()->NewBreakPointInfo(position);
- BreakPointInfo::SetBreakPoint(breakpoint_info, break_point_object);
+ BreakPointInfo::SetBreakPoint(breakpoint_info, break_point);
// Now insert new position at insert_pos.
new_breakpoint_infos->set(insert_pos, *breakpoint_info);
@@ -1005,6 +1135,7 @@ void WasmSharedModuleData::PrepareForLazyCompilation(
Handle<WasmSharedModuleData> shared) {
if (shared->has_lazy_compilation_orchestrator()) return;
Isolate* isolate = shared->GetIsolate();
+ // TODO(titzer): remove dependency on module-compiler.h
auto orch_handle =
Managed<wasm::LazyCompilationOrchestrator>::Allocate(isolate);
shared->set_lazy_compilation_orchestrator(*orch_handle);
@@ -1235,9 +1366,8 @@ MaybeHandle<FixedArray> WasmSharedModuleData::CheckBreakPoints(
Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
if (breakpoint_info->source_position() != position) return {};
- Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
- isolate);
- return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
+ Handle<Object> break_points(breakpoint_info->break_points(), isolate);
+ return isolate->debug()->GetHitBreakPoints(break_points);
}
Handle<WasmCompiledModule> WasmCompiledModule::New(
@@ -1303,13 +1433,7 @@ Handle<WasmCompiledModule> WasmCompiledModule::New(
// has_code_table and pass undefined.
compiled_module->set_code_table(*code_table);
- native_module->function_tables() = function_tables;
- native_module->empty_function_tables() = function_tables;
-
int function_count = static_cast<int>(module->functions.size());
- Handle<FixedArray> handler_table =
- isolate->factory()->NewFixedArray(function_count, TENURED);
- compiled_module->set_handler_table(*handler_table);
Handle<FixedArray> source_positions =
isolate->factory()->NewFixedArray(function_count, TENURED);
compiled_module->set_source_positions(*source_positions);
@@ -1338,6 +1462,10 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
return ret;
}
+ Handle<FixedArray> export_copy = isolate->factory()->CopyFixedArray(
+ handle(module->export_wrappers(), isolate));
+ ret->set_export_wrappers(*export_copy);
+
std::unique_ptr<wasm::NativeModule> native_module =
module->GetNativeModule()->Clone();
// construct the wrapper in 2 steps, because its construction may trigger GC,
@@ -1387,65 +1515,6 @@ wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
return Managed<wasm::NativeModule>::cast(native_module())->get();
}
-void WasmCompiledModule::ResetGCModel(Isolate* isolate,
- WasmCompiledModule* compiled_module) {
- DisallowHeapAllocation no_gc;
- TRACE("Resetting %d\n", compiled_module->instance_id());
- Object* undefined = *isolate->factory()->undefined_value();
- Object* fct_obj = compiled_module->code_table();
- if (fct_obj != nullptr && fct_obj != undefined) {
- // Patch code to update memory references, global references, and function
- // table references.
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- wasm::CodeSpecialization code_specialization(isolate, &specialization_zone);
-
- // Reset function tables.
- if (compiled_module->has_function_tables()) {
- FixedArray* function_tables = compiled_module->function_tables();
- FixedArray* empty_function_tables =
- compiled_module->empty_function_tables();
- if (function_tables != empty_function_tables) {
- DCHECK_EQ(function_tables->length(), empty_function_tables->length());
- for (int i = 0, e = function_tables->length(); i < e; ++i) {
- GlobalHandleAddress func_addr =
- WasmCompiledModule::GetTableValue(function_tables, i);
- code_specialization.RelocatePointer(
- func_addr,
- WasmCompiledModule::GetTableValue(empty_function_tables, i));
- }
- compiled_module->set_function_tables(empty_function_tables);
- }
- }
-
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- FixedArray* functions = FixedArray::cast(fct_obj);
- for (int i = compiled_module->num_imported_functions(),
- end = functions->length();
- i < end; ++i) {
- Code* code = Code::cast(functions->get(i));
- // Skip lazy compile stubs.
- if (code->builtin_index() == Builtins::kWasmCompileLazy) continue;
- if (code->kind() != Code::WASM_FUNCTION) {
- // From here on, there should only be wrappers for exported functions.
- for (; i < end; ++i) {
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
- Code::cast(functions->get(i))->kind());
- }
- break;
- }
- bool changed = code_specialization.ApplyToWasmCode(
- WasmCodeWrapper(handle(code)), SKIP_ICACHE_FLUSH);
- // TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
- // above.
- if (changed) {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- }
- }
- }
-}
-
void WasmCompiledModule::InitId() {
#if DEBUG
static uint32_t instance_id_counter = 0;
@@ -1491,22 +1560,6 @@ void WasmCompiledModule::Reset(Isolate* isolate,
i, isolate->heap()->undefined_value());
}
}
- // Reset function tables.
- if (native_module->function_tables().size() > 0) {
- std::vector<GlobalHandleAddress>& function_tables =
- native_module->function_tables();
- std::vector<GlobalHandleAddress>& empty_function_tables =
- native_module->empty_function_tables();
-
- if (function_tables != empty_function_tables) {
- DCHECK_EQ(function_tables.size(), empty_function_tables.size());
- for (size_t i = 0, e = function_tables.size(); i < e; ++i) {
- code_specialization.RelocatePointer(function_tables[i],
- empty_function_tables[i]);
- }
- native_module->function_tables() = empty_function_tables;
- }
- }
for (uint32_t i = native_module->num_imported_functions(),
end = native_module->FunctionCount();
@@ -1519,7 +1572,7 @@ void WasmCompiledModule::Reset(Isolate* isolate,
// TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
// above.
if (changed) {
- Assembler::FlushICache(isolate, code->instructions().start(),
+ Assembler::FlushICache(code->instructions().start(),
code->instructions().size());
}
}
@@ -1646,30 +1699,23 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
}
size_t function_table_count =
compiled_module->shared()->module()->function_tables.size();
- wasm::NativeModule* native_module = compiled_module->GetNativeModule();
if (function_table_count > 0) {
// The tables are of the right size, but contain bogus global handle
// addresses. Produce new global handles for the empty tables, then reset,
// which will relocate the code. We end up with a WasmCompiledModule as-if
// it were just compiled.
- Handle<FixedArray> function_tables;
- if (!FLAG_wasm_jit_to_native) {
+ if (!WASM_CONTEXT_TABLES) {
DCHECK(compiled_module->has_function_tables());
- function_tables =
- handle(compiled_module->empty_function_tables(), isolate);
- } else {
- DCHECK_GT(native_module->function_tables().size(), 0);
- }
- for (size_t i = 0; i < function_table_count; ++i) {
- Handle<Object> global_func_table_handle =
- isolate->global_handles()->Create(isolate->heap()->undefined_value());
- GlobalHandleAddress new_func_table = global_func_table_handle.address();
- if (!FLAG_wasm_jit_to_native) {
+ Handle<FixedArray> function_tables(
+ compiled_module->empty_function_tables(), isolate);
+ for (size_t i = 0; i < function_table_count; ++i) {
+ Handle<Object> global_func_table_handle =
+ isolate->global_handles()->Create(
+ isolate->heap()->undefined_value());
+ GlobalHandleAddress new_func_table = global_func_table_handle.address();
SetTableValue(isolate, function_tables, static_cast<int>(i),
new_func_table);
- } else {
- native_module->empty_function_tables()[i] = new_func_table;
}
}
}
@@ -1761,10 +1807,9 @@ bool WasmSharedModuleData::GetPositionInfo(uint32_t position,
return true;
}
-
bool WasmCompiledModule::SetBreakPoint(
Handle<WasmCompiledModule> compiled_module, int* position,
- Handle<Object> break_point_object) {
+ Handle<BreakPoint> break_point) {
Isolate* isolate = compiled_module->GetIsolate();
Handle<WasmSharedModuleData> shared(compiled_module->shared(), isolate);
@@ -1779,7 +1824,7 @@ bool WasmCompiledModule::SetBreakPoint(
DCHECK(IsBreakablePosition(*shared, func_index, offset_in_func));
// Insert new break point into break_positions of shared module data.
- WasmSharedModuleData::AddBreakpoint(shared, *position, break_point_object);
+ WasmSharedModuleData::AddBreakpoint(shared, *position, break_point);
// Iterate over all instances of this module and tell them to set this new
// breakpoint.
@@ -1793,6 +1838,28 @@ bool WasmCompiledModule::SetBreakPoint(
return true;
}
+void WasmCompiledModule::LogWasmCodes(Isolate* isolate) {
+ wasm::NativeModule* native_module = GetNativeModule();
+ if (native_module == nullptr) return;
+ const uint32_t number_of_codes = native_module->FunctionCount();
+ if (has_shared()) {
+ Handle<WasmSharedModuleData> shared_handle(shared(), isolate);
+ for (uint32_t i = 0; i < number_of_codes; i++) {
+ wasm::WasmCode* code = native_module->GetCode(i);
+ if (code == nullptr) continue;
+ int name_length;
+ Handle<String> name(
+ WasmSharedModuleData::GetFunctionName(isolate, shared_handle, i));
+ auto cname = name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
+ RobustnessFlag::ROBUST_STRING_TRAVERSAL,
+ &name_length);
+ wasm::WasmName wasm_name(cname.get(), name_length);
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code,
+ wasm_name));
+ }
+ }
+}
+
void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
MaybeHandle<WeakCell> weak_instance,
int func_index) {
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index cecc11f83f..fe2ed419db 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OBJECTS_H_
-#define V8_WASM_OBJECTS_H_
+#ifndef V8_WASM_WASM_OBJECTS_H_
+#define V8_WASM_WASM_OBJECTS_H_
#include "src/base/bits.h"
#include "src/debug/debug.h"
@@ -38,6 +38,8 @@ class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
+#define WASM_CONTEXT_TABLES FLAG_wasm_jit_to_native
+
#define DECL_OOL_QUERY(type) static bool Is##type(Object* object);
#define DECL_OOL_CAST(type) static type* cast(Object* object);
@@ -55,6 +57,15 @@ class WasmInstanceObject;
static const int k##name##Offset = \
kSize + (k##name##Index - kFieldCount) * kPointerSize;
+// An entry in an indirect dispatch table.
+struct IndirectFunctionTableEntry {
+ int32_t sig_id = 0;
+ WasmContext* context = nullptr;
+ Address target = nullptr;
+
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(IndirectFunctionTableEntry)
+};
+
// Wasm context used to store the mem_size and mem_start address of the linear
// memory. These variables can be accessed at C++ level at graph build time
// (e.g., initialized during instance building / changed at runtime by
@@ -67,14 +78,27 @@ struct WasmContext {
uint32_t mem_size = 0; // TODO(titzer): uintptr_t?
uint32_t mem_mask = 0; // TODO(titzer): uintptr_t?
byte* globals_start = nullptr;
+ // TODO(wasm): pad these entries to a power of two.
+ IndirectFunctionTableEntry* table = nullptr;
+ uint32_t table_size = 0;
- inline void SetRawMemory(void* mem_start, size_t mem_size) {
+ void SetRawMemory(void* mem_start, size_t mem_size) {
DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
this->mem_start = static_cast<byte*>(mem_start);
this->mem_size = static_cast<uint32_t>(mem_size);
this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
DCHECK_LE(mem_size, this->mem_mask + 1);
}
+
+ ~WasmContext() {
+ if (table) free(table);
+ mem_start = nullptr;
+ mem_size = 0;
+ mem_mask = 0;
+ globals_start = nullptr;
+ table = nullptr;
+ table_size = 0;
+ }
};
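
For orientation, a minimal sketch (assuming a hypothetical ResolveIndirectCall helper; this is not V8's actual generated call_indirect sequence) of how a call through entries of the shape added above could proceed: bounds-check the table index, compare the canonicalized signature id, then call the target with the callee instance's context.

#include <cstdint>

// Sketch only: illustrates the data flow implied by
// IndirectFunctionTableEntry, not V8's real lowering.
struct WasmContextSketch;  // stand-in for the real WasmContext

struct IndirectEntrySketch {
  int32_t sig_id;
  WasmContextSketch* context;
  void* target;
};

// Returns the code address to call, or nullptr to trap.
inline void* ResolveIndirectCall(const IndirectEntrySketch* table,
                                 uint32_t table_size, uint32_t index,
                                 int32_t expected_sig_id,
                                 WasmContextSketch** context_out) {
  if (index >= table_size) return nullptr;          // trap: out of bounds
  const IndirectEntrySketch& e = table[index];
  if (e.sig_id != expected_sig_id) return nullptr;  // trap: signature mismatch
  *context_out = e.context;  // callee runs against its own instance context
  return e.target;
}
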
// Representation of a WebAssembly.Module JavaScript-level object.
@@ -137,9 +161,13 @@ class WasmTableObject : public JSObject {
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
int32_t index, Handle<JSFunction> function);
- static void UpdateDispatchTables(Handle<WasmTableObject> table, int index,
- wasm::FunctionSig* sig,
- Handle<Object> code_or_foreign);
+ static void UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int table_index, wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> from_instance,
+ WasmCodeWrapper wasm_code, int func_index);
+
+ static void ClearDispatchTables(Handle<WasmTableObject> table, int index);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
@@ -249,6 +277,9 @@ class WasmInstanceObject : public JSObject {
static void ValidateOrphanedInstanceForTesting(
Isolate* isolate, Handle<WasmInstanceObject> instance);
+
+ static void InstallFinalizer(Isolate* isolate,
+ Handle<WasmInstanceObject> instance);
};
// A WASM function that is wrapped and exported to JavaScript.
@@ -306,7 +337,7 @@ class WasmSharedModuleData : public FixedArray {
Handle<WasmSharedModuleData>);
static void AddBreakpoint(Handle<WasmSharedModuleData>, int position,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
static void SetBreakpointsOnNewInstance(Handle<WasmSharedModuleData>,
Handle<WasmInstanceObject>);
@@ -468,7 +499,6 @@ class WasmCompiledModule : public FixedArray {
MACRO(WASM_OBJECT, WasmCompiledModule, prev_instance) \
MACRO(WEAK_LINK, WasmInstanceObject, owning_instance) \
MACRO(WEAK_LINK, WasmModuleObject, wasm_module) \
- MACRO(OBJECT, FixedArray, handler_table) \
MACRO(OBJECT, FixedArray, source_positions) \
MACRO(OBJECT, Foreign, native_module) \
MACRO(OBJECT, FixedArray, lazy_compile_data) \
@@ -478,9 +508,7 @@ class WasmCompiledModule : public FixedArray {
MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
MACRO(CONST_OBJECT, FixedArray, code_table) \
MACRO(OBJECT, FixedArray, function_tables) \
- MACRO(OBJECT, FixedArray, signature_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_signature_tables)
+ MACRO(CONST_OBJECT, FixedArray, empty_function_tables)
// TODO(mtrofin): this is unnecessary when we stop needing
// FLAG_wasm_jit_to_native, because we have instance_id on NativeModule.
@@ -516,9 +544,6 @@ class WasmCompiledModule : public FixedArray {
Handle<WasmCompiledModule> module);
static void Reset(Isolate* isolate, WasmCompiledModule* module);
- // TODO(mtrofin): delete this when we don't need FLAG_wasm_jit_to_native
- static void ResetGCModel(Isolate* isolate, WasmCompiledModule* module);
-
wasm::NativeModule* GetNativeModule() const;
void InsertInChain(WasmModuleObject*);
void RemoveFromChain();
@@ -543,7 +568,7 @@ class WasmCompiledModule : public FixedArray {
// If it points outside a function, or behind the last breakable location,
// this function returns false and does not set any breakpoint.
static bool SetBreakPoint(Handle<WasmCompiledModule>, int* position,
- Handle<Object> break_point_object);
+ Handle<BreakPoint> break_point);
inline void ReplaceCodeTableForTesting(
std::vector<wasm::WasmCode*>&& testing_table);
@@ -556,6 +581,8 @@ class WasmCompiledModule : public FixedArray {
static Address GetTableValue(FixedArray* table, int index);
inline void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table);
+ void LogWasmCodes(Isolate* isolate);
+
private:
void InitId();
@@ -692,4 +719,4 @@ WasmFunctionInfo GetWasmFunctionInfo(Isolate*, Handle<Code>);
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OBJECTS_H_
+#endif // V8_WASM_WASM_OBJECTS_H_
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index b503aa1a5e..ac02b549a0 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -101,9 +101,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
- // TODO(kschimpf): Add I64 versions of saturating conversions.
+ // TODO(kschimpf): Simplify after filling in other saturating operations.
CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
@@ -116,6 +118,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
+ CASE_INT_OP(SExtendI8, "sign_extend8")
+ CASE_INT_OP(SExtendI16, "sign_extend16")
+ CASE_I64_OP(SExtendI32, "sign_extend32")
CASE_OP(Unreachable, "unreachable")
CASE_OP(Nop, "nop")
CASE_OP(Block, "block")
@@ -320,6 +325,19 @@ bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
}
}
+bool WasmOpcodes::IsSignExtensionOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprI32SExtendI8:
+ case kExprI32SExtendI16:
+ case kExprI64SExtendI8:
+ case kExprI64SExtendI16:
+ case kExprI64SExtendI32:
+ return true;
+ default:
+ return false;
+ }
+}
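+
+For reference, the sign-extension operators recognized above narrow a value to 8, 16, or 32 bits and sign-extend it back to its full width. A minimal sketch of the semantics (illustrative names, not V8 helpers):
+
+#include <cstdint>
+
+// Semantics of the wasm sign-extension operators (sketch).
+inline int32_t I32SExtendI8Sketch(int32_t x) {
+  return static_cast<int32_t>(static_cast<int8_t>(x));
+}
+inline int32_t I32SExtendI16Sketch(int32_t x) {
+  return static_cast<int32_t>(static_cast<int16_t>(x));
+}
+inline int64_t I64SExtendI32Sketch(int64_t x) {
+  return static_cast<int64_t>(static_cast<int32_t>(x));
+}
+// e.g. I32SExtendI8Sketch(0x80) == -128, I32SExtendI8Sketch(0x7f) == 127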
+
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 9f8232c902..c6b87f0556 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_OPCODES_H_
-#define V8_WASM_OPCODES_H_
+#ifndef V8_WASM_WASM_OPCODES_H_
+#define V8_WASM_WASM_OPCODES_H_
#include "src/globals.h"
#include "src/machine-type.h"
@@ -225,21 +225,26 @@ using WasmName = Vector<const char>;
V(I32ReinterpretF32, 0xbc, i_f) \
V(I64ReinterpretF64, 0xbd, l_d) \
V(F32ReinterpretI32, 0xbe, f_i) \
- V(F64ReinterpretI64, 0xbf, d_l)
+ V(F64ReinterpretI64, 0xbf, d_l) \
+ V(I32SExtendI8, 0xc0, i_i) \
+ V(I32SExtendI16, 0xc1, i_i) \
+ V(I64SExtendI8, 0xc2, l_l) \
+ V(I64SExtendI16, 0xc3, l_l) \
+ V(I64SExtendI32, 0xc4, l_l)
// For compatibility with Asm.js.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
- V(F64Acos, 0xc2, d_d) \
- V(F64Asin, 0xc3, d_d) \
- V(F64Atan, 0xc4, d_d) \
- V(F64Cos, 0xc5, d_d) \
- V(F64Sin, 0xc6, d_d) \
- V(F64Tan, 0xc7, d_d) \
- V(F64Exp, 0xc8, d_d) \
- V(F64Log, 0xc9, d_d) \
- V(F64Atan2, 0xca, d_dd) \
- V(F64Pow, 0xcb, d_dd) \
- V(F64Mod, 0xcc, d_dd) \
+ V(F64Acos, 0xc5, d_d) \
+ V(F64Asin, 0xc6, d_d) \
+ V(F64Atan, 0xc7, d_d) \
+ V(F64Cos, 0xc8, d_d) \
+ V(F64Sin, 0xc9, d_d) \
+ V(F64Tan, 0xca, d_d) \
+ V(F64Exp, 0xcb, d_d) \
+ V(F64Log, 0xcc, d_d) \
+ V(F64Atan2, 0xcd, d_dd) \
+ V(F64Pow, 0xce, d_dd) \
+ V(F64Mod, 0xcf, d_dd) \
V(I32AsmjsDivS, 0xd0, i_ii) \
V(I32AsmjsDivU, 0xd1, i_ii) \
V(I32AsmjsRemS, 0xd2, i_ii) \
@@ -403,8 +408,11 @@ using WasmName = Vector<const char>;
V(I32SConvertSatF32, 0xfc00, i_f) \
V(I32UConvertSatF32, 0xfc01, i_f) \
V(I32SConvertSatF64, 0xfc02, i_d) \
- V(I32UConvertSatF64, 0xfc03, i_d)
-// TODO(kschimpf): Add remaining i64 numeric opcodes.
+ V(I32UConvertSatF64, 0xfc03, i_d) \
+ V(I64SConvertSatF32, 0xfc04, l_f) \
+ V(I64UConvertSatF32, 0xfc05, l_f) \
+ V(I64SConvertSatF64, 0xfc06, l_d) \
+ V(I64UConvertSatF64, 0xfc07, l_d)
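
The saturating conversions added above clamp out-of-range inputs instead of trapping, and map NaN to zero. A minimal sketch of the i64.trunc_sat_f64_s semantics (illustrative only; V8 lowers these in its compilers):

#include <cmath>
#include <cstdint>
#include <limits>

// Sketch: saturating signed truncation of f64 to i64.
inline int64_t I64SConvertSatF64Sketch(double x) {
  if (std::isnan(x)) return 0;
  if (x >= 9223372036854775808.0)   // 2^63
    return std::numeric_limits<int64_t>::max();
  if (x < -9223372036854775808.0)   // -2^63
    return std::numeric_limits<int64_t>::min();
  return static_cast<int64_t>(x);   // in range: truncate toward zero
}
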
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicLoad, 0xfe10, i_i) \
@@ -647,6 +655,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static FunctionSig* AsmjsSignature(WasmOpcode opcode);
static bool IsPrefixOpcode(WasmOpcode opcode);
static bool IsControlOpcode(WasmOpcode opcode);
+ static bool IsSignExtensionOpcode(WasmOpcode opcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
static bool IsUnconditionalJump(WasmOpcode opcode);
@@ -793,4 +802,4 @@ struct WasmInitExpr {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_OPCODES_H_
+#endif // V8_WASM_WASM_OPCODES_H_
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 7744b42923..8250db9040 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_RESULT_H_
-#define V8_WASM_RESULT_H_
+#ifndef V8_WASM_WASM_RESULT_H_
+#define V8_WASM_WASM_RESULT_H_
#include <cstdarg>
#include <memory>
@@ -158,4 +158,4 @@ class V8_EXPORT_PRIVATE ErrorThrower {
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_RESULT_H_
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 4466672f37..240ffbca3d 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -133,7 +133,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
static size_t GetCodeHeaderSize();
size_t MeasureCode(const WasmCode*) const;
size_t MeasureCopiedStubs() const;
- FixedArray* GetHandlerTable(const WasmCode*) const;
ByteArray* GetSourcePositions(const WasmCode*) const;
void BufferHeader();
@@ -187,7 +186,6 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
: isolate_(isolate), native_module_(module) {
DCHECK_NOT_NULL(isolate_);
DCHECK_NOT_NULL(native_module_);
- DCHECK_NULL(native_module_->lazy_builtin_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate_);
@@ -210,12 +208,7 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
size_t NativeModuleSerializer::MeasureHeader() const {
return sizeof(uint32_t) + // total wasm fct count
- sizeof(
- uint32_t) + // imported fcts - i.e. index of first wasm function
- sizeof(uint32_t) + // table count
- native_module_->specialization_data_.function_tables.size()
- // function table, containing pointers
- * sizeof(GlobalHandleAddress);
+ sizeof(uint32_t); // imported fcts - i.e. index of first wasm function
}
void NativeModuleSerializer::BufferHeader() {
@@ -225,37 +218,25 @@ void NativeModuleSerializer::BufferHeader() {
Writer writer(remaining_);
writer.Write(native_module_->FunctionCount());
writer.Write(native_module_->num_imported_functions());
- writer.Write(static_cast<uint32_t>(
- native_module_->specialization_data_.function_tables.size()));
- for (size_t i = 0,
- e = native_module_->specialization_data_.function_tables.size();
- i < e; ++i) {
- writer.Write(native_module_->specialization_data_.function_tables[i]);
- }
}
size_t NativeModuleSerializer::GetCodeHeaderSize() {
return sizeof(size_t) + // size of this section
sizeof(size_t) + // offset of constant pool
sizeof(size_t) + // offset of safepoint table
+ sizeof(size_t) + // offset of handler table
sizeof(uint32_t) + // stack slots
sizeof(size_t) + // code size
sizeof(size_t) + // reloc size
- sizeof(uint32_t) + // handler size
sizeof(uint32_t) + // source positions size
sizeof(size_t) + // protected instructions size
sizeof(bool); // is_liftoff
}
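
For readability, a sketch of the per-code header this now describes, written as a struct mirroring the read/write order (V8 writes the raw fields, not such a struct; note the handler table is now recorded as an offset into the code rather than serialized as a separate payload):

#include <cstddef>
#include <cstdint>

// Sketch only: field order matching GetCodeHeaderSize() and
// BufferCodeInAllocatedScratch()/ReadCode() after this change.
struct WasmCodeHeaderSketch {
  size_t section_size;                 // size of this code entry
  size_t constant_pool_offset;
  size_t safepoint_table_offset;
  size_t handler_table_offset;         // new: an offset, not a copied table
  uint32_t stack_slots;
  size_t code_size;
  size_t reloc_size;
  uint32_t source_positions_size;      // the handler size field is gone
  size_t protected_instructions_size;
  bool is_liftoff;
};
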
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
- FixedArray* handler_table = GetHandlerTable(code);
ByteArray* source_positions = GetSourcePositions(code);
return GetCodeHeaderSize() + code->instructions().size() + // code
code->reloc_info().size() + // reloc info
- (handler_table == nullptr
- ? 0
- : static_cast<uint32_t>(
- handler_table->length())) + // handler table
(source_positions == nullptr
? 0
: static_cast<uint32_t>(
@@ -325,21 +306,6 @@ void NativeModuleSerializer::BufferCopiedStubs() {
}
}
-FixedArray* NativeModuleSerializer::GetHandlerTable(
- const WasmCode* code) const {
- if (code->kind() != WasmCode::kFunction) return nullptr;
- uint32_t index = code->index();
- // We write the address, the size, and then copy the code as-is, followed
- // by reloc info, followed by handler table and source positions.
- Object* handler_table_entry =
- native_module_->compiled_module()->handler_table()->get(
- static_cast<int>(index));
- if (handler_table_entry->IsFixedArray()) {
- return FixedArray::cast(handler_table_entry);
- }
- return nullptr;
-}
-
ByteArray* NativeModuleSerializer::GetSourcePositions(
const WasmCode* code) const {
if (code->kind() != WasmCode::kFunction) return nullptr;
@@ -364,15 +330,7 @@ void NativeModuleSerializer::BufferCurrentWasmCode() {
void NativeModuleSerializer::BufferCodeInAllocatedScratch(
const WasmCode* code) {
// We write the address, the size, and then copy the code as-is, followed
- // by reloc info, followed by handler table and source positions.
- FixedArray* handler_table_entry = GetHandlerTable(code);
- uint32_t handler_table_size = 0;
- Address handler_table = nullptr;
- if (handler_table_entry != nullptr) {
- handler_table_size = static_cast<uint32_t>(handler_table_entry->length());
- handler_table = reinterpret_cast<Address>(
- handler_table_entry->GetFirstElementAddress());
- }
+ // by reloc info, followed by source positions.
ByteArray* source_positions_entry = GetSourcePositions(code);
Address source_positions = nullptr;
uint32_t source_positions_size = 0;
@@ -386,10 +344,10 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
writer.Write(MeasureCode(code));
writer.Write(code->constant_pool_offset());
writer.Write(code->safepoint_table_offset());
+ writer.Write(code->handler_table_offset());
writer.Write(code->stack_slots());
writer.Write(code->instructions().size());
writer.Write(code->reloc_info().size());
- writer.Write(handler_table_size);
writer.Write(source_positions_size);
writer.Write(code->protected_instructions().size());
writer.Write(code->is_liftoff());
@@ -398,7 +356,6 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
// write the code and everything else
writer.WriteVector(code->instructions());
writer.WriteVector(code->reloc_info());
- writer.WriteVector({handler_table, handler_table_size});
writer.WriteVector({source_positions, source_positions_size});
writer.WriteVector(
{reinterpret_cast<const byte*>(code->protected_instructions().data()),
@@ -555,16 +512,6 @@ bool NativeModuleDeserializer::ReadHeader() {
bool ok = functions == native_module_->FunctionCount() &&
imports == native_module_->num_imported_functions();
if (!ok) return false;
- size_t table_count = reader.Read<uint32_t>();
-
- std::vector<GlobalHandleAddress> funcs(table_count);
- for (size_t i = 0; i < table_count; ++i) {
- funcs[i] = reader.Read<GlobalHandleAddress>();
- }
- native_module_->function_tables() = funcs;
- // resize, so that from here on the native module can be
- // asked about num_function_tables().
- native_module_->empty_function_tables().resize(table_count);
unread_ = unread_ + (start_size - reader.current_buffer().size());
return true;
@@ -592,10 +539,10 @@ bool NativeModuleDeserializer::ReadCode() {
USE(code_section_size);
size_t constant_pool_offset = reader.Read<size_t>();
size_t safepoint_table_offset = reader.Read<size_t>();
+ size_t handler_table_offset = reader.Read<size_t>();
uint32_t stack_slot_count = reader.Read<uint32_t>();
size_t code_size = reader.Read<size_t>();
size_t reloc_size = reader.Read<size_t>();
- uint32_t handler_size = reader.Read<uint32_t>();
uint32_t source_position_size = reader.Read<uint32_t>();
size_t protected_instructions_size = reader.Read<size_t>();
bool is_liftoff = reader.Read<bool>();
@@ -612,9 +559,10 @@ bool NativeModuleDeserializer::ReadCode() {
WasmCode* ret = native_module_->AddOwnedCode(
code_buffer, std::move(reloc_info), reloc_size, Just(index_),
WasmCode::kFunction, constant_pool_offset, stack_slot_count,
- safepoint_table_offset, protected_instructions, is_liftoff);
+ safepoint_table_offset, handler_table_offset, protected_instructions,
+ is_liftoff);
if (ret == nullptr) return false;
- native_module_->SetCodeTable(index_, ret);
+ native_module_->code_table_[index_] = ret;
// now relocate the code
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
@@ -634,7 +582,7 @@ bool NativeModuleDeserializer::ReadCode() {
case RelocInfo::CODE_TARGET: {
uint32_t tag = GetWasmCalleeTag(iter.rinfo());
Address target = GetTrampolineOrStubFromTag(tag);
- iter.rinfo()->set_target_address(nullptr, target, SKIP_WRITE_BARRIER,
+ iter.rinfo()->set_target_address(target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
break;
}
@@ -643,23 +591,14 @@ bool NativeModuleDeserializer::ReadCode() {
reinterpret_cast<intptr_t>(iter.rinfo()->target_address()));
Address address =
ExternalReferenceTable::instance(isolate_)->address(orig_target);
- iter.rinfo()->set_target_runtime_entry(
- nullptr, address, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ iter.rinfo()->set_target_runtime_entry(address, SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
break;
}
default:
break;
}
}
- if (handler_size > 0) {
- Handle<FixedArray> handler_table = isolate_->factory()->NewFixedArray(
- static_cast<int>(handler_size), TENURED);
- reader.ReadIntoVector(
- {reinterpret_cast<Address>(handler_table->GetFirstElementAddress()),
- handler_size});
- native_module_->compiled_module()->handler_table()->set(
- static_cast<int>(index_), *handler_table);
- }
if (source_position_size > 0) {
Handle<ByteArray> source_positions = isolate_->factory()->NewByteArray(
static_cast<int>(source_position_size), TENURED);
@@ -743,6 +682,10 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
compiled_module->GetNativeModule());
if (!deserializer.Read(data)) return {};
+ // TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}. This
+ // requires unlocking the code space here. This should be moved into the
+ // allocator eventually.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
CompileJsToWasmWrappers(isolate, compiled_module, isolate->counters());
WasmCompiledModule::ReinitializeAfterDeserialization(isolate,
compiled_module);
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 9c0e9ce10a..5bb49bfdce 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_SERIALIZATION_H_
-#define V8_WASM_SERIALIZATION_H_
+#ifndef V8_WASM_WASM_SERIALIZATION_H_
+#define V8_WASM_WASM_SERIALIZATION_H_
#include "src/wasm/wasm-objects.h"
@@ -21,4 +21,4 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_WASM_WASM_SERIALIZATION_H_
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 81c8e41813..1619241332 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -134,7 +134,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprCallIndirect: {
CallIndirectOperand<Decoder::kNoValidate> operand(&i, i.pc());
DCHECK_EQ(0, operand.table_index);
- os << "call_indirect " << operand.index;
+ os << "call_indirect " << operand.sig_index;
break;
}
case kExprCallFunction: {
@@ -208,6 +208,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
UNREACHABLE();
break;
}
+ break;
}
// This group is just printed by their internal opcode name, as they
diff --git a/deps/v8/src/wasm/wasm-text.h b/deps/v8/src/wasm/wasm-text.h
index 1608ea9a2d..60957966ab 100644
--- a/deps/v8/src/wasm/wasm-text.h
+++ b/deps/v8/src/wasm/wasm-text.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_S_EXPR_H_
-#define V8_WASM_S_EXPR_H_
+#ifndef V8_WASM_WASM_TEXT_H_
+#define V8_WASM_WASM_TEXT_H_
#include <cstdint>
#include <ostream>
@@ -35,4 +35,4 @@ void PrintWasmText(
} // namespace internal
} // namespace v8
-#endif // V8_WASM_S_EXPR_H_
+#endif // V8_WASM_WASM_TEXT_H_
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index a30657aee0..22fd13c219 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_VALUE_H_
-#define V8_WASM_VALUE_H_
+#ifndef V8_WASM_WASM_VALUE_H_
+#define V8_WASM_WASM_VALUE_H_
#include "src/boxed-float.h"
#include "src/wasm/wasm-opcodes.h"
@@ -84,4 +84,4 @@ FOREACH_WASMVAL_TYPE(DECLARE_CAST)
} // namespace internal
} // namespace v8
-#endif // V8_WASM_VALUE_H_
+#endif // V8_WASM_WASM_VALUE_H_
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index a75a8ddd74..eef4158f53 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
@@ -23,9 +23,6 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
// Implementation of Assembler
-static const byte kCallOpcode = 0xE8;
-
-
void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t);
@@ -95,14 +92,12 @@ void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
-
-void Assembler::emit_rex_64(Register reg, const Operand& op) {
- emit(0x48 | reg.high_bit() << 2 | op.rex_);
+void Assembler::emit_rex_64(Register reg, Operand op) {
+ emit(0x48 | reg.high_bit() << 2 | op.data().rex);
}
-
-void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
+void Assembler::emit_rex_64(XMMRegister reg, Operand op) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | op.data().rex);
}
@@ -111,19 +106,14 @@ void Assembler::emit_rex_64(Register rm_reg) {
emit(0x48 | rm_reg.high_bit());
}
-
-void Assembler::emit_rex_64(const Operand& op) {
- emit(0x48 | op.rex_);
-}
-
+void Assembler::emit_rex_64(Operand op) { emit(0x48 | op.data().rex); }
void Assembler::emit_rex_32(Register reg, Register rm_reg) {
emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}
-
-void Assembler::emit_rex_32(Register reg, const Operand& op) {
- emit(0x40 | reg.high_bit() << 2 | op.rex_);
+void Assembler::emit_rex_32(Register reg, Operand op) {
+ emit(0x40 | reg.high_bit() << 2 | op.data().rex);
}
@@ -131,26 +121,20 @@ void Assembler::emit_rex_32(Register rm_reg) {
emit(0x40 | rm_reg.high_bit());
}
-
-void Assembler::emit_rex_32(const Operand& op) {
- emit(0x40 | op.rex_);
-}
-
+void Assembler::emit_rex_32(Operand op) { emit(0x40 | op.data().rex); }
void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
if (rex_bits != 0) emit(0x40 | rex_bits);
}
-
-void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
- byte rex_bits = reg.high_bit() << 2 | op.rex_;
+void Assembler::emit_optional_rex_32(Register reg, Operand op) {
+ byte rex_bits = reg.high_bit() << 2 | op.data().rex;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
+void Assembler::emit_optional_rex_32(XMMRegister reg, Operand op) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | op.data().rex;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
@@ -181,8 +165,8 @@ void Assembler::emit_optional_rex_32(XMMRegister rm_reg) {
if (rm_reg.high_bit()) emit(0x41);
}
-void Assembler::emit_optional_rex_32(const Operand& op) {
- if (op.rex_ != 0) emit(0x40 | op.rex_);
+void Assembler::emit_optional_rex_32(Operand op) {
+ if (op.data().rex != 0) emit(0x40 | op.data().rex);
}
@@ -195,9 +179,8 @@ void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
// byte 1 of 3-byte VEX
-void Assembler::emit_vex3_byte1(XMMRegister reg, const Operand& rm,
- LeadingOpcode m) {
- byte rxb = ~((reg.high_bit() << 2) | rm.rex_) << 5;
+void Assembler::emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m) {
+ byte rxb = ~((reg.high_bit() << 2) | rm.data().rex) << 5;
emit(rxb | m);
}
@@ -240,11 +223,10 @@ void Assembler::emit_vex_prefix(Register reg, Register vreg, Register rm,
emit_vex_prefix(ireg, ivreg, irm, l, pp, mm, w);
}
-
-void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
- const Operand& rm, VectorLength l,
- SIMDPrefix pp, LeadingOpcode mm, VexW w) {
- if (rm.rex_ || mm != k0F || w != kW0) {
+void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg, Operand rm,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
+ VexW w) {
+ if (rm.data().rex || mm != k0F || w != kW0) {
emit_vex3_byte0();
emit_vex3_byte1(reg, rm, mm);
emit_vex3_byte2(w, vreg, l, pp);
@@ -254,8 +236,7 @@ void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
}
}
-
-void Assembler::emit_vex_prefix(Register reg, Register vreg, const Operand& rm,
+void Assembler::emit_vex_prefix(Register reg, Register vreg, Operand rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
VexW w) {
XMMRegister ireg = XMMRegister::from_code(reg.code());
@@ -268,19 +249,17 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
return Memory::int32_at(pc) + pc + 4;
}
-
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address constant_pool, Address target,
+void Assembler::set_target_address_at(Address pc, Address constant_pool,
+ Address target,
ICacheFlushMode icache_flush_mode) {
- DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc, sizeof(int32_t));
+ Assembler::FlushICache(pc, sizeof(int32_t));
}
}
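
A minimal sketch of the rel32 arithmetic used here: x64 stores call/jmp targets as a 32-bit displacement relative to the end of the 4-byte displacement field (illustrative helpers, not part of V8):

#include <cstdint>

// pc points at the 4 displacement bytes; they end at pc + 4.
inline int32_t Rel32ForSketch(uintptr_t pc, uintptr_t target) {
  return static_cast<int32_t>(target - pc - 4);
}
inline uintptr_t TargetFromSketch(uintptr_t pc, int32_t rel32) {
  return pc + 4 + rel32;  // inverse direction, cf. target_address_at()
}
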
void Assembler::deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
+ Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -290,8 +269,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
void Assembler::deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code, Address target) {
- set_target_address_at(isolate, instruction_payload,
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload,
code ? code->constant_pool() : nullptr, target);
}
@@ -380,7 +359,7 @@ void RelocInfo::set_target_object(HeapObject* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(target->GetIsolate(), pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
@@ -395,22 +374,22 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return origin->runtime_entry_at(pc_);
}
-void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
+void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
- set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
}
-void RelocInfo::WipeOut(Isolate* isolate) {
+void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(isolate, pc_, constant_pool_,
+ Assembler::set_target_address_at(pc_, constant_pool_,
pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
@@ -418,11 +397,11 @@ void RelocInfo::WipeOut(Isolate* isolate) {
}
template <typename ObjectVisitor>
-void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
@@ -434,49 +413,6 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
}
}
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-void Operand::set_modrm(int mod, Register rm_reg) {
- DCHECK(is_uint2(mod));
- buf_[0] = mod << 6 | rm_reg.low_bits();
- // Set REX.B to the high bit of rm.code().
- rex_ |= rm_reg.high_bit();
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- DCHECK_EQ(len_, 1);
- DCHECK(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12. Otherwise we
- // would skip the SIB byte entirely.
- DCHECK(index != rsp || base == rsp || base == r12);
- buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- rex_ |= index.high_bit() << 1 | base.high_bit();
- len_ = 2;
-}
-
-void Operand::set_disp8(int disp) {
- DCHECK(is_int8(disp));
- DCHECK(len_ == 1 || len_ == 2);
- int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int8_t);
-}
-
-void Operand::set_disp32(int disp) {
- DCHECK(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
-}
-
-void Operand::set_disp64(int64_t disp) {
- DCHECK_EQ(1, len_);
- int64_t* p = reinterpret_cast<int64_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(disp);
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 38cbfc78d9..0ec50147fd 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -55,7 +55,7 @@ bool OSHasAVXSupport() {
size_t buffer_size = arraysize(buffer);
int ctl_name[] = {CTL_KERN, KERN_OSRELEASE};
if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
- V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+ FATAL("V8 failed to get kernel version");
}
// The buffer now contains a string of the form XX.YY.ZZ, where
// XX is the major kernel version component.
@@ -127,26 +127,26 @@ Address RelocInfo::embedded_address() const { return Memory::Address_at(pc_); }
uint32_t RelocInfo::embedded_size() const { return Memory::uint32_at(pc_); }
-void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
+void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(Address));
+ Assembler::FlushICache(pc_, sizeof(Address));
}
}
-void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
+void RelocInfo::set_embedded_size(uint32_t size,
ICacheFlushMode icache_flush_mode) {
Memory::uint32_at(pc_) = size;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate, pc_, sizeof(uint32_t));
+ Assembler::FlushICache(pc_, sizeof(uint32_t));
}
}
-void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
+void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- set_embedded_address(isolate, address, icache_flush_mode);
+ set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@@ -157,133 +157,189 @@ Address RelocInfo::js_to_wasm_address() const {
// -----------------------------------------------------------------------------
// Implementation of Operand
-Operand::Operand(Register base, int32_t disp) : rex_(0) {
- len_ = 1;
- if (base == rsp || base == r12) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
+namespace {
+class OperandBuilder {
+ public:
+ OperandBuilder(Register base, int32_t disp) {
+ if (base == rsp || base == r12) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(times_1, rsp, base);
+ }
- if (disp == 0 && base != rbp && base != r13) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
+ if (disp == 0 && base != rbp && base != r13) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
}
-}
+ OperandBuilder(Register base, Register index, ScaleFactor scale,
+ int32_t disp) {
+ DCHECK(index != rsp);
+ set_sib(scale, index, base);
+ if (disp == 0 && base != rbp && base != r13) {
+ // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+ // possibly set by set_sib.
+ set_modrm(0, rsp);
+ } else if (is_int8(disp)) {
+ set_modrm(1, rsp);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, rsp);
+ set_disp32(disp);
+ }
+ }
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- DCHECK(index != rsp);
- len_ = 1;
- set_sib(scale, index, base);
- if (disp == 0 && base != rbp && base != r13) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
+ OperandBuilder(Register index, ScaleFactor scale, int32_t disp) {
+ DCHECK(index != rsp);
set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
+ set_sib(scale, index, rbp);
set_disp32(disp);
}
-}
+ OperandBuilder(Label* label, int addend) {
+ data_.addend = addend;
+ DCHECK_NOT_NULL(label);
+ DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
+ set_modrm(0, rbp);
+ set_disp64(reinterpret_cast<intptr_t>(label));
+ }
+
+ OperandBuilder(Operand operand, int32_t offset) {
+ DCHECK_GE(operand.data().len, 1);
+ // Operand encodes REX ModR/M [SIB] [Disp].
+ byte modrm = operand.data().buf[0];
+ DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
+ bool has_sib = ((modrm & 0x07) == 0x04);
+ byte mode = modrm & 0xC0;
+ int disp_offset = has_sib ? 2 : 1;
+ int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
+ // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+ // displacement.
+ bool is_baseless =
+ (mode == 0) && (base_reg == 0x05); // No base or RIP base.
+ int32_t disp_value = 0;
+ if (mode == 0x80 || is_baseless) {
+ // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+ disp_value = *bit_cast<const int32_t*>(&operand.data().buf[disp_offset]);
+ } else if (mode == 0x40) {
+ // Mode 1: Byte displacement.
+ disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
+ }
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- DCHECK(index != rsp);
- len_ = 1;
- set_modrm(0, rsp);
- set_sib(scale, index, rbp);
- set_disp32(disp);
-}
-
-
-Operand::Operand(Label* label) : rex_(0), len_(1) {
- DCHECK_NOT_NULL(label);
- set_modrm(0, rbp);
- set_disp64(reinterpret_cast<intptr_t>(label));
-}
-
-
-Operand::Operand(const Operand& operand, int32_t offset) {
- DCHECK_GE(operand.len_, 1);
- // Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.buf_[0];
- DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
- bool has_sib = ((modrm & 0x07) == 0x04);
- byte mode = modrm & 0xC0;
- int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
- // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
- // displacement.
- bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
- int32_t disp_value = 0;
- if (mode == 0x80 || is_baseless) {
- // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *bit_cast<const int32_t*>(&operand.buf_[disp_offset]);
- } else if (mode == 0x40) {
- // Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
+ // Write new operand with same registers, but with modified displacement.
+ DCHECK(offset >= 0 ? disp_value + offset > disp_value
+ : disp_value + offset < disp_value); // No overflow.
+ disp_value += offset;
+ data_.rex = operand.data().rex;
+ if (!is_int8(disp_value) || is_baseless) {
+ // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
+ data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
+ data_.len = disp_offset + 4;
+ Memory::int32_at(&data_.buf[disp_offset]) = disp_value;
+ } else if (disp_value != 0 || (base_reg == 0x05)) {
+ // Need 8 bits of displacement.
+ data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
+ data_.len = disp_offset + 1;
+ data_.buf[disp_offset] = static_cast<byte>(disp_value);
+ } else {
+ // Need no displacement.
+ data_.buf[0] = (modrm & 0x3F); // Mode 0.
+ data_.len = disp_offset;
+ }
+ if (has_sib) {
+ data_.buf[1] = operand.data().buf[1];
+ }
}
- // Write new operand with same registers, but with modified displacement.
- DCHECK(offset >= 0 ? disp_value + offset > disp_value
- : disp_value + offset < disp_value); // No overflow.
- disp_value += offset;
- rex_ = operand.rex_;
- if (!is_int8(disp_value) || is_baseless) {
- // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- buf_[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
- len_ = disp_offset + 4;
- Memory::int32_at(&buf_[disp_offset]) = disp_value;
- } else if (disp_value != 0 || (base_reg == 0x05)) {
- // Need 8 bits of displacement.
- buf_[0] = (modrm & 0x3F) | 0x40; // Mode 1.
- len_ = disp_offset + 1;
- buf_[disp_offset] = static_cast<byte>(disp_value);
- } else {
- // Need no displacement.
- buf_[0] = (modrm & 0x3F); // Mode 0.
- len_ = disp_offset;
+ void set_modrm(int mod, Register rm_reg) {
+ DCHECK(is_uint2(mod));
+ data_.buf[0] = mod << 6 | rm_reg.low_bits();
+ // Set REX.B to the high bit of rm.code().
+ data_.rex |= rm_reg.high_bit();
}
- if (has_sib) {
- buf_[1] = operand.buf_[1];
+
+ void set_sib(ScaleFactor scale, Register index, Register base) {
+ DCHECK_EQ(data_.len, 1);
+ DCHECK(is_uint2(scale));
+ // Use SIB with no index register only for base rsp or r12. Otherwise we
+ // would skip the SIB byte entirely.
+ DCHECK(index != rsp || base == rsp || base == r12);
+ data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
+ data_.rex |= index.high_bit() << 1 | base.high_bit();
+ data_.len = 2;
}
-}
+ void set_disp8(int disp) {
+ DCHECK(is_int8(disp));
+ DCHECK(data_.len == 1 || data_.len == 2);
+ int8_t* p = reinterpret_cast<int8_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(int8_t);
+ }
+
+ void set_disp32(int disp) {
+ DCHECK(data_.len == 1 || data_.len == 2);
+ int32_t* p = reinterpret_cast<int32_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(int32_t);
+ }
+
+ void set_disp64(int64_t disp) {
+ DCHECK_EQ(1, data_.len);
+ int64_t* p = reinterpret_cast<int64_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(disp);
+ }
+
+ const Operand::Data& data() const { return data_; }
+
+ private:
+ Operand::Data data_;
+};
+} // namespace
+
+Operand::Operand(Register base, int32_t disp)
+ : data_(OperandBuilder(base, disp).data()) {}
+
+Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp)
+ : data_(OperandBuilder(base, index, scale, disp).data()) {}
+
+Operand::Operand(Register index, ScaleFactor scale, int32_t disp)
+ : data_(OperandBuilder(index, scale, disp).data()) {}
+
+Operand::Operand(Label* label, int addend)
+ : data_(OperandBuilder(label, addend).data()) {}
+
+Operand::Operand(Operand operand, int32_t offset)
+ : data_(OperandBuilder(operand, offset).data()) {}
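+
+As a worked example (a sketch, not V8 code), hand-encoding mov eax, [rbx + rcx*4 + 0x10] the way OperandBuilder lays it out: a mode-1 ModR/M byte with rm = rsp to signal that a SIB byte follows, then the SIB byte, then a disp8.
+
+#include <cstdint>
+
+// Illustrative only; register codes: eax=0, rcx=1, rbx=3, rsp=4.
+inline int EncodeMovEaxExample(uint8_t* out) {
+  const uint8_t scale = 2;                     // times_4
+  const uint8_t index = 1, base = 3, reg = 0;  // rcx, rbx, eax
+  out[0] = 0x8B;                               // opcode: mov r32, r/m32
+  out[1] = static_cast<uint8_t>(1 << 6 | reg << 3 | 4);  // ModR/M: mode 1, rm=rsp
+  out[2] = static_cast<uint8_t>(scale << 6 | index << 3 | base);  // SIB = 0x8B
+  out[3] = 0x10;                               // disp8
+  return 4;  // emitted bytes: 8B 44 8B 10
+}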
bool Operand::AddressUsesRegister(Register reg) const {
int code = reg.code();
- DCHECK_NE(buf_[0] & 0xC0, 0xC0); // Always a memory operand.
- // Start with only low three bits of base register. Initial decoding doesn't
- // distinguish on the REX.B bit.
- int base_code = buf_[0] & 0x07;
+ DCHECK_NE(data_.buf[0] & 0xC0, 0xC0); // Always a memory operand.
+ // Start with only low three bits of base register. Initial decoding
+ // doesn't distinguish on the REX.B bit.
+ int base_code = data_.buf[0] & 0x07;
if (base_code == rsp.code()) {
// SIB byte present in buf_[1].
// Check the index register from the SIB byte + REX.X prefix.
- int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
+ int index_code = ((data_.buf[1] >> 3) & 0x07) | ((data_.rex & 0x02) << 2);
// Index code (including REX.X) of 0x04 (rsp) means no index register.
if (index_code != rsp.code() && index_code == code) return true;
// Add REX.B to get the full base register code.
- base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
+ base_code = (data_.buf[1] & 0x07) | ((data_.rex & 0x01) << 3);
// A base register of 0x05 (rbp) with mod = 0 means no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+ if (base_code == rbp.code() && ((data_.buf[0] & 0xC0) == 0)) return false;
return code == base_code;
} else {
// A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
// no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
- base_code |= ((rex_ & 0x01) << 3);
+ if (base_code == rbp.code() && ((data_.buf[0] & 0xC0) == 0)) return false;
+ base_code |= ((data_.rex & 0x01) << 3);
return code == base_code;
}
}
@@ -527,22 +583,22 @@ void Assembler::GrowBuffer() {
DCHECK(!buffer_overflow());
}
-
-void Assembler::emit_operand(int code, const Operand& adr) {
+void Assembler::emit_operand(int code, Operand adr) {
DCHECK(is_uint3(code));
- const unsigned length = adr.len_;
+ const unsigned length = adr.data().len;
DCHECK_GT(length, 0);
// Emit updated ModR/M byte containing the given register.
- DCHECK_EQ(adr.buf_[0] & 0x38, 0);
- *pc_++ = adr.buf_[0] | code << 3;
+ DCHECK_EQ(adr.data().buf[0] & 0x38, 0);
+ *pc_++ = adr.data().buf[0] | code << 3;
// Recognize RIP relative addressing.
- if (adr.buf_[0] == 5) {
+ if (adr.data().buf[0] == 5) {
DCHECK_EQ(9u, length);
- Label* label = *bit_cast<Label* const*>(&adr.buf_[1]);
+ Label* label = *bit_cast<Label* const*>(&adr.data().buf[1]);
if (label->is_bound()) {
- int offset = label->pos() - pc_offset() - sizeof(int32_t);
+ int offset =
+ label->pos() - pc_offset() - sizeof(int32_t) + adr.data().addend;
DCHECK_GE(0, offset);
emitl(offset);
} else if (label->is_linked()) {
@@ -556,17 +612,14 @@ void Assembler::emit_operand(int code, const Operand& adr) {
}
} else {
// Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) *pc_++ = adr.buf_[i];
+ for (unsigned i = 1; i < length; i++) *pc_++ = adr.data().buf[i];
}
}
// Assembler Instruction implementations.
-void Assembler::arithmetic_op(byte opcode,
- Register reg,
- const Operand& op,
- int size) {
+void Assembler::arithmetic_op(byte opcode, Register reg, Operand op, int size) {
EnsureSpace ensure_space(this);
emit_rex(reg, op, size);
emit(opcode);
@@ -610,10 +663,7 @@ void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
}
}
-
-void Assembler::arithmetic_op_16(byte opcode,
- Register reg,
- const Operand& rm_reg) {
+void Assembler::arithmetic_op_16(byte opcode, Register reg, Operand rm_reg) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg, rm_reg);
@@ -621,8 +671,7 @@ void Assembler::arithmetic_op_16(byte opcode,
emit_operand(reg, rm_reg);
}
-
-void Assembler::arithmetic_op_8(byte opcode, Register reg, const Operand& op) {
+void Assembler::arithmetic_op_8(byte opcode, Register reg, Operand op) {
EnsureSpace ensure_space(this);
if (!reg.is_byte_register()) {
emit_rex_32(reg, op);
@@ -676,10 +725,8 @@ void Assembler::immediate_arithmetic_op(byte subcode,
}
}
-void Assembler::immediate_arithmetic_op(byte subcode,
- const Operand& dst,
- Immediate src,
- int size) {
+void Assembler::immediate_arithmetic_op(byte subcode, Operand dst,
+ Immediate src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
@@ -714,9 +761,7 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
}
}
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
+void Assembler::immediate_arithmetic_op_16(byte subcode, Operand dst,
Immediate src) {
EnsureSpace ensure_space(this);
emit(0x66); // Operand size override prefix.
@@ -732,9 +777,7 @@ void Assembler::immediate_arithmetic_op_16(byte subcode,
}
}
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
+void Assembler::immediate_arithmetic_op_8(byte subcode, Operand dst,
Immediate src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
@@ -813,8 +856,7 @@ void Assembler::shift(Operand dst, int subcode, int size) {
emit_operand(subcode, dst);
}
-
-void Assembler::bt(const Operand& dst, Register src) {
+void Assembler::bt(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
emit(0x0F);
@@ -822,8 +864,7 @@ void Assembler::bt(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-
-void Assembler::bts(const Operand& dst, Register src) {
+void Assembler::bts(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
emit(0x0F);
@@ -840,8 +881,7 @@ void Assembler::bsrl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::bsrl(Register dst, const Operand& src) {
+void Assembler::bsrl(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -858,8 +898,7 @@ void Assembler::bsrq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::bsrq(Register dst, const Operand& src) {
+void Assembler::bsrq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x0F);
@@ -876,8 +915,7 @@ void Assembler::bsfl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::bsfl(Register dst, const Operand& src) {
+void Assembler::bsfl(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -894,8 +932,7 @@ void Assembler::bsfq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::bsfq(Register dst, const Operand& src) {
+void Assembler::bsfq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x0F);
@@ -912,7 +949,7 @@ void Assembler::pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
emit(shuffle);
}
-void Assembler::pshufw(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::pshufw(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -990,8 +1027,7 @@ void Assembler::call(Register adr) {
emit_modrm(0x2, adr);
}
-
-void Assembler::call(const Operand& op) {
+void Assembler::call(Operand op) {
EnsureSpace ensure_space(this);
// Opcode: FF /2 m64.
emit_optional_rex_32(op);
@@ -1049,8 +1085,7 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+void Assembler::cmovq(Condition cc, Register dst, Operand src) {
if (cc == always) {
movq(dst, src);
} else if (cc == never) {
@@ -1081,8 +1116,7 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+void Assembler::cmovl(Condition cc, Register dst, Operand src) {
if (cc == always) {
movl(dst, src);
} else if (cc == never) {
@@ -1110,7 +1144,7 @@ void Assembler::lock() {
emit(0xF0);
}
-void Assembler::cmpxchgb(const Operand& dst, Register src) {
+void Assembler::cmpxchgb(Operand dst, Register src) {
EnsureSpace ensure_space(this);
if (!src.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -1123,7 +1157,7 @@ void Assembler::cmpxchgb(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-void Assembler::cmpxchgw(const Operand& dst, Register src) {
+void Assembler::cmpxchgw(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
@@ -1132,7 +1166,7 @@ void Assembler::cmpxchgw(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-void Assembler::emit_cmpxchg(const Operand& dst, Register src, int size) {
+void Assembler::emit_cmpxchg(Operand dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, dst, size);
emit(0x0F);
@@ -1168,8 +1202,7 @@ void Assembler::emit_dec(Register dst, int size) {
emit_modrm(0x1, dst);
}
-
-void Assembler::emit_dec(const Operand& dst, int size) {
+void Assembler::emit_dec(Operand dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
emit(0xFF);
@@ -1187,8 +1220,7 @@ void Assembler::decb(Register dst) {
emit_modrm(0x1, dst);
}
-
-void Assembler::decb(const Operand& dst) {
+void Assembler::decb(Operand dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0xFE);
@@ -1233,8 +1265,7 @@ void Assembler::emit_imul(Register src, int size) {
emit_modrm(0x5, src);
}
-
-void Assembler::emit_imul(const Operand& src, int size) {
+void Assembler::emit_imul(Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, size);
emit(0xF7);
@@ -1250,8 +1281,7 @@ void Assembler::emit_imul(Register dst, Register src, int size) {
emit_modrm(dst, src);
}
-
-void Assembler::emit_imul(Register dst, const Operand& src, int size) {
+void Assembler::emit_imul(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
emit(0x0F);
@@ -1274,9 +1304,7 @@ void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
}
}
-
-void Assembler::emit_imul(Register dst, const Operand& src, Immediate imm,
- int size) {
+void Assembler::emit_imul(Register dst, Operand src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
@@ -1298,8 +1326,7 @@ void Assembler::emit_inc(Register dst, int size) {
emit_modrm(0x0, dst);
}
-
-void Assembler::emit_inc(const Operand& dst, int size) {
+void Assembler::emit_inc(Operand dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
emit(0xFF);
@@ -1483,8 +1510,7 @@ void Assembler::jmp(Register target) {
emit_modrm(0x4, target);
}
-
-void Assembler::jmp(const Operand& src) {
+void Assembler::jmp(Operand src) {
EnsureSpace ensure_space(this);
// Opcode FF/4 m64.
emit_optional_rex_32(src);
@@ -1492,8 +1518,7 @@ void Assembler::jmp(const Operand& src) {
emit_operand(0x4, src);
}
-
-void Assembler::emit_lea(Register dst, const Operand& src, int size) {
+void Assembler::emit_lea(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
emit(0x8D);
@@ -1529,8 +1554,7 @@ void Assembler::leave() {
emit(0xC9);
}
-
-void Assembler::movb(Register dst, const Operand& src) {
+void Assembler::movb(Register dst, Operand src) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -1553,8 +1577,7 @@ void Assembler::movb(Register dst, Immediate imm) {
emit(imm.value_);
}
-
-void Assembler::movb(const Operand& dst, Register src) {
+void Assembler::movb(Operand dst, Register src) {
EnsureSpace ensure_space(this);
if (!src.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -1566,8 +1589,7 @@ void Assembler::movb(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-
-void Assembler::movb(const Operand& dst, Immediate imm) {
+void Assembler::movb(Operand dst, Immediate imm) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0xC6);
@@ -1575,8 +1597,7 @@ void Assembler::movb(const Operand& dst, Immediate imm) {
emit(static_cast<byte>(imm.value_));
}
-
-void Assembler::movw(Register dst, const Operand& src) {
+void Assembler::movw(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -1584,8 +1605,7 @@ void Assembler::movw(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::movw(const Operand& dst, Register src) {
+void Assembler::movw(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
@@ -1593,8 +1613,7 @@ void Assembler::movw(const Operand& dst, Register src) {
emit_operand(src, dst);
}
-
-void Assembler::movw(const Operand& dst, Immediate imm) {
+void Assembler::movw(Operand dst, Immediate imm) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst);
@@ -1604,8 +1623,7 @@ void Assembler::movw(const Operand& dst, Immediate imm) {
emit(static_cast<byte>(imm.value_ >> 8));
}
-
-void Assembler::emit_mov(Register dst, const Operand& src, int size) {
+void Assembler::emit_mov(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
emit(0x8B);
@@ -1626,8 +1644,7 @@ void Assembler::emit_mov(Register dst, Register src, int size) {
}
}
-
-void Assembler::emit_mov(const Operand& dst, Register src, int size) {
+void Assembler::emit_mov(Operand dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, dst, size);
emit(0x89);
@@ -1648,8 +1665,7 @@ void Assembler::emit_mov(Register dst, Immediate value, int size) {
emit(value);
}
-
-void Assembler::emit_mov(const Operand& dst, Immediate value, int size) {
+void Assembler::emit_mov(Operand dst, Immediate value, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
emit(0xC7);
@@ -1689,7 +1705,7 @@ void Assembler::movq(Register dst, uint64_t value, RelocInfo::Mode rmode) {
// Loads the ip-relative location of the src label into the target location
// (as a 32-bit offset sign extended to 64-bit).
-void Assembler::movl(const Operand& dst, Label* src) {
+void Assembler::movl(Operand dst, Label* src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0xC7);
@@ -1723,8 +1739,7 @@ void Assembler::movsxbl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::movsxbl(Register dst, const Operand& src) {
+void Assembler::movsxbl(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -1732,8 +1747,7 @@ void Assembler::movsxbl(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::movsxbq(Register dst, const Operand& src) {
+void Assembler::movsxbq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x0F);
@@ -1757,8 +1771,7 @@ void Assembler::movsxwl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::movsxwl(Register dst, const Operand& src) {
+void Assembler::movsxwl(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -1766,8 +1779,7 @@ void Assembler::movsxwl(Register dst, const Operand& src) {
emit_operand(dst, src);
}
-
-void Assembler::movsxwq(Register dst, const Operand& src) {
+void Assembler::movsxwq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x0F);
@@ -1790,16 +1802,14 @@ void Assembler::movsxlq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::movsxlq(Register dst, const Operand& src) {
+void Assembler::movsxlq(Register dst, Operand src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
emit(0x63);
emit_operand(dst, src);
}
-
-void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
+void Assembler::emit_movzxb(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1825,8 +1835,7 @@ void Assembler::emit_movzxb(Register dst, Register src, int size) {
emit_modrm(dst, src);
}
-
-void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
+void Assembler::emit_movzxw(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1878,8 +1887,7 @@ void Assembler::mull(Register src) {
emit_modrm(0x4, src);
}
-
-void Assembler::mull(const Operand& src) {
+void Assembler::mull(Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xF7);
@@ -1902,8 +1910,7 @@ void Assembler::emit_neg(Register dst, int size) {
emit_modrm(0x3, dst);
}
-
-void Assembler::emit_neg(const Operand& dst, int size) {
+void Assembler::emit_neg(Operand dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
@@ -1924,8 +1931,7 @@ void Assembler::emit_not(Register dst, int size) {
emit_modrm(0x2, dst);
}
-
-void Assembler::emit_not(const Operand& dst, int size) {
+void Assembler::emit_not(Operand dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
emit(0xF7);
@@ -1953,6 +1959,7 @@ void Assembler::Nop(int n) {
switch (n) {
case 2:
emit(0x66);
+ V8_FALLTHROUGH;
case 1:
emit(0x90);
return;
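
`V8_FALLTHROUGH` marks these case fallthroughs in `Nop` as intentional, for compilers that warn on implicit fallthrough (e.g. `-Wimplicit-fallthrough`); it plays the role of C++17's `[[fallthrough]]` where the toolchain supports that (or a vendor equivalent) and expands to nothing elsewhere. A sketch of the pattern using the standard attribute directly, with a stand-in emit callback:

    // Sketch only, not V8's definition: a 2-byte nop is 0x66 0x90, so
    // case 2 emits the prefix and deliberately falls into case 1.
    void EmitShortNop(int n, void (*emit_byte)(unsigned char)) {
      switch (n) {
        case 2:
          emit_byte(0x66);  // operand-size prefix
          [[fallthrough]];  // deliberate, silences the warning
        case 1:
          emit_byte(0x90);  // one-byte nop
          return;
        default:
          return;
      }
    }
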
@@ -1969,6 +1976,7 @@ void Assembler::Nop(int n) {
return;
case 6:
emit(0x66);
+ V8_FALLTHROUGH;
case 5:
emit(0x0F);
emit(0x1F);
@@ -1989,12 +1997,15 @@ void Assembler::Nop(int n) {
case 11:
emit(0x66);
n--;
+ V8_FALLTHROUGH;
case 10:
emit(0x66);
n--;
+ V8_FALLTHROUGH;
case 9:
emit(0x66);
n--;
+ V8_FALLTHROUGH;
case 8:
emit(0x0F);
emit(0x1F);
@@ -2016,8 +2027,7 @@ void Assembler::popq(Register dst) {
emit(0x58 | dst.low_bits());
}
-
-void Assembler::popq(const Operand& dst) {
+void Assembler::popq(Operand dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x8F);
@@ -2037,8 +2047,7 @@ void Assembler::pushq(Register src) {
emit(0x50 | src.low_bits());
}
-
-void Assembler::pushq(const Operand& src) {
+void Assembler::pushq(Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xFF);
@@ -2125,7 +2134,7 @@ void Assembler::shrd(Register dst, Register src) {
emit_modrm(src, dst);
}
-void Assembler::xchgb(Register reg, const Operand& op) {
+void Assembler::xchgb(Register reg, Operand op) {
EnsureSpace ensure_space(this);
if (!reg.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -2137,7 +2146,7 @@ void Assembler::xchgb(Register reg, const Operand& op) {
emit_operand(reg, op);
}
-void Assembler::xchgw(Register reg, const Operand& op) {
+void Assembler::xchgw(Register reg, Operand op) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg, op);
@@ -2162,8 +2171,7 @@ void Assembler::emit_xchg(Register dst, Register src, int size) {
}
}
-
-void Assembler::emit_xchg(Register dst, const Operand& src, int size) {
+void Assembler::emit_xchg(Register dst, Operand src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
emit(0x87);
@@ -2193,6 +2201,12 @@ void Assembler::store_rax(ExternalReference ref) {
store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
}
+void Assembler::sub_sp_32(uint32_t imm) {
+ emit_rex_64();
+ emit(0x81); // Opcode 0x81: always use the literal 32-bit immediate form.
+ emit_modrm(0x5, rsp);
+ emitl(imm);
+}
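
The new `sub_sp_32` always emits the long encoding of `sub rsp, imm32`: a REX.W prefix, opcode `0x81` with ModR/M extension `/5`, and a full 4-byte immediate, even for values that would fit the sign-extended 8-bit form (`0x83`). The fixed 7-byte length makes the instruction easy to patch in place. For example (bytes per the standard x86-64 encoding, not taken from this diff):

    // sub rsp, 0x10 in the forced imm32 form:
    //   0x48               REX.W
    //   0x81               group-1 ALU opcode, imm32 variant
    //   0xEC               ModR/M: mod=11, reg=/5 (sub), rm=rsp
    //   0x10 0x00 0x00 0x00  imm32, little-endian
    const unsigned char kSubRsp16[7] = {0x48, 0x81, 0xEC, 0x10, 0x00, 0x00, 0x00};
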
void Assembler::testb(Register dst, Register src) {
EnsureSpace ensure_space(this);
@@ -2204,13 +2218,12 @@ void Assembler::testb(Register reg, Immediate mask) {
emit_test(reg, mask, sizeof(int8_t));
}
-void Assembler::testb(const Operand& op, Immediate mask) {
+void Assembler::testb(Operand op, Immediate mask) {
DCHECK(is_int8(mask.value_) || is_uint8(mask.value_));
emit_test(op, mask, sizeof(int8_t));
}
-
-void Assembler::testb(const Operand& op, Register reg) {
+void Assembler::testb(Operand op, Register reg) {
emit_test(op, reg, sizeof(int8_t));
}
@@ -2222,11 +2235,11 @@ void Assembler::testw(Register reg, Immediate mask) {
emit_test(reg, mask, sizeof(int16_t));
}
-void Assembler::testw(const Operand& op, Immediate mask) {
+void Assembler::testw(Operand op, Immediate mask) {
emit_test(op, mask, sizeof(int16_t));
}
-void Assembler::testw(const Operand& op, Register reg) {
+void Assembler::testw(Operand op, Register reg) {
emit_test(op, reg, sizeof(int16_t));
}
@@ -2285,7 +2298,7 @@ void Assembler::emit_test(Register reg, Immediate mask, int size) {
}
}
-void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
+void Assembler::emit_test(Operand op, Immediate mask, int size) {
if (is_uint8(mask.value_)) {
size = sizeof(int8_t);
} else if (is_uint16(mask.value_)) {
@@ -2313,7 +2326,7 @@ void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
}
}
-void Assembler::emit_test(const Operand& op, Register reg, int size) {
+void Assembler::emit_test(Operand op, Register reg, int size) {
EnsureSpace ensure_space(this);
if (size == sizeof(int16_t)) {
emit(0x66);
@@ -2372,32 +2385,28 @@ void Assembler::fldln2() {
emit(0xED);
}
-
-void Assembler::fld_s(const Operand& adr) {
+void Assembler::fld_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(0, adr);
}
-
-void Assembler::fld_d(const Operand& adr) {
+void Assembler::fld_d(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(0, adr);
}
-
-void Assembler::fstp_s(const Operand& adr) {
+void Assembler::fstp_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(3, adr);
}
-
-void Assembler::fstp_d(const Operand& adr) {
+void Assembler::fstp_d(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDD);
@@ -2411,32 +2420,28 @@ void Assembler::fstp(int index) {
emit_farith(0xDD, 0xD8, index);
}
-
-void Assembler::fild_s(const Operand& adr) {
+void Assembler::fild_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(0, adr);
}
-
-void Assembler::fild_d(const Operand& adr) {
+void Assembler::fild_d(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(5, adr);
}
-
-void Assembler::fistp_s(const Operand& adr) {
+void Assembler::fistp_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(3, adr);
}
-
-void Assembler::fisttp_s(const Operand& adr) {
+void Assembler::fisttp_s(Operand adr) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
@@ -2444,8 +2449,7 @@ void Assembler::fisttp_s(const Operand& adr) {
emit_operand(1, adr);
}
-
-void Assembler::fisttp_d(const Operand& adr) {
+void Assembler::fisttp_d(Operand adr) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
@@ -2453,16 +2457,14 @@ void Assembler::fisttp_d(const Operand& adr) {
emit_operand(1, adr);
}
-
-void Assembler::fist_s(const Operand& adr) {
+void Assembler::fist_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(2, adr);
}
-
-void Assembler::fistp_d(const Operand& adr) {
+void Assembler::fistp_d(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDF);
@@ -2544,8 +2546,7 @@ void Assembler::fsub(int i) {
emit_farith(0xDC, 0xE8, i);
}
-
-void Assembler::fisub_s(const Operand& adr) {
+void Assembler::fisub_s(Operand adr) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(adr);
emit(0xDA);
@@ -2723,8 +2724,7 @@ void Assembler::andps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::andps(XMMRegister dst, const Operand& src) {
+void Assembler::andps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2741,8 +2741,7 @@ void Assembler::orps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::orps(XMMRegister dst, const Operand& src) {
+void Assembler::orps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2760,8 +2759,7 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::xorps(XMMRegister dst, const Operand& src) {
+void Assembler::xorps(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -2779,8 +2777,7 @@ void Assembler::addps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::addps(XMMRegister dst, const Operand& src) {
+void Assembler::addps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2797,8 +2794,7 @@ void Assembler::subps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subps(XMMRegister dst, const Operand& src) {
+void Assembler::subps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2815,8 +2811,7 @@ void Assembler::mulps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulps(XMMRegister dst, const Operand& src) {
+void Assembler::mulps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2833,8 +2828,7 @@ void Assembler::divps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divps(XMMRegister dst, const Operand& src) {
+void Assembler::divps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2855,8 +2849,7 @@ void Assembler::movd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movd(XMMRegister dst, const Operand& src) {
+void Assembler::movd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2919,8 +2912,7 @@ void Assembler::movq(XMMRegister dst, XMMRegister src) {
}
}
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+void Assembler::movdqa(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(src, dst);
@@ -2929,8 +2921,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+void Assembler::movdqa(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(dst, src);
@@ -2939,8 +2930,7 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movdqu(const Operand& dst, XMMRegister src) {
+void Assembler::movdqu(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_rex_64(src, dst);
@@ -2949,8 +2939,7 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
-void Assembler::movdqu(XMMRegister dst, const Operand& src) {
+void Assembler::movdqu(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_rex_64(dst, src);
@@ -2986,7 +2975,7 @@ void Assembler::pextrb(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t imm8) {
+void Assembler::pextrb(Operand dst, XMMRegister src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
@@ -3010,7 +2999,7 @@ void Assembler::pinsrw(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t imm8) {
+void Assembler::pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3034,7 +3023,7 @@ void Assembler::pextrw(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t imm8) {
+void Assembler::pextrw(Operand dst, XMMRegister src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
@@ -3059,7 +3048,7 @@ void Assembler::pextrd(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t imm8) {
+void Assembler::pextrd(Operand dst, XMMRegister src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3083,8 +3072,7 @@ void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-
-void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+void Assembler::pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3108,7 +3096,7 @@ void Assembler::pinsrb(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t imm8) {
+void Assembler::pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3133,7 +3121,7 @@ void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
emit(imm8);
}
-void Assembler::movsd(const Operand& dst, XMMRegister src) {
+void Assembler::movsd(Operand dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
@@ -3154,8 +3142,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
+void Assembler::movsd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
@@ -3214,8 +3201,7 @@ void Assembler::movapd(XMMRegister dst, XMMRegister src) {
}
}
-
-void Assembler::movupd(XMMRegister dst, const Operand& src) {
+void Assembler::movupd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3224,7 +3210,7 @@ void Assembler::movupd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::movupd(const Operand& dst, XMMRegister src) {
+void Assembler::movupd(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
@@ -3242,8 +3228,7 @@ void Assembler::addss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::addss(XMMRegister dst, const Operand& src) {
+void Assembler::addss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3262,8 +3247,7 @@ void Assembler::subss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subss(XMMRegister dst, const Operand& src) {
+void Assembler::subss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3282,8 +3266,7 @@ void Assembler::mulss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulss(XMMRegister dst, const Operand& src) {
+void Assembler::mulss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3302,8 +3285,7 @@ void Assembler::divss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divss(XMMRegister dst, const Operand& src) {
+void Assembler::divss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3322,8 +3304,7 @@ void Assembler::maxss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::maxss(XMMRegister dst, const Operand& src) {
+void Assembler::maxss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3342,8 +3323,7 @@ void Assembler::minss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::minss(XMMRegister dst, const Operand& src) {
+void Assembler::minss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3362,8 +3342,7 @@ void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtss(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3382,8 +3361,7 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
+void Assembler::ucomiss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -3403,8 +3381,7 @@ void Assembler::movss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
+void Assembler::movss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
@@ -3414,8 +3391,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::movss(const Operand& src, XMMRegister dst) {
+void Assembler::movss(Operand src, XMMRegister dst) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
@@ -3518,7 +3494,7 @@ void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
emit(cmp);
}
-void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) {
+void Assembler::cmpps(XMMRegister dst, Operand src, int8_t cmp) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -3537,7 +3513,7 @@ void Assembler::cmppd(XMMRegister dst, XMMRegister src, int8_t cmp) {
emit(cmp);
}
-void Assembler::cmppd(XMMRegister dst, const Operand& src, int8_t cmp) {
+void Assembler::cmppd(XMMRegister dst, Operand src, int8_t cmp) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x66);
@@ -3547,7 +3523,7 @@ void Assembler::cmppd(XMMRegister dst, const Operand& src, int8_t cmp) {
emit(cmp);
}
-void Assembler::cvttss2si(Register dst, const Operand& src) {
+void Assembler::cvttss2si(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3568,8 +3544,7 @@ void Assembler::cvttss2si(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
+void Assembler::cvttsd2si(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3601,8 +3576,7 @@ void Assembler::cvttss2siq(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvttss2siq(Register dst, const Operand& src) {
+void Assembler::cvttss2siq(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3623,8 +3597,7 @@ void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvttsd2siq(Register dst, const Operand& src) {
+void Assembler::cvttsd2siq(Register dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3634,8 +3607,7 @@ void Assembler::cvttsd2siq(Register dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtlsi2sd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3656,8 +3628,7 @@ void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtlsi2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtlsi2ss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3677,8 +3648,7 @@ void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtqsi2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtqsi2ss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3699,8 +3669,7 @@ void Assembler::cvtqsi2ss(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtqsi2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtqsi2sd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3732,8 +3701,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
+void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3754,8 +3722,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
+void Assembler::cvtsd2ss(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3797,8 +3764,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
+void Assembler::addsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3817,8 +3783,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+void Assembler::mulsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3837,8 +3802,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::subsd(XMMRegister dst, const Operand& src) {
+void Assembler::subsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3857,8 +3821,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::divsd(XMMRegister dst, const Operand& src) {
+void Assembler::divsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3877,8 +3840,7 @@ void Assembler::maxsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+void Assembler::maxsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3897,8 +3859,7 @@ void Assembler::minsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::minsd(XMMRegister dst, const Operand& src) {
+void Assembler::minsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3917,8 +3878,7 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::andpd(XMMRegister dst, const Operand& src) {
+void Assembler::andpd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3937,8 +3897,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::orpd(XMMRegister dst, const Operand& src) {
+void Assembler::orpd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3958,8 +3917,7 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::xorpd(XMMRegister dst, const Operand& src) {
+void Assembler::xorpd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3980,8 +3938,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtsd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -4000,7 +3957,7 @@ void Assembler::haddps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::haddps(XMMRegister dst, const Operand& src) {
+void Assembler::haddps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -4019,8 +3976,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+void Assembler::ucomisd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -4100,7 +4056,7 @@ void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::punpckldq(XMMRegister dst, const Operand& src) {
+void Assembler::punpckldq(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -4129,9 +4085,8 @@ void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ Operand src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW1);
@@ -4149,9 +4104,8 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ Operand src2) {
DCHECK(IsEnabled(FMA3));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW0);
@@ -4169,8 +4123,7 @@ void Assembler::vmovd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::vmovd(XMMRegister dst, const Operand& src) {
+void Assembler::vmovd(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW0);
@@ -4198,8 +4151,7 @@ void Assembler::vmovq(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::vmovq(XMMRegister dst, const Operand& src) {
+void Assembler::vmovq(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW1);
@@ -4227,9 +4179,8 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
- VexW w) {
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
@@ -4247,9 +4198,7 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
@@ -4267,9 +4216,7 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
@@ -4286,8 +4233,7 @@ void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::vucomiss(XMMRegister dst, const Operand& src) {
+void Assembler::vucomiss(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
@@ -4305,9 +4251,7 @@ void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
-void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_vex_prefix(dst, src1, src2, kLIG, kF3, k0F, kWIG);
@@ -4324,8 +4268,7 @@ void Assembler::bmi1q(byte op, Register reg, Register vreg, Register rm) {
emit_modrm(reg, rm);
}
-
-void Assembler::bmi1q(byte op, Register reg, Register vreg, const Operand& rm) {
+void Assembler::bmi1q(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW1);
@@ -4342,8 +4285,7 @@ void Assembler::bmi1l(byte op, Register reg, Register vreg, Register rm) {
emit_modrm(reg, rm);
}
-
-void Assembler::bmi1l(byte op, Register reg, Register vreg, const Operand& rm) {
+void Assembler::bmi1l(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW0);
@@ -4362,8 +4304,7 @@ void Assembler::tzcntq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::tzcntq(Register dst, const Operand& src) {
+void Assembler::tzcntq(Register dst, Operand src) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4384,8 +4325,7 @@ void Assembler::tzcntl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::tzcntl(Register dst, const Operand& src) {
+void Assembler::tzcntl(Register dst, Operand src) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4406,8 +4346,7 @@ void Assembler::lzcntq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::lzcntq(Register dst, const Operand& src) {
+void Assembler::lzcntq(Register dst, Operand src) {
DCHECK(IsEnabled(LZCNT));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4428,8 +4367,7 @@ void Assembler::lzcntl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::lzcntl(Register dst, const Operand& src) {
+void Assembler::lzcntl(Register dst, Operand src) {
DCHECK(IsEnabled(LZCNT));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4450,8 +4388,7 @@ void Assembler::popcntq(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::popcntq(Register dst, const Operand& src) {
+void Assembler::popcntq(Register dst, Operand src) {
DCHECK(IsEnabled(POPCNT));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4472,8 +4409,7 @@ void Assembler::popcntl(Register dst, Register src) {
emit_modrm(dst, src);
}
-
-void Assembler::popcntl(Register dst, const Operand& src) {
+void Assembler::popcntl(Register dst, Operand src) {
DCHECK(IsEnabled(POPCNT));
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -4493,9 +4429,8 @@ void Assembler::bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg,
emit_modrm(reg, rm);
}
-
void Assembler::bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm) {
+ Operand rm) {
DCHECK(IsEnabled(BMI2));
EnsureSpace ensure_space(this);
emit_vex_prefix(reg, vreg, rm, kLZ, pp, k0F38, kW1);
@@ -4513,9 +4448,8 @@ void Assembler::bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
emit_modrm(reg, rm);
}
-
void Assembler::bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm) {
+ Operand rm) {
DCHECK(IsEnabled(BMI2));
EnsureSpace ensure_space(this);
emit_vex_prefix(reg, vreg, rm, kLZ, pp, k0F38, kW0);
@@ -4535,8 +4469,7 @@ void Assembler::rorxq(Register dst, Register src, byte imm8) {
emit(imm8);
}
-
-void Assembler::rorxq(Register dst, const Operand& src, byte imm8) {
+void Assembler::rorxq(Register dst, Operand src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
Register vreg = Register::from_code<0>(); // VEX.vvvv unused
@@ -4559,8 +4492,7 @@ void Assembler::rorxl(Register dst, Register src, byte imm8) {
emit(imm8);
}
-
-void Assembler::rorxl(Register dst, const Operand& src, byte imm8) {
+void Assembler::rorxl(Register dst, Operand src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
Register vreg = Register::from_code<0>(); // VEX.vvvv unused
@@ -4584,7 +4516,7 @@ void Assembler::minps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::minps(XMMRegister dst, const Operand& src) {
+void Assembler::minps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4600,7 +4532,7 @@ void Assembler::maxps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::maxps(XMMRegister dst, const Operand& src) {
+void Assembler::maxps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4616,7 +4548,7 @@ void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+void Assembler::rcpps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4632,7 +4564,7 @@ void Assembler::rsqrtps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+void Assembler::rsqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4648,7 +4580,7 @@ void Assembler::sqrtps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::sqrtps(XMMRegister dst, const Operand& src) {
+void Assembler::sqrtps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4664,7 +4596,7 @@ void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+void Assembler::cvtdq2ps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4688,7 +4620,7 @@ void Assembler::movups(XMMRegister dst, XMMRegister src) {
}
}
-void Assembler::movups(XMMRegister dst, const Operand& src) {
+void Assembler::movups(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -4696,7 +4628,7 @@ void Assembler::movups(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
-void Assembler::movups(const Operand& dst, XMMRegister src) {
+void Assembler::movups(Operand dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src, dst);
emit(0x0F);
@@ -4714,7 +4646,7 @@ void Assembler::sse2_instr(XMMRegister dst, XMMRegister src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::sse2_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::sse2_instr(XMMRegister dst, Operand src, byte prefix,
byte escape, byte opcode) {
EnsureSpace ensure_space(this);
emit(prefix);
@@ -4736,7 +4668,7 @@ void Assembler::ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::ssse3_instr(XMMRegister dst, Operand src, byte prefix,
byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSSE3));
EnsureSpace ensure_space(this);
@@ -4760,7 +4692,7 @@ void Assembler::sse4_instr(XMMRegister dst, XMMRegister src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
+void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
byte escape1, byte escape2, byte opcode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -4772,7 +4704,7 @@ void Assembler::sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
emit_sse_operand(dst, src);
}
-void Assembler::lddqu(XMMRegister dst, const Operand& src) {
+void Assembler::lddqu(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -4822,7 +4754,7 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
emit(shuffle);
}
-void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
+void Assembler::pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -4832,13 +4764,12 @@ void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) {
emit(shuffle);
}
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
+void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
-
-void Assembler::emit_sse_operand(Register reg, const Operand& adr) {
+void Assembler::emit_sse_operand(Register reg, Operand adr) {
emit_operand(reg, adr);
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 1c838b964b..a532729d15 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -81,9 +81,6 @@ namespace internal {
V(r14) \
V(r15)
-// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
-constexpr int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
-
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
@@ -316,7 +313,7 @@ class Immediate BASE_EMBEDDED {
private:
int32_t value_;
- RelocInfo::Mode rmode_ = RelocInfo::NONE32;
+ RelocInfo::Mode rmode_ = RelocInfo::NONE;
friend class Assembler;
};
@@ -325,7 +322,7 @@ class Immediate BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Machine instruction Operands
-enum ScaleFactor {
+enum ScaleFactor : int8_t {
times_1 = 0,
times_2 = 1,
times_4 = 2,
@@ -334,9 +331,15 @@ enum ScaleFactor {
times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
};
-
-class Operand BASE_EMBEDDED {
+class Operand {
public:
+ struct Data {
+ byte rex = 0;
+ byte buf[9];
+ byte len = 1; // number of bytes of buf in use.
+ int8_t addend; // for rip + offset + addend.
+ };
+
// [base + disp/r]
Operand(Register base, int32_t disp);
@@ -354,10 +357,12 @@ class Operand BASE_EMBEDDED {
// Offset from existing memory operand.
// Offset is added to existing displacement as 32-bit signed values and
// this must not overflow.
- Operand(const Operand& base, int32_t offset);
+ Operand(Operand base, int32_t offset);
// [rip + disp/r]
- explicit Operand(Label* label);
+ explicit Operand(Label* label, int addend = 0);
+
+ Operand(const Operand&) = default;
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
@@ -365,33 +370,29 @@ class Operand BASE_EMBEDDED {
// Queries related to the size of the generated instruction.
// Whether the generated instruction will have a REX prefix.
- bool requires_rex() const { return rex_ != 0; }
+ bool requires_rex() const { return data_.rex != 0; }
// Size of the ModR/M, SIB and displacement parts of the generated
// instruction.
- int operand_size() const { return len_; }
-
- private:
- byte rex_;
- byte buf_[9];
- // The number of bytes of buf_ in use.
- byte len_;
-
- // Set the ModR/M byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- // set_modrm can be called before or after set_sib and set_disp*.
- inline void set_modrm(int mod, Register rm);
-
- // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
- inline void set_sib(ScaleFactor scale, Register index, Register base);
+ int operand_size() const { return data_.len; }
- // Adds operand displacement fields (offsets added to the memory address).
- // Needs to be called after set_sib, not before it.
- inline void set_disp8(int disp);
- inline void set_disp32(int disp);
- inline void set_disp64(int64_t disp); // for labels.
+ const Data& data() const { return data_; }
- friend class Assembler;
+ private:
+ const Data data_;
};
+static_assert(sizeof(Operand) <= 2 * kPointerSize,
+ "Operand must be small enough to pass it by value");
+// Unfortunately, MSVC 2015 is broken in that both is_trivially_destructible and
+// is_trivially_copy_constructible are true, but is_trivially_copyable is false.
+// (status at 2018-02-26, observed on the msvc waterfall bot).
+#if V8_CC_MSVC
+static_assert(std::is_trivially_copy_constructible<Operand>::value &&
+ std::is_trivially_destructible<Operand>::value,
+ "Operand must be trivially copyable to pass it by value");
+#else
+static_assert(IS_TRIVIALLY_COPYABLE(Operand),
+ "Operand must be trivially copyable to pass it by value");
+#endif
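
Both branches assert the same contract: `Operand` stays small (at most two pointers) and trivially copyable, which is what makes the by-value signatures throughout this patch legal and cheap. MSVC 2015 just needs the property spelled as trivially copy-constructible plus trivially destructible, since (per the comment above) its `std::is_trivially_copyable` misreports such types. A self-contained rendering of the check, with a stand-in type and a 64-bit target assumed:

    #include <type_traits>

    struct OperandLike {  // stand-in with the same Data layout as above
      unsigned char rex;
      unsigned char buf[9];
      unsigned char len;
      signed char addend;
    };
    // 12 bytes <= 2 * 8 on x64; the bound assumes 64-bit pointers.
    static_assert(sizeof(OperandLike) <= 2 * sizeof(void*),
                  "small enough to pass by value");
    static_assert(std::is_trivially_copyable<OperandLike>::value,
                  "byte-wise copyable, so by-value passing is safe");
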
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
@@ -476,7 +477,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static inline Address target_address_at(Address pc, Address constant_pool);
static inline void set_target_address_at(
- Isolate* isolate, Address pc, Address constant_pool, Address target,
+ Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
@@ -486,23 +487,13 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Isolate* isolate, Address instruction_payload, Code* code,
- Address target);
+ Address instruction_payload, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Isolate* isolate, Address pc, Address target,
+ Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
- static inline RelocInfo::Mode RelocInfoNone() {
- if (kPointerSize == kInt64Size) {
- return RelocInfo::NONE64;
- } else {
- DCHECK_EQ(kPointerSize, kInt32Size);
- return RelocInfo::NONE32;
- }
- }
-
inline Handle<Code> code_target_object_handle_at(Address pc);
inline Address runtime_entry_at(Address pc);
// Number of bytes taken up by the branch target in the code.
@@ -629,29 +620,29 @@ class Assembler : public AssemblerBase {
// 32 bit value, the normal push will optimize the 8 bit case.
void pushq_imm32(int32_t imm32);
void pushq(Register src);
- void pushq(const Operand& src);
+ void pushq(Operand src);
void popq(Register dst);
- void popq(const Operand& dst);
+ void popq(Operand dst);
void enter(Immediate size);
void leave();
// Moves
- void movb(Register dst, const Operand& src);
+ void movb(Register dst, Operand src);
void movb(Register dst, Immediate imm);
- void movb(const Operand& dst, Register src);
- void movb(const Operand& dst, Immediate imm);
+ void movb(Operand dst, Register src);
+ void movb(Operand dst, Immediate imm);
// Move the low 16 bits of a 64-bit register value to a 16-bit
// memory location.
- void movw(Register dst, const Operand& src);
- void movw(const Operand& dst, Register src);
- void movw(const Operand& dst, Immediate imm);
+ void movw(Register dst, Operand src);
+ void movw(Operand dst, Register src);
+ void movw(Operand dst, Immediate imm);
// Move the offset of the label location relative to the current
// position (after the move) to the destination.
- void movl(const Operand& dst, Label* src);
+ void movl(Operand dst, Label* src);
// Loads a pointer into a register with a relocation mode.
void movp(Register dst, void* ptr, RelocInfo::Mode rmode);
@@ -667,20 +658,20 @@ class Assembler : public AssemblerBase {
// Loads a 64-bit immediate into a register.
void movq(Register dst, int64_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE64);
+ RelocInfo::Mode rmode = RelocInfo::NONE);
void movq(Register dst, uint64_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE64);
+ RelocInfo::Mode rmode = RelocInfo::NONE);
void movsxbl(Register dst, Register src);
- void movsxbl(Register dst, const Operand& src);
+ void movsxbl(Register dst, Operand src);
void movsxbq(Register dst, Register src);
- void movsxbq(Register dst, const Operand& src);
+ void movsxbq(Register dst, Operand src);
void movsxwl(Register dst, Register src);
- void movsxwl(Register dst, const Operand& src);
+ void movsxwl(Register dst, Operand src);
void movsxwq(Register dst, Register src);
- void movsxwq(Register dst, const Operand& src);
+ void movsxwq(Register dst, Operand src);
void movsxlq(Register dst, Register src);
- void movsxlq(Register dst, const Operand& src);
+ void movsxlq(Register dst, Operand src);
// Repeated moves.
@@ -696,9 +687,9 @@ class Assembler : public AssemblerBase {
// Conditional moves.
void cmovq(Condition cc, Register dst, Register src);
- void cmovq(Condition cc, Register dst, const Operand& src);
+ void cmovq(Condition cc, Register dst, Operand src);
void cmovl(Condition cc, Register dst, Register src);
- void cmovl(Condition cc, Register dst, const Operand& src);
+ void cmovl(Condition cc, Register dst, Operand src);
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
@@ -710,19 +701,15 @@ class Assembler : public AssemblerBase {
arithmetic_op_8(0x3A, dst, src);
}
- void cmpb(Register dst, const Operand& src) {
- arithmetic_op_8(0x3A, dst, src);
- }
+ void cmpb(Register dst, Operand src) { arithmetic_op_8(0x3A, dst, src); }
- void cmpb(const Operand& dst, Register src) {
- arithmetic_op_8(0x38, src, dst);
- }
+ void cmpb(Operand dst, Register src) { arithmetic_op_8(0x38, src, dst); }
- void cmpb(const Operand& dst, Immediate src) {
+ void cmpb(Operand dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
- void cmpw(const Operand& dst, Immediate src) {
+ void cmpw(Operand dst, Immediate src) {
immediate_arithmetic_op_16(0x7, dst, src);
}
@@ -730,37 +717,33 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_16(0x7, dst, src);
}
- void cmpw(Register dst, const Operand& src) {
- arithmetic_op_16(0x3B, dst, src);
- }
+ void cmpw(Register dst, Operand src) { arithmetic_op_16(0x3B, dst, src); }
void cmpw(Register dst, Register src) {
arithmetic_op_16(0x3B, dst, src);
}
- void cmpw(const Operand& dst, Register src) {
- arithmetic_op_16(0x39, src, dst);
- }
+ void cmpw(Operand dst, Register src) { arithmetic_op_16(0x39, src, dst); }
- void testb(Register reg, const Operand& op) { testb(op, reg); }
+ void testb(Register reg, Operand op) { testb(op, reg); }
- void testw(Register reg, const Operand& op) { testw(op, reg); }
+ void testw(Register reg, Operand op) { testw(op, reg); }
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
void decb(Register dst);
- void decb(const Operand& dst);
+ void decb(Operand dst);
// Lock prefix.
void lock();
- void xchgb(Register reg, const Operand& op);
- void xchgw(Register reg, const Operand& op);
+ void xchgb(Register reg, Operand op);
+ void xchgw(Register reg, Operand op);
- void cmpxchgb(const Operand& dst, Register src);
- void cmpxchgw(const Operand& dst, Register src);
+ void cmpxchgb(Operand dst, Register src);
+ void cmpxchgw(Operand dst, Register src);
// Sign-extends rax into rdx:rax.
void cqo();
@@ -769,7 +752,7 @@ class Assembler : public AssemblerBase {
// Multiply eax by src, put the result in edx:eax.
void mull(Register src);
- void mull(const Operand& src);
+ void mull(Operand src);
// Multiply rax by src, put the result in rdx:rax.
void mulq(Register src);
@@ -825,27 +808,29 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_8(0x5, dst, src);
}
+ void sub_sp_32(uint32_t imm);
+
void testb(Register dst, Register src);
void testb(Register reg, Immediate mask);
- void testb(const Operand& op, Immediate mask);
- void testb(const Operand& op, Register reg);
+ void testb(Operand op, Immediate mask);
+ void testb(Operand op, Register reg);
void testw(Register dst, Register src);
void testw(Register reg, Immediate mask);
- void testw(const Operand& op, Immediate mask);
- void testw(const Operand& op, Register reg);
+ void testw(Operand op, Immediate mask);
+ void testw(Operand op, Register reg);
// Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(const Operand& dst, Register src);
+ void bt(Operand dst, Register src);
+ void bts(Operand dst, Register src);
void bsrq(Register dst, Register src);
- void bsrq(Register dst, const Operand& src);
+ void bsrq(Register dst, Operand src);
void bsrl(Register dst, Register src);
- void bsrl(Register dst, const Operand& src);
+ void bsrl(Register dst, Operand src);
void bsfq(Register dst, Register src);
- void bsfq(Register dst, const Operand& src);
+ void bsfq(Register dst, Operand src);
void bsfl(Register dst, Register src);
- void bsfl(Register dst, const Operand& src);
+ void bsfl(Register dst, Operand src);
// Miscellaneous
void clc();
@@ -859,7 +844,7 @@ class Assembler : public AssemblerBase {
void setcc(Condition cc, Register reg);
void pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
- void pshufw(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshufw(XMMRegister dst, Operand src, uint8_t shuffle);
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -906,7 +891,7 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
- void jmp(const Operand& src);
+ void jmp(Operand src);
// Conditional jumps
void j(Condition cc,
@@ -923,23 +908,23 @@ class Assembler : public AssemblerBase {
void fldpi();
void fldln2();
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
+ void fld_s(Operand adr);
+ void fld_d(Operand adr);
- void fstp_s(const Operand& adr);
- void fstp_d(const Operand& adr);
+ void fstp_s(Operand adr);
+ void fstp_d(Operand adr);
void fstp(int index);
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
+ void fild_s(Operand adr);
+ void fild_d(Operand adr);
- void fist_s(const Operand& adr);
+ void fist_s(Operand adr);
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
+ void fistp_s(Operand adr);
+ void fistp_d(Operand adr);
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
+ void fisttp_s(Operand adr);
+ void fisttp_d(Operand adr);
void fabs();
void fchs();
@@ -949,7 +934,7 @@ class Assembler : public AssemblerBase {
void fmul(int i);
void fdiv(int i);
- void fisub_s(const Operand& adr);
+ void fisub_s(Operand adr);
void faddp(int i = 1);
void fsubp(int i = 1);
@@ -988,24 +973,24 @@ class Assembler : public AssemblerBase {
// SSE instructions
void addss(XMMRegister dst, XMMRegister src);
- void addss(XMMRegister dst, const Operand& src);
+ void addss(XMMRegister dst, Operand src);
void subss(XMMRegister dst, XMMRegister src);
- void subss(XMMRegister dst, const Operand& src);
+ void subss(XMMRegister dst, Operand src);
void mulss(XMMRegister dst, XMMRegister src);
- void mulss(XMMRegister dst, const Operand& src);
+ void mulss(XMMRegister dst, Operand src);
void divss(XMMRegister dst, XMMRegister src);
- void divss(XMMRegister dst, const Operand& src);
+ void divss(XMMRegister dst, Operand src);
void maxss(XMMRegister dst, XMMRegister src);
- void maxss(XMMRegister dst, const Operand& src);
+ void maxss(XMMRegister dst, Operand src);
void minss(XMMRegister dst, XMMRegister src);
- void minss(XMMRegister dst, const Operand& src);
+ void minss(XMMRegister dst, Operand src);
void sqrtss(XMMRegister dst, XMMRegister src);
- void sqrtss(XMMRegister dst, const Operand& src);
+ void sqrtss(XMMRegister dst, Operand src);
void ucomiss(XMMRegister dst, XMMRegister src);
- void ucomiss(XMMRegister dst, const Operand& src);
+ void ucomiss(XMMRegister dst, Operand src);
void movaps(XMMRegister dst, XMMRegister src);
// Don't use this unless it's important to keep the
@@ -1014,48 +999,48 @@ class Assembler : public AssemblerBase {
// values in xmm registers.
void movss(XMMRegister dst, XMMRegister src);
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
+ void movss(XMMRegister dst, Operand src);
+ void movss(Operand dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
- void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, Operand src);
void cvttss2si(Register dst, XMMRegister src);
- void cvtlsi2ss(XMMRegister dst, const Operand& src);
+ void cvtlsi2ss(XMMRegister dst, Operand src);
void cvtlsi2ss(XMMRegister dst, Register src);
void andps(XMMRegister dst, XMMRegister src);
- void andps(XMMRegister dst, const Operand& src);
+ void andps(XMMRegister dst, Operand src);
void orps(XMMRegister dst, XMMRegister src);
- void orps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, Operand src);
void xorps(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, Operand src);
void addps(XMMRegister dst, XMMRegister src);
- void addps(XMMRegister dst, const Operand& src);
+ void addps(XMMRegister dst, Operand src);
void subps(XMMRegister dst, XMMRegister src);
- void subps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, Operand src);
void mulps(XMMRegister dst, XMMRegister src);
- void mulps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, Operand src);
void divps(XMMRegister dst, XMMRegister src);
- void divps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, Operand src);
void movmskps(Register dst, XMMRegister src);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
SIMDPrefix pp, LeadingOpcode m, VexW w);
- void vinstr(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w);
// SSE2 instructions
void sse2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape,
byte opcode);
- void sse2_instr(XMMRegister dst, const Operand& src, byte prefix, byte escape,
+ void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
byte opcode);
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode); \
}
@@ -1066,8 +1051,7 @@ class Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
} \
- void v##instruction(XMMRegister dst, XMMRegister src1, \
- const Operand& src2) { \
+ void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
}
@@ -1075,20 +1059,20 @@ class Assembler : public AssemblerBase {
#undef DECLARE_SSE2_AVX_INSTRUCTION
// SSE3
- void lddqu(XMMRegister dst, const Operand& src);
+ void lddqu(XMMRegister dst, Operand src);
// SSSE3
void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
byte escape2, byte opcode);
- void ssse3_instr(XMMRegister dst, const Operand& src, byte prefix,
- byte escape1, byte escape2, byte opcode);
+ void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
}
@@ -1098,14 +1082,14 @@ class Assembler : public AssemblerBase {
// SSE4
void sse4_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
byte escape2, byte opcode);
- void sse4_instr(XMMRegister dst, const Operand& src, byte prefix,
- byte escape1, byte escape2, byte opcode);
+ void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
#define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2, \
opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
} \
- void instruction(XMMRegister dst, const Operand& src) { \
+ void instruction(XMMRegister dst, Operand src) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
}
@@ -1117,8 +1101,7 @@ class Assembler : public AssemblerBase {
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
} \
- void v##instruction(XMMRegister dst, XMMRegister src1, \
- const Operand& src2) { \
+ void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
}
@@ -1127,7 +1110,7 @@ class Assembler : public AssemblerBase {
#undef DECLARE_SSE34_AVX_INSTRUCTION
void movd(XMMRegister dst, Register src);
- void movd(XMMRegister dst, const Operand& src);
+ void movd(XMMRegister dst, Operand src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
@@ -1139,18 +1122,18 @@ class Assembler : public AssemblerBase {
// values in xmm registers.
void movsd(XMMRegister dst, XMMRegister src);
- void movsd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, const Operand& src);
+ void movsd(Operand dst, XMMRegister src);
+ void movsd(XMMRegister dst, Operand src);
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqa(XMMRegister dst, const Operand& src);
+ void movdqa(Operand dst, XMMRegister src);
+ void movdqa(XMMRegister dst, Operand src);
- void movdqu(const Operand& dst, XMMRegister src);
- void movdqu(XMMRegister dst, const Operand& src);
+ void movdqu(Operand dst, XMMRegister src);
+ void movdqu(XMMRegister dst, Operand src);
void movapd(XMMRegister dst, XMMRegister src);
- void movupd(XMMRegister dst, const Operand& src);
- void movupd(const Operand& dst, XMMRegister src);
+ void movupd(XMMRegister dst, Operand src);
+ void movupd(Operand dst, XMMRegister src);
void psllq(XMMRegister reg, byte imm8);
void psrlq(XMMRegister reg, byte imm8);
@@ -1161,98 +1144,96 @@ class Assembler : public AssemblerBase {
void psraw(XMMRegister reg, byte imm8);
void psrad(XMMRegister reg, byte imm8);
- void cvttsd2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, Operand src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttss2siq(Register dst, XMMRegister src);
- void cvttss2siq(Register dst, const Operand& src);
+ void cvttss2siq(Register dst, Operand src);
void cvttsd2siq(Register dst, XMMRegister src);
- void cvttsd2siq(Register dst, const Operand& src);
+ void cvttsd2siq(Register dst, Operand src);
- void cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void cvtlsi2sd(XMMRegister dst, Operand src);
void cvtlsi2sd(XMMRegister dst, Register src);
- void cvtqsi2ss(XMMRegister dst, const Operand& src);
+ void cvtqsi2ss(XMMRegister dst, Operand src);
void cvtqsi2ss(XMMRegister dst, Register src);
- void cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void cvtqsi2sd(XMMRegister dst, Operand src);
void cvtqsi2sd(XMMRegister dst, Register src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtss2sd(XMMRegister dst, const Operand& src);
+ void cvtss2sd(XMMRegister dst, Operand src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, const Operand& src);
+ void cvtsd2ss(XMMRegister dst, Operand src);
void cvtsd2si(Register dst, XMMRegister src);
void cvtsd2siq(Register dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, const Operand& src);
+ void addsd(XMMRegister dst, Operand src);
void subsd(XMMRegister dst, XMMRegister src);
- void subsd(XMMRegister dst, const Operand& src);
+ void subsd(XMMRegister dst, Operand src);
void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, const Operand& src);
+ void mulsd(XMMRegister dst, Operand src);
void divsd(XMMRegister dst, XMMRegister src);
- void divsd(XMMRegister dst, const Operand& src);
+ void divsd(XMMRegister dst, Operand src);
void maxsd(XMMRegister dst, XMMRegister src);
- void maxsd(XMMRegister dst, const Operand& src);
+ void maxsd(XMMRegister dst, Operand src);
void minsd(XMMRegister dst, XMMRegister src);
- void minsd(XMMRegister dst, const Operand& src);
+ void minsd(XMMRegister dst, Operand src);
void andpd(XMMRegister dst, XMMRegister src);
- void andpd(XMMRegister dst, const Operand& src);
+ void andpd(XMMRegister dst, Operand src);
void orpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, const Operand& src);
+ void orpd(XMMRegister dst, Operand src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, const Operand& src);
+ void xorpd(XMMRegister dst, Operand src);
void sqrtsd(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, const Operand& src);
+ void sqrtsd(XMMRegister dst, Operand src);
void haddps(XMMRegister dst, XMMRegister src);
- void haddps(XMMRegister dst, const Operand& src);
+ void haddps(XMMRegister dst, Operand src);
void ucomisd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, const Operand& src);
+ void ucomisd(XMMRegister dst, Operand src);
void cmpltsd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
void punpckldq(XMMRegister dst, XMMRegister src);
- void punpckldq(XMMRegister dst, const Operand& src);
+ void punpckldq(XMMRegister dst, Operand src);
void punpckhdq(XMMRegister dst, XMMRegister src);
  // SSE 4.1 instructions
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void extractps(Register dst, XMMRegister src, byte imm8);
void pextrb(Register dst, XMMRegister src, int8_t imm8);
- void pextrb(const Operand& dst, XMMRegister src, int8_t imm8);
+ void pextrb(Operand dst, XMMRegister src, int8_t imm8);
void pextrw(Register dst, XMMRegister src, int8_t imm8);
- void pextrw(const Operand& dst, XMMRegister src, int8_t imm8);
+ void pextrw(Operand dst, XMMRegister src, int8_t imm8);
void pextrd(Register dst, XMMRegister src, int8_t imm8);
- void pextrd(const Operand& dst, XMMRegister src, int8_t imm8);
+ void pextrd(Operand dst, XMMRegister src, int8_t imm8);
void pinsrb(XMMRegister dst, Register src, int8_t imm8);
- void pinsrb(XMMRegister dst, const Operand& src, int8_t imm8);
+ void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
void pinsrw(XMMRegister dst, Register src, int8_t imm8);
- void pinsrw(XMMRegister dst, const Operand& src, int8_t imm8);
+ void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
- void cmpps(XMMRegister dst, const Operand& src, int8_t cmp);
+ void cmpps(XMMRegister dst, Operand src, int8_t cmp);
void cmppd(XMMRegister dst, XMMRegister src, int8_t cmp);
- void cmppd(XMMRegister dst, const Operand& src, int8_t cmp);
+ void cmppd(XMMRegister dst, Operand src, int8_t cmp);
#define SSE_CMP_P(instr, imm8) \
void instr##ps(XMMRegister dst, XMMRegister src) { cmpps(dst, src, imm8); } \
- void instr##ps(XMMRegister dst, const Operand& src) { \
- cmpps(dst, src, imm8); \
- } \
+ void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \
void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
- void instr##pd(XMMRegister dst, const Operand& src) { cmppd(dst, src, imm8); }
+ void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
SSE_CMP_P(cmpeq, 0x0);
SSE_CMP_P(cmplt, 0x1);
@@ -1264,25 +1245,25 @@ class Assembler : public AssemblerBase {
#undef SSE_CMP_P
void minps(XMMRegister dst, XMMRegister src);
- void minps(XMMRegister dst, const Operand& src);
+ void minps(XMMRegister dst, Operand src);
void maxps(XMMRegister dst, XMMRegister src);
- void maxps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, Operand src);
void rcpps(XMMRegister dst, XMMRegister src);
- void rcpps(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, Operand src);
void rsqrtps(XMMRegister dst, XMMRegister src);
- void rsqrtps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, Operand src);
void sqrtps(XMMRegister dst, XMMRegister src);
- void sqrtps(XMMRegister dst, const Operand& src);
+ void sqrtps(XMMRegister dst, Operand src);
void movups(XMMRegister dst, XMMRegister src);
- void movups(XMMRegister dst, const Operand& src);
- void movups(const Operand& dst, XMMRegister src);
+ void movups(XMMRegister dst, Operand src);
+ void movups(Operand dst, XMMRegister src);
void psrldq(XMMRegister dst, uint8_t shift);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
- void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void cvtdq2ps(XMMRegister dst, XMMRegister src);
- void cvtdq2ps(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, Operand src);
  // AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1294,13 +1275,13 @@ class Assembler : public AssemblerBase {
void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0xb9, dst, src1, src2);
}
- void vfmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x99, dst, src1, src2);
}
- void vfmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xa9, dst, src1, src2);
}
- void vfmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xb9, dst, src1, src2);
}
void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1312,13 +1293,13 @@ class Assembler : public AssemblerBase {
void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0xbb, dst, src1, src2);
}
- void vfmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9b, dst, src1, src2);
}
- void vfmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xab, dst, src1, src2);
}
- void vfmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbb, dst, src1, src2);
}
void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1330,13 +1311,13 @@ class Assembler : public AssemblerBase {
void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0xbd, dst, src1, src2);
}
- void vfnmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9d, dst, src1, src2);
}
- void vfnmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xad, dst, src1, src2);
}
- void vfnmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbd, dst, src1, src2);
}
void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1348,17 +1329,17 @@ class Assembler : public AssemblerBase {
void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0xbf, dst, src1, src2);
}
- void vfnmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0x9f, dst, src1, src2);
}
- void vfnmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xaf, dst, src1, src2);
}
- void vfnmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmasd(0xbf, dst, src1, src2);
}
void vfmasd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vfmasd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0x99, dst, src1, src2);
@@ -1369,13 +1350,13 @@ class Assembler : public AssemblerBase {
void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0xb9, dst, src1, src2);
}
- void vfmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x99, dst, src1, src2);
}
- void vfmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xa9, dst, src1, src2);
}
- void vfmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xb9, dst, src1, src2);
}
void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1387,13 +1368,13 @@ class Assembler : public AssemblerBase {
void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0xbb, dst, src1, src2);
}
- void vfmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9b, dst, src1, src2);
}
- void vfmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xab, dst, src1, src2);
}
- void vfmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbb, dst, src1, src2);
}
void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1405,13 +1386,13 @@ class Assembler : public AssemblerBase {
void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0xbd, dst, src1, src2);
}
- void vfnmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9d, dst, src1, src2);
}
- void vfnmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xad, dst, src1, src2);
}
- void vfnmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbd, dst, src1, src2);
}
void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1423,34 +1404,30 @@ class Assembler : public AssemblerBase {
void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmass(0xbf, dst, src1, src2);
}
- void vfnmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0x9f, dst, src1, src2);
}
- void vfnmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xaf, dst, src1, src2);
}
- void vfnmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vfmass(0xbf, dst, src1, src2);
}
void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vmovd(XMMRegister dst, Register src);
- void vmovd(XMMRegister dst, const Operand& src);
+ void vmovd(XMMRegister dst, Operand src);
void vmovd(Register dst, XMMRegister src);
void vmovq(XMMRegister dst, Register src);
- void vmovq(XMMRegister dst, const Operand& src);
+ void vmovq(XMMRegister dst, Operand src);
void vmovq(Register dst, XMMRegister src);
void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vsd(0x10, dst, src1, src2);
}
- void vmovsd(XMMRegister dst, const Operand& src) {
- vsd(0x10, dst, xmm0, src);
- }
- void vmovsd(const Operand& dst, XMMRegister src) {
- vsd(0x11, src, xmm0, dst);
- }
+ void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
+ void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
#define AVX_SP_3(instr, opcode) \
AVX_S_3(instr, opcode) \
@@ -1464,12 +1441,12 @@ class Assembler : public AssemblerBase {
AVX_3(instr##ps, opcode, vps) \
AVX_3(instr##pd, opcode, vpd)
-#define AVX_3(instr, opcode, impl) \
- void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- impl(opcode, dst, src1, src2); \
- } \
- void instr(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- impl(opcode, dst, src1, src2); \
+#define AVX_3(instr, opcode, impl) \
+ void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ impl(opcode, dst, src1, src2); \
+ } \
+ void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ impl(opcode, dst, src1, src2); \
}
AVX_SP_3(vsqrt, 0x51);
@@ -1500,42 +1477,42 @@ class Assembler : public AssemblerBase {
void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
- void vcvtss2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
}
- void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
}
void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
}
- void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
}
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
}
- void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
}
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = XMMRegister::from_code(src2.code());
vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
}
- void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
}
void vcvttss2si(Register dst, XMMRegister src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
}
- void vcvttss2si(Register dst, const Operand& src) {
+ void vcvttss2si(Register dst, Operand src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
}
@@ -1543,7 +1520,7 @@ class Assembler : public AssemblerBase {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
- void vcvttsd2si(Register dst, const Operand& src) {
+ void vcvttsd2si(Register dst, Operand src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
@@ -1551,7 +1528,7 @@ class Assembler : public AssemblerBase {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
- void vcvttss2siq(Register dst, const Operand& src) {
+ void vcvttss2siq(Register dst, Operand src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
@@ -1559,7 +1536,7 @@ class Assembler : public AssemblerBase {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
- void vcvttsd2siq(Register dst, const Operand& src) {
+ void vcvttsd2siq(Register dst, Operand src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
@@ -1570,7 +1547,7 @@ class Assembler : public AssemblerBase {
void vucomisd(XMMRegister dst, XMMRegister src) {
vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
- void vucomisd(XMMRegister dst, const Operand& src) {
+ void vucomisd(XMMRegister dst, Operand src) {
vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
@@ -1587,39 +1564,27 @@ class Assembler : public AssemblerBase {
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
}
- void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
}
void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vss(0x10, dst, src1, src2);
}
- void vmovss(XMMRegister dst, const Operand& src) {
- vss(0x10, dst, xmm0, src);
- }
- void vmovss(const Operand& dst, XMMRegister src) {
- vss(0x11, src, xmm0, dst);
- }
+ void vmovss(XMMRegister dst, Operand src) { vss(0x10, dst, xmm0, src); }
+ void vmovss(Operand dst, XMMRegister src) { vss(0x11, src, xmm0, dst); }
void vucomiss(XMMRegister dst, XMMRegister src);
- void vucomiss(XMMRegister dst, const Operand& src);
+ void vucomiss(XMMRegister dst, Operand src);
void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
- void vmovups(XMMRegister dst, const Operand& src) {
- vps(0x10, dst, xmm0, src);
- }
- void vmovups(const Operand& dst, XMMRegister src) {
- vps(0x11, src, xmm0, dst);
- }
+ void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
+ void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
- void vmovupd(XMMRegister dst, const Operand& src) {
- vpd(0x10, dst, xmm0, src);
- }
- void vmovupd(const Operand& dst, XMMRegister src) {
- vpd(0x11, src, xmm0, dst);
- }
+ void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
+ void vmovupd(Operand dst, XMMRegister src) { vpd(0x11, src, xmm0, dst); }
void vmovmskps(Register dst, XMMRegister src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vps(0x50, idst, xmm0, src);
@@ -1632,8 +1597,7 @@ class Assembler : public AssemblerBase {
vps(0xC2, dst, src1, src2);
emit(cmp);
}
- void vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t cmp) {
+ void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
vps(0xC2, dst, src1, src2);
emit(cmp);
}
@@ -1641,24 +1605,23 @@ class Assembler : public AssemblerBase {
vpd(0xC2, dst, src1, src2);
emit(cmp);
}
- void vcmppd(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t cmp) {
+ void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
vpd(0xC2, dst, src1, src2);
emit(cmp);
}
-#define AVX_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmpps(dst, src1, src2, imm8); \
- } \
- void instr##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vcmpps(dst, src1, src2, imm8); \
- } \
- void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmppd(dst, src1, src2, imm8); \
- } \
- void instr##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vcmppd(dst, src1, src2, imm8); \
+#define AVX_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vcmppd(dst, src1, src2, imm8); \
}
AVX_CMP_P(vcmpeq, 0x0);
@@ -1670,7 +1633,7 @@ class Assembler : public AssemblerBase {
#undef AVX_CMP_P
- void vlddqu(XMMRegister dst, const Operand& src) {
+ void vlddqu(XMMRegister dst, Operand src) {
vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
}
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
@@ -1702,7 +1665,7 @@ class Assembler : public AssemblerBase {
vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpextrb(const Operand& dst, XMMRegister src, int8_t imm8) {
+ void vpextrb(Operand dst, XMMRegister src, int8_t imm8) {
vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1711,7 +1674,7 @@ class Assembler : public AssemblerBase {
vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
emit(imm8);
}
- void vpextrw(const Operand& dst, XMMRegister src, int8_t imm8) {
+ void vpextrw(Operand dst, XMMRegister src, int8_t imm8) {
vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1720,7 +1683,7 @@ class Assembler : public AssemblerBase {
vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpextrd(const Operand& dst, XMMRegister src, int8_t imm8) {
+ void vpextrd(Operand dst, XMMRegister src, int8_t imm8) {
vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1729,8 +1692,7 @@ class Assembler : public AssemblerBase {
vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t imm8) {
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1739,8 +1701,7 @@ class Assembler : public AssemblerBase {
vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
emit(imm8);
}
- void vpinsrw(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t imm8) {
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
emit(imm8);
}
@@ -1749,8 +1710,7 @@ class Assembler : public AssemblerBase {
vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrd(XMMRegister dst, XMMRegister src1, const Operand& src2,
- int8_t imm8) {
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
emit(imm8);
}
@@ -1760,150 +1720,150 @@ class Assembler : public AssemblerBase {
}
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
  // BMI instructions
void andnq(Register dst, Register src1, Register src2) {
bmi1q(0xf2, dst, src1, src2);
}
- void andnq(Register dst, Register src1, const Operand& src2) {
+ void andnq(Register dst, Register src1, Operand src2) {
bmi1q(0xf2, dst, src1, src2);
}
void andnl(Register dst, Register src1, Register src2) {
bmi1l(0xf2, dst, src1, src2);
}
- void andnl(Register dst, Register src1, const Operand& src2) {
+ void andnl(Register dst, Register src1, Operand src2) {
bmi1l(0xf2, dst, src1, src2);
}
void bextrq(Register dst, Register src1, Register src2) {
bmi1q(0xf7, dst, src2, src1);
}
- void bextrq(Register dst, const Operand& src1, Register src2) {
+ void bextrq(Register dst, Operand src1, Register src2) {
bmi1q(0xf7, dst, src2, src1);
}
void bextrl(Register dst, Register src1, Register src2) {
bmi1l(0xf7, dst, src2, src1);
}
- void bextrl(Register dst, const Operand& src1, Register src2) {
+ void bextrl(Register dst, Operand src1, Register src2) {
bmi1l(0xf7, dst, src2, src1);
}
void blsiq(Register dst, Register src) { bmi1q(0xf3, rbx, dst, src); }
- void blsiq(Register dst, const Operand& src) { bmi1q(0xf3, rbx, dst, src); }
+ void blsiq(Register dst, Operand src) { bmi1q(0xf3, rbx, dst, src); }
void blsil(Register dst, Register src) { bmi1l(0xf3, rbx, dst, src); }
- void blsil(Register dst, const Operand& src) { bmi1l(0xf3, rbx, dst, src); }
+ void blsil(Register dst, Operand src) { bmi1l(0xf3, rbx, dst, src); }
void blsmskq(Register dst, Register src) { bmi1q(0xf3, rdx, dst, src); }
- void blsmskq(Register dst, const Operand& src) { bmi1q(0xf3, rdx, dst, src); }
+ void blsmskq(Register dst, Operand src) { bmi1q(0xf3, rdx, dst, src); }
void blsmskl(Register dst, Register src) { bmi1l(0xf3, rdx, dst, src); }
- void blsmskl(Register dst, const Operand& src) { bmi1l(0xf3, rdx, dst, src); }
+ void blsmskl(Register dst, Operand src) { bmi1l(0xf3, rdx, dst, src); }
void blsrq(Register dst, Register src) { bmi1q(0xf3, rcx, dst, src); }
- void blsrq(Register dst, const Operand& src) { bmi1q(0xf3, rcx, dst, src); }
+ void blsrq(Register dst, Operand src) { bmi1q(0xf3, rcx, dst, src); }
void blsrl(Register dst, Register src) { bmi1l(0xf3, rcx, dst, src); }
- void blsrl(Register dst, const Operand& src) { bmi1l(0xf3, rcx, dst, src); }
+ void blsrl(Register dst, Operand src) { bmi1l(0xf3, rcx, dst, src); }
void tzcntq(Register dst, Register src);
- void tzcntq(Register dst, const Operand& src);
+ void tzcntq(Register dst, Operand src);
void tzcntl(Register dst, Register src);
- void tzcntl(Register dst, const Operand& src);
+ void tzcntl(Register dst, Operand src);
void lzcntq(Register dst, Register src);
- void lzcntq(Register dst, const Operand& src);
+ void lzcntq(Register dst, Operand src);
void lzcntl(Register dst, Register src);
- void lzcntl(Register dst, const Operand& src);
+ void lzcntl(Register dst, Operand src);
void popcntq(Register dst, Register src);
- void popcntq(Register dst, const Operand& src);
+ void popcntq(Register dst, Operand src);
void popcntl(Register dst, Register src);
- void popcntl(Register dst, const Operand& src);
+ void popcntl(Register dst, Operand src);
void bzhiq(Register dst, Register src1, Register src2) {
bmi2q(kNone, 0xf5, dst, src2, src1);
}
- void bzhiq(Register dst, const Operand& src1, Register src2) {
+ void bzhiq(Register dst, Operand src1, Register src2) {
bmi2q(kNone, 0xf5, dst, src2, src1);
}
void bzhil(Register dst, Register src1, Register src2) {
bmi2l(kNone, 0xf5, dst, src2, src1);
}
- void bzhil(Register dst, const Operand& src1, Register src2) {
+ void bzhil(Register dst, Operand src1, Register src2) {
bmi2l(kNone, 0xf5, dst, src2, src1);
}
void mulxq(Register dst1, Register dst2, Register src) {
bmi2q(kF2, 0xf6, dst1, dst2, src);
}
- void mulxq(Register dst1, Register dst2, const Operand& src) {
+ void mulxq(Register dst1, Register dst2, Operand src) {
bmi2q(kF2, 0xf6, dst1, dst2, src);
}
void mulxl(Register dst1, Register dst2, Register src) {
bmi2l(kF2, 0xf6, dst1, dst2, src);
}
- void mulxl(Register dst1, Register dst2, const Operand& src) {
+ void mulxl(Register dst1, Register dst2, Operand src) {
bmi2l(kF2, 0xf6, dst1, dst2, src);
}
void pdepq(Register dst, Register src1, Register src2) {
bmi2q(kF2, 0xf5, dst, src1, src2);
}
- void pdepq(Register dst, Register src1, const Operand& src2) {
+ void pdepq(Register dst, Register src1, Operand src2) {
bmi2q(kF2, 0xf5, dst, src1, src2);
}
void pdepl(Register dst, Register src1, Register src2) {
bmi2l(kF2, 0xf5, dst, src1, src2);
}
- void pdepl(Register dst, Register src1, const Operand& src2) {
+ void pdepl(Register dst, Register src1, Operand src2) {
bmi2l(kF2, 0xf5, dst, src1, src2);
}
void pextq(Register dst, Register src1, Register src2) {
bmi2q(kF3, 0xf5, dst, src1, src2);
}
- void pextq(Register dst, Register src1, const Operand& src2) {
+ void pextq(Register dst, Register src1, Operand src2) {
bmi2q(kF3, 0xf5, dst, src1, src2);
}
void pextl(Register dst, Register src1, Register src2) {
bmi2l(kF3, 0xf5, dst, src1, src2);
}
- void pextl(Register dst, Register src1, const Operand& src2) {
+ void pextl(Register dst, Register src1, Operand src2) {
bmi2l(kF3, 0xf5, dst, src1, src2);
}
void sarxq(Register dst, Register src1, Register src2) {
bmi2q(kF3, 0xf7, dst, src2, src1);
}
- void sarxq(Register dst, const Operand& src1, Register src2) {
+ void sarxq(Register dst, Operand src1, Register src2) {
bmi2q(kF3, 0xf7, dst, src2, src1);
}
void sarxl(Register dst, Register src1, Register src2) {
bmi2l(kF3, 0xf7, dst, src2, src1);
}
- void sarxl(Register dst, const Operand& src1, Register src2) {
+ void sarxl(Register dst, Operand src1, Register src2) {
bmi2l(kF3, 0xf7, dst, src2, src1);
}
void shlxq(Register dst, Register src1, Register src2) {
bmi2q(k66, 0xf7, dst, src2, src1);
}
- void shlxq(Register dst, const Operand& src1, Register src2) {
+ void shlxq(Register dst, Operand src1, Register src2) {
bmi2q(k66, 0xf7, dst, src2, src1);
}
void shlxl(Register dst, Register src1, Register src2) {
bmi2l(k66, 0xf7, dst, src2, src1);
}
- void shlxl(Register dst, const Operand& src1, Register src2) {
+ void shlxl(Register dst, Operand src1, Register src2) {
bmi2l(k66, 0xf7, dst, src2, src1);
}
void shrxq(Register dst, Register src1, Register src2) {
bmi2q(kF2, 0xf7, dst, src2, src1);
}
- void shrxq(Register dst, const Operand& src1, Register src2) {
+ void shrxq(Register dst, Operand src1, Register src2) {
bmi2q(kF2, 0xf7, dst, src2, src1);
}
void shrxl(Register dst, Register src1, Register src2) {
bmi2l(kF2, 0xf7, dst, src2, src1);
}
- void shrxl(Register dst, const Operand& src1, Register src2) {
+ void shrxl(Register dst, Operand src1, Register src2) {
bmi2l(kF2, 0xf7, dst, src2, src1);
}
void rorxq(Register dst, Register src, byte imm8);
- void rorxq(Register dst, const Operand& src, byte imm8);
+ void rorxq(Register dst, Operand src, byte imm8);
void rorxl(Register dst, Register src, byte imm8);
- void rorxl(Register dst, const Operand& src, byte imm8);
+ void rorxl(Register dst, Operand src, byte imm8);
void lfence();
void pause();
@@ -1961,7 +1921,7 @@ class Assembler : public AssemblerBase {
protected:
// Call near indirect
- void call(const Operand& operand);
+ void call(Operand operand);
private:
byte* addr_at(int pos) { return buffer_ + pos; }
@@ -1997,8 +1957,8 @@ class Assembler : public AssemblerBase {
// The high bit of reg is used for REX.R, the high bit of op's base
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is set.
- inline void emit_rex_64(Register reg, const Operand& op);
- inline void emit_rex_64(XMMRegister reg, const Operand& op);
+ inline void emit_rex_64(Register reg, Operand op);
+ inline void emit_rex_64(XMMRegister reg, Operand op);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the register code.
@@ -2011,7 +1971,7 @@ class Assembler : public AssemblerBase {
// The high bit of op's base register is used for REX.B, and the high
// bit of op's index register is used for REX.X.
// REX.W is set and REX.R clear.
- inline void emit_rex_64(const Operand& op);
+ inline void emit_rex_64(Operand op);
// Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
void emit_rex_64() { emit(0x48); }
@@ -2023,7 +1983,7 @@ class Assembler : public AssemblerBase {
// The high bit of reg is used for REX.R, the high bit of op's base
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is cleared.
- inline void emit_rex_32(Register reg, const Operand& op);
+ inline void emit_rex_32(Register reg, Operand op);
// High bit of rm_reg goes to REX.B.
// REX.W, REX.R and REX.X are clear.
@@ -2031,7 +1991,7 @@ class Assembler : public AssemblerBase {
// High bit of base goes to REX.B and high bit of index to REX.X.
// REX.W and REX.R are clear.
- inline void emit_rex_32(const Operand& op);
+ inline void emit_rex_32(Operand op);
// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
// REX.W is cleared. If no REX bits are set, no byte is emitted.
@@ -2041,7 +2001,7 @@ class Assembler : public AssemblerBase {
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
// is emitted.
- inline void emit_optional_rex_32(Register reg, const Operand& op);
+ inline void emit_optional_rex_32(Register reg, Operand op);
// As for emit_optional_rex_32(Register, Register), except that
// the registers are XMM registers.
@@ -2055,18 +2015,18 @@ class Assembler : public AssemblerBase {
// one of the registers is an XMM registers.
inline void emit_optional_rex_32(Register reg, XMMRegister base);
- // As for emit_optional_rex_32(Register, const Operand&), except that
+ // As for emit_optional_rex_32(Register, Operand), except that
// the register is an XMM register.
- inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
+ inline void emit_optional_rex_32(XMMRegister reg, Operand op);
// Optionally do as emit_rex_32(Register) if the register number has
// the high bit set.
inline void emit_optional_rex_32(Register rm_reg);
inline void emit_optional_rex_32(XMMRegister rm_reg);
- // Optionally do as emit_rex_32(const Operand&) if the operand register
+ // Optionally do as emit_rex_32(Operand) if the operand register
// numbers have a high bit set.
- inline void emit_optional_rex_32(const Operand& op);
+ inline void emit_optional_rex_32(Operand op);
void emit_rex(int size) {
if (size == kInt64Size) {
@@ -2102,8 +2062,7 @@ class Assembler : public AssemblerBase {
SIMDPrefix pp);
void emit_vex3_byte0() { emit(0xc4); }
inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
- inline void emit_vex3_byte1(XMMRegister reg, const Operand& rm,
- LeadingOpcode m);
+ inline void emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m);
inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
SIMDPrefix pp);
inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
@@ -2112,10 +2071,10 @@ class Assembler : public AssemblerBase {
inline void emit_vex_prefix(Register reg, Register v, Register rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode m,
VexW w);
- inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, const Operand& rm,
+ inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, Operand rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode m,
VexW w);
- inline void emit_vex_prefix(Register reg, Register v, const Operand& rm,
+ inline void emit_vex_prefix(Register reg, Register v, Operand rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode m,
VexW w);
@@ -2123,14 +2082,14 @@ class Assembler : public AssemblerBase {
// 1- or 4-byte offset for a memory operand. Also encodes
// the second operand of the operation, a register or operation
// subcode, into the reg field of the ModR/M byte.
- void emit_operand(Register reg, const Operand& adr) {
+ void emit_operand(Register reg, Operand adr) {
emit_operand(reg.low_bits(), adr);
}
// Emit the ModR/M byte, and optionally the SIB byte and
// 1- or 4-byte offset for a memory operand. Also used to encode
// a three-bit opcode extension into the ModR/M byte.
- void emit_operand(int rm, const Operand& adr);
+ void emit_operand(int rm, Operand adr);
// Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
void emit_modrm(Register reg, Register rm_reg) {
@@ -2149,8 +2108,8 @@ class Assembler : public AssemblerBase {
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(Register reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister reg, Operand adr);
+ void emit_sse_operand(Register reg, Operand adr);
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
void emit_sse_operand(XMMRegister dst);
@@ -2160,37 +2119,28 @@ class Assembler : public AssemblerBase {
// similar, differing just in the opcode or in the reg field of the
// ModR/M byte.
void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_8(byte opcode, Register reg, const Operand& rm_reg);
+ void arithmetic_op_8(byte opcode, Register reg, Operand rm_reg);
void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
+ void arithmetic_op_16(byte opcode, Register reg, Operand rm_reg);
// Operate on operands/registers with pointer size, 32-bit or 64-bit size.
void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
- void arithmetic_op(byte opcode,
- Register reg,
- const Operand& rm_reg,
- int size);
+ void arithmetic_op(byte opcode, Register reg, Operand rm_reg, int size);
// Operate on a byte in memory or register.
void immediate_arithmetic_op_8(byte subcode,
Register dst,
Immediate src);
- void immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src);
+ void immediate_arithmetic_op_8(byte subcode, Operand dst, Immediate src);
// Operate on a word in memory or register.
void immediate_arithmetic_op_16(byte subcode,
Register dst,
Immediate src);
- void immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
- Immediate src);
+ void immediate_arithmetic_op_16(byte subcode, Operand dst, Immediate src);
// Operate on operands/registers with pointer size, 32-bit or 64-bit size.
void immediate_arithmetic_op(byte subcode,
Register dst,
Immediate src,
int size);
- void immediate_arithmetic_op(byte subcode,
- const Operand& dst,
- Immediate src,
+ void immediate_arithmetic_op(byte subcode, Operand dst, Immediate src,
int size);
// Emit machine code for a shift operation.
@@ -2218,15 +2168,15 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x0, dst, src, size);
}
- void emit_add(Register dst, const Operand& src, int size) {
+ void emit_add(Register dst, Operand src, int size) {
arithmetic_op(0x03, dst, src, size);
}
- void emit_add(const Operand& dst, Register src, int size) {
+ void emit_add(Operand dst, Register src, int size) {
arithmetic_op(0x1, src, dst, size);
}
- void emit_add(const Operand& dst, Immediate src, int size) {
+ void emit_add(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x0, dst, src, size);
}
@@ -2234,11 +2184,11 @@ class Assembler : public AssemblerBase {
arithmetic_op(0x23, dst, src, size);
}
- void emit_and(Register dst, const Operand& src, int size) {
+ void emit_and(Register dst, Operand src, int size) {
arithmetic_op(0x23, dst, src, size);
}
- void emit_and(const Operand& dst, Register src, int size) {
+ void emit_and(Operand dst, Register src, int size) {
arithmetic_op(0x21, src, dst, size);
}
@@ -2246,7 +2196,7 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x4, dst, src, size);
}
- void emit_and(const Operand& dst, Immediate src, int size) {
+ void emit_and(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x4, dst, src, size);
}
@@ -2254,11 +2204,11 @@ class Assembler : public AssemblerBase {
arithmetic_op(0x3B, dst, src, size);
}
- void emit_cmp(Register dst, const Operand& src, int size) {
+ void emit_cmp(Register dst, Operand src, int size) {
arithmetic_op(0x3B, dst, src, size);
}
- void emit_cmp(const Operand& dst, Register src, int size) {
+ void emit_cmp(Operand dst, Register src, int size) {
arithmetic_op(0x39, src, dst, size);
}
@@ -2266,17 +2216,17 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x7, dst, src, size);
}
- void emit_cmp(const Operand& dst, Immediate src, int size) {
+ void emit_cmp(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x7, dst, src, size);
}
   // Compare {al,ax,eax,rax} with dst. If equal, set ZF and write src into
   // dst. Otherwise clear ZF and write dst into {al,ax,eax,rax}. This
   // operation is only atomic if prefixed by the lock instruction.
- void emit_cmpxchg(const Operand& dst, Register src, int size);
+ void emit_cmpxchg(Operand dst, Register src, int size);
void emit_dec(Register dst, int size);
- void emit_dec(const Operand& dst, int size);
+ void emit_dec(Operand dst, int size);
// Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
// Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
@@ -2287,43 +2237,43 @@ class Assembler : public AssemblerBase {
// Signed multiply instructions.
// rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
void emit_imul(Register src, int size);
- void emit_imul(const Operand& src, int size);
+ void emit_imul(Operand src, int size);
void emit_imul(Register dst, Register src, int size);
- void emit_imul(Register dst, const Operand& src, int size);
+ void emit_imul(Register dst, Operand src, int size);
void emit_imul(Register dst, Register src, Immediate imm, int size);
- void emit_imul(Register dst, const Operand& src, Immediate imm, int size);
+ void emit_imul(Register dst, Operand src, Immediate imm, int size);
void emit_inc(Register dst, int size);
- void emit_inc(const Operand& dst, int size);
+ void emit_inc(Operand dst, int size);
- void emit_lea(Register dst, const Operand& src, int size);
+ void emit_lea(Register dst, Operand src, int size);
- void emit_mov(Register dst, const Operand& src, int size);
+ void emit_mov(Register dst, Operand src, int size);
void emit_mov(Register dst, Register src, int size);
- void emit_mov(const Operand& dst, Register src, int size);
+ void emit_mov(Operand dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
- void emit_mov(const Operand& dst, Immediate value, int size);
+ void emit_mov(Operand dst, Immediate value, int size);
- void emit_movzxb(Register dst, const Operand& src, int size);
+ void emit_movzxb(Register dst, Operand src, int size);
void emit_movzxb(Register dst, Register src, int size);
- void emit_movzxw(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, Operand src, int size);
void emit_movzxw(Register dst, Register src, int size);
void emit_neg(Register dst, int size);
- void emit_neg(const Operand& dst, int size);
+ void emit_neg(Operand dst, int size);
void emit_not(Register dst, int size);
- void emit_not(const Operand& dst, int size);
+ void emit_not(Operand dst, int size);
void emit_or(Register dst, Register src, int size) {
arithmetic_op(0x0B, dst, src, size);
}
- void emit_or(Register dst, const Operand& src, int size) {
+ void emit_or(Register dst, Operand src, int size) {
arithmetic_op(0x0B, dst, src, size);
}
- void emit_or(const Operand& dst, Register src, int size) {
+ void emit_or(Operand dst, Register src, int size) {
arithmetic_op(0x9, src, dst, size);
}
@@ -2331,7 +2281,7 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x1, dst, src, size);
}
- void emit_or(const Operand& dst, Immediate src, int size) {
+ void emit_or(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x1, dst, src, size);
}
@@ -2349,28 +2299,28 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x5, dst, src, size);
}
- void emit_sub(Register dst, const Operand& src, int size) {
+ void emit_sub(Register dst, Operand src, int size) {
arithmetic_op(0x2B, dst, src, size);
}
- void emit_sub(const Operand& dst, Register src, int size) {
+ void emit_sub(Operand dst, Register src, int size) {
arithmetic_op(0x29, src, dst, size);
}
- void emit_sub(const Operand& dst, Immediate src, int size) {
+ void emit_sub(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x5, dst, src, size);
}
void emit_test(Register dst, Register src, int size);
void emit_test(Register reg, Immediate mask, int size);
- void emit_test(const Operand& op, Register reg, int size);
- void emit_test(const Operand& op, Immediate mask, int size);
- void emit_test(Register reg, const Operand& op, int size) {
+ void emit_test(Operand op, Register reg, int size);
+ void emit_test(Operand op, Immediate mask, int size);
+ void emit_test(Register reg, Operand op, int size) {
return emit_test(op, reg, size);
}
void emit_xchg(Register dst, Register src, int size);
- void emit_xchg(Register dst, const Operand& src, int size);
+ void emit_xchg(Register dst, Operand src, int size);
void emit_xor(Register dst, Register src, int size) {
if (size == kInt64Size && dst.code() == src.code()) {
@@ -2382,7 +2332,7 @@ class Assembler : public AssemblerBase {
}
}
- void emit_xor(Register dst, const Operand& src, int size) {
+ void emit_xor(Register dst, Operand src, int size) {
arithmetic_op(0x33, dst, src, size);
}
@@ -2390,25 +2340,23 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x6, dst, src, size);
}
- void emit_xor(const Operand& dst, Immediate src, int size) {
+ void emit_xor(Operand dst, Immediate src, int size) {
immediate_arithmetic_op(0x6, dst, src, size);
}
- void emit_xor(const Operand& dst, Register src, int size) {
+ void emit_xor(Operand dst, Register src, int size) {
arithmetic_op(0x31, src, dst, size);
}
// Most BMI instructions are similar.
void bmi1q(byte op, Register reg, Register vreg, Register rm);
- void bmi1q(byte op, Register reg, Register vreg, const Operand& rm);
+ void bmi1q(byte op, Register reg, Register vreg, Operand rm);
void bmi1l(byte op, Register reg, Register vreg, Register rm);
- void bmi1l(byte op, Register reg, Register vreg, const Operand& rm);
+ void bmi1l(byte op, Register reg, Register vreg, Operand rm);
void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
- void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm);
+ void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
- void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
- const Operand& rm);
+ void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
   // Record the position of a jmp/jcc instruction.
void record_farjmp_position(Label* L, int pos);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 27061c1e2b..2ff00f0402 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -135,7 +135,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label fast_power, try_arithmetic_simplification;
// Detect integer exponents stored as double.
__ DoubleToI(exponent, double_exponent, double_scratch,
- TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
&try_arithmetic_simplification,
&try_arithmetic_simplification);
__ jmp(&int_exponent);
@@ -425,6 +424,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
__ bind(&skip);
+ // Reset the masking register. This is done independently of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
__ movp(rdi, masm->ExternalOperand(pending_handler_entrypoint_address));
__ jmp(rdi);
@@ -609,7 +614,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Call the entry hook function.
__ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
- Assembler::RelocInfoNone());
+ RelocInfo::NONE);
AllowExternalCallThatCantCauseGC scope(masm);
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 25a74b98fc..ee2cfd5e8b 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -28,9 +28,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 247f5e889e..91cee67bdd 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -10,6 +10,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/lazy-instance.h"
+#include "src/base/v8-fallthrough.h"
#include "src/disasm.h"
#include "src/x64/sse-instr.h"
@@ -1840,6 +1841,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xD9) {
mnemonic = "psubusw";
} else if (opcode == 0xDA) {
+ mnemonic = "pand";
+ } else if (opcode == 0xDB) {
mnemonic = "pminub";
} else if (opcode == 0xDC) {
mnemonic = "paddusb";
@@ -1857,6 +1860,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "psubsw";
} else if (opcode == 0xEA) {
mnemonic = "pminsw";
+ } else if (opcode == 0xEB) {
+ mnemonic = "por";
} else if (opcode == 0xEC) {
mnemonic = "paddsb";
} else if (opcode == 0xED) {
@@ -2703,7 +2708,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xF6:
- byte_size_operand_ = true; // fall through
+ byte_size_operand_ = true;
+ V8_FALLTHROUGH;
case 0xF7:
data += F6F7Instruction(data);
break;
@@ -2814,6 +2820,11 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(buffer, instruction);
}
+int Disassembler::InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ DisassemblerX64 d(converter_, ABORT_ON_UNIMPLEMENTED_OPCODE);
+ return d.InstructionDecode(buffer, instruction);
+}
// The X64 assembler does not use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) {
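The 0xF6 case above replaces a fall-through comment with the V8_FALLTHROUGH annotation pulled in by the new include, so compilers that warn on implicit fallthrough can verify the intent. A sketch of how such a macro is commonly defined and used; the exact definition lives in src/base/v8-fallthrough.h and may differ in detail:

    // Sketch of a fallthrough annotation; assumed definition, not a quote.
    #if defined(__clang__)
    #define V8_FALLTHROUGH [[clang::fallthrough]]
    #elif defined(__GNUC__) && __GNUC__ >= 7
    #define V8_FALLTHROUGH __attribute__((fallthrough))
    #else
    #define V8_FALLTHROUGH
    #endif

    int Classify(int opcode) {
      switch (opcode) {
        case 0xF6:
          // byte-sized-operand setup would happen here ...
          V8_FALLTHROUGH;  // deliberately shares the 0xF7 handler
        case 0xF7:
          return 1;
        default:
          return 0;
      }
    }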
diff --git a/deps/v8/src/x64/frame-constants-x64.h b/deps/v8/src/x64/frame-constants-x64.h
index 95bad4c01a..07d2d1a8b1 100644
--- a/deps/v8/src/x64/frame-constants-x64.h
+++ b/deps/v8/src/x64/frame-constants-x64.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_FRAMES_X64_H_
-#define V8_X64_FRAMES_X64_H_
+#ifndef V8_X64_FRAME_CONSTANTS_X64_H_
+#define V8_X64_FRAME_CONSTANTS_X64_H_
namespace v8 {
namespace internal {
@@ -55,4 +55,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_X64_FRAMES_X64_H_
+#endif // V8_X64_FRAME_CONSTANTS_X64_H_
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 22bad696d2..be32df3164 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -69,13 +69,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-void FastNewClosureDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // SharedFunctionInfo, vector, slot index.
- Register registers[] = {rbx, rcx, rdx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 5019be3727..e09321e183 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -15,6 +15,7 @@
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/x64/assembler-x64.h"
@@ -187,7 +188,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
Move(kScratchRegister, reinterpret_cast<Address>(kZapValue),
- Assembler::RelocInfoNone());
+ RelocInfo::NONE);
}
Push(Immediate(static_cast<int32_t>(address)));
return;
@@ -213,8 +214,7 @@ void TurboAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
(index << kPointerSizeLog2) - kRootRegisterBias));
}
-void TurboAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
+void TurboAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
DCHECK(root_array_available_);
DCHECK(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
@@ -256,9 +256,8 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Move(value, reinterpret_cast<Address>(kZapValue),
- Assembler::RelocInfoNone());
- Move(dst, reinterpret_cast<Address>(kZapValue), Assembler::RelocInfoNone());
+ Move(value, reinterpret_cast<Address>(kZapValue), RelocInfo::NONE);
+ Move(dst, reinterpret_cast<Address>(kZapValue), RelocInfo::NONE);
}
}
@@ -388,10 +387,8 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Move(address, reinterpret_cast<Address>(kZapValue),
- Assembler::RelocInfoNone());
- Move(value, reinterpret_cast<Address>(kZapValue),
- Assembler::RelocInfoNone());
+ Move(address, reinterpret_cast<Address>(kZapValue), RelocInfo::NONE);
+ Move(value, reinterpret_cast<Address>(kZapValue), RelocInfo::NONE);
}
}
@@ -616,7 +613,7 @@ void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtss2sd(dst, dst, src);
@@ -634,7 +631,7 @@ void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvtsd2ss(dst, dst, src);
@@ -654,7 +651,7 @@ void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorpd(dst, dst, dst);
@@ -676,7 +673,7 @@ void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, dst);
@@ -698,7 +695,7 @@ void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorps(dst, dst, dst);
@@ -720,7 +717,7 @@ void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
}
}
-void TurboAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
+void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vxorpd(dst, dst, dst);
@@ -775,7 +772,7 @@ void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttss2si(Register dst, const Operand& src) {
+void TurboAssembler::Cvttss2si(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2si(dst, src);
@@ -793,7 +790,7 @@ void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttsd2si(Register dst, const Operand& src) {
+void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2si(dst, src);
@@ -811,7 +808,7 @@ void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttss2siq(Register dst, const Operand& src) {
+void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2siq(dst, src);
@@ -829,7 +826,7 @@ void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
}
}
-void TurboAssembler::Cvttsd2siq(Register dst, const Operand& src) {
+void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttsd2siq(dst, src);
@@ -838,8 +835,7 @@ void TurboAssembler::Cvttsd2siq(Register dst, const Operand& src) {
}
}
-
-void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+void MacroAssembler::Load(Register dst, Operand src, Representation r) {
DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
movsxbq(dst, src);
@@ -856,8 +852,7 @@ void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
}
}
-
-void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
+void MacroAssembler::Store(Operand dst, Register src, Representation r) {
DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
movb(dst, src);
@@ -887,7 +882,7 @@ void TurboAssembler::Set(Register dst, int64_t x) {
}
}
-void TurboAssembler::Set(const Operand& dst, intptr_t x) {
+void TurboAssembler::Set(Operand dst, intptr_t x) {
if (kPointerSize == kInt64Size) {
if (is_int32(x)) {
movp(dst, Immediate(static_cast<int32_t>(x)));
@@ -921,7 +916,7 @@ void TurboAssembler::Move(Register dst, Smi* source) {
if (value == 0) {
xorl(dst, dst);
} else {
- Move(dst, source, Assembler::RelocInfoNone());
+ Move(dst, source, RelocInfo::NONE);
}
}
@@ -947,7 +942,7 @@ void TurboAssembler::SmiToInteger32(Register dst, Register src) {
}
}
-void TurboAssembler::SmiToInteger32(Register dst, const Operand& src) {
+void TurboAssembler::SmiToInteger32(Register dst, Operand src) {
if (SmiValuesAre32Bits()) {
movl(dst, Operand(src, kSmiShift / kBitsPerByte));
} else {
@@ -993,22 +988,19 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
}
}
-
-void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
+void MacroAssembler::SmiCompare(Register dst, Operand src) {
AssertSmi(dst);
AssertSmi(src);
cmpp(dst, src);
}
-
-void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+void MacroAssembler::SmiCompare(Operand dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
cmpp(dst, src);
}
-
-void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+void MacroAssembler::SmiCompare(Operand dst, Smi* src) {
AssertSmi(dst);
if (SmiValuesAre32Bits()) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
@@ -1018,8 +1010,7 @@ void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
}
}
-
-void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
+void MacroAssembler::Cmp(Operand dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
DCHECK(!dst.AddressUsesRegister(smi_reg));
@@ -1033,7 +1024,7 @@ Condition TurboAssembler::CheckSmi(Register src) {
return zero;
}
-Condition TurboAssembler::CheckSmi(const Operand& src) {
+Condition TurboAssembler::CheckSmi(Operand src) {
STATIC_ASSERT(kSmiTag == 0);
testb(src, Immediate(kSmiTagMask));
return zero;
@@ -1059,7 +1050,7 @@ void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
j(NegateCondition(smi), on_not_smi, near_jump);
}
-void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
+void MacroAssembler::SmiAddConstant(Operand dst, Smi* constant) {
if (constant->value() != 0) {
if (SmiValuesAre32Bits()) {
addl(Operand(dst, kSmiShift / kBitsPerByte),
@@ -1167,10 +1158,10 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
Pcmpeqd(dst, dst);
} else if (pop + ntz == 64) {
Pcmpeqd(dst, dst);
- Psllq(dst, ntz);
+ Psllq(dst, static_cast<byte>(ntz));
} else if (pop + nlz == 64) {
Pcmpeqd(dst, dst);
- Psrlq(dst, nlz);
+ Psrlq(dst, static_cast<byte>(nlz));
} else {
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
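The two casts above only make the shift-amount narrowing explicit; the surrounding logic is the interesting part. It recognizes 64-bit constants that are a single contiguous run of one bits: pcmpeqd dst,dst produces all ones, and one logical shift then carves out the run, avoiding a constant-pool load. A scalar model of the classification, assuming (as the names suggest) that pop, ntz and nlz are the popcount, trailing-zero and leading-zero counts of src:

    #include <cstdint>

    // If popcount + trailing zeros == 64, the value is "ones, then zeros",
    // i.e. ~0 << ntz; symmetrically, popcount + leading zeros == 64 means
    // "zeros, then ones", i.e. ~0 >> nlz. Both are one shift away from the
    // all-ones pattern that pcmpeqd materializes.
    bool IsShiftedOnesRun(uint64_t src, int pop, int ntz, int nlz) {
      if (src == 0) return false;  // handled separately (xorps) upstream
      if (pop + ntz == 64) return src == (~uint64_t{0} << ntz);
      if (pop + nlz == 64) return src == (~uint64_t{0} >> nlz);
      return false;
    }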
@@ -1184,260 +1175,6 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Movaps(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovaps(dst, src);
- } else {
- movaps(dst, src);
- }
-}
-
-void TurboAssembler::Movups(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovups(dst, src);
- } else {
- movups(dst, src);
- }
-}
-
-void TurboAssembler::Movups(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovups(dst, src);
- } else {
- movups(dst, src);
- }
-}
-
-void TurboAssembler::Movups(const Operand& dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovups(dst, src);
- } else {
- movups(dst, src);
- }
-}
-
-void TurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovapd(dst, src);
- } else {
- movapd(dst, src);
- }
-}
-
-void TurboAssembler::Movsd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovsd(dst, dst, src);
- } else {
- movsd(dst, src);
- }
-}
-
-void TurboAssembler::Movsd(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovsd(dst, src);
- } else {
- movsd(dst, src);
- }
-}
-
-void TurboAssembler::Movsd(const Operand& dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovsd(dst, src);
- } else {
- movsd(dst, src);
- }
-}
-
-void TurboAssembler::Movss(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovss(dst, dst, src);
- } else {
- movss(dst, src);
- }
-}
-
-void TurboAssembler::Movss(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovss(dst, src);
- } else {
- movss(dst, src);
- }
-}
-
-void TurboAssembler::Movss(const Operand& dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovss(dst, src);
- } else {
- movss(dst, src);
- }
-}
-
-void TurboAssembler::Movd(XMMRegister dst, Register src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovd(dst, src);
- } else {
- movd(dst, src);
- }
-}
-
-void TurboAssembler::Movd(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovd(dst, src);
- } else {
- movd(dst, src);
- }
-}
-
-void TurboAssembler::Movd(Register dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovd(dst, src);
- } else {
- movd(dst, src);
- }
-}
-
-void TurboAssembler::Movq(XMMRegister dst, Register src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovq(dst, src);
- } else {
- movq(dst, src);
- }
-}
-
-void TurboAssembler::Movq(Register dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovq(dst, src);
- } else {
- movq(dst, src);
- }
-}
-
-void TurboAssembler::Movmskps(Register dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovmskps(dst, src);
- } else {
- movmskps(dst, src);
- }
-}
-
-void TurboAssembler::Movmskpd(Register dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmovmskpd(dst, src);
- } else {
- movmskpd(dst, src);
- }
-}
-
-void TurboAssembler::Xorps(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vxorps(dst, dst, src);
- } else {
- xorps(dst, src);
- }
-}
-
-void TurboAssembler::Xorps(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vxorps(dst, dst, src);
- } else {
- xorps(dst, src);
- }
-}
-
-void TurboAssembler::Roundss(XMMRegister dst, XMMRegister src,
- RoundingMode mode) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vroundss(dst, dst, src, mode);
- } else {
- roundss(dst, src, mode);
- }
-}
-
-void TurboAssembler::Roundsd(XMMRegister dst, XMMRegister src,
- RoundingMode mode) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vroundsd(dst, dst, src, mode);
- } else {
- roundsd(dst, src, mode);
- }
-}
-
-void TurboAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vsqrtsd(dst, dst, src);
- } else {
- sqrtsd(dst, src);
- }
-}
-
-void TurboAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vsqrtsd(dst, dst, src);
- } else {
- sqrtsd(dst, src);
- }
-}
-
-void TurboAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vucomiss(src1, src2);
- } else {
- ucomiss(src1, src2);
- }
-}
-
-void TurboAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vucomiss(src1, src2);
- } else {
- ucomiss(src1, src2);
- }
-}
-
-void TurboAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vucomisd(src1, src2);
- } else {
- ucomisd(src1, src2);
- }
-}
-
-void TurboAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vucomisd(src1, src2);
- } else {
- ucomisd(src1, src2);
- }
-}
-
// ----------------------------------------------------------------------------
void MacroAssembler::Absps(XMMRegister dst) {
@@ -1470,8 +1207,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
}
}
-
-void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
+void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
@@ -1491,7 +1227,7 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
movp(result, reinterpret_cast<void*>(object.address()), rmode);
}
-void TurboAssembler::Move(const Operand& dst, Handle<HeapObject> object,
+void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
RelocInfo::Mode rmode) {
Move(kScratchRegister, object, rmode);
movp(dst, kScratchRegister);
@@ -1528,7 +1264,7 @@ void TurboAssembler::Push(Register src) {
}
}
-void TurboAssembler::Push(const Operand& src) {
+void TurboAssembler::Push(Operand src) {
if (kPointerSize == kInt64Size) {
pushq(src);
} else {
@@ -1538,8 +1274,7 @@ void TurboAssembler::Push(const Operand& src) {
}
}
-
-void MacroAssembler::PushQuad(const Operand& src) {
+void MacroAssembler::PushQuad(Operand src) {
if (kPointerSize == kInt64Size) {
pushq(src);
} else {
@@ -1579,8 +1314,7 @@ void MacroAssembler::Pop(Register dst) {
}
}
-
-void MacroAssembler::Pop(const Operand& dst) {
+void MacroAssembler::Pop(Operand dst) {
if (kPointerSize == kInt64Size) {
popq(dst);
} else {
@@ -1596,8 +1330,7 @@ void MacroAssembler::Pop(const Operand& dst) {
}
}
-
-void MacroAssembler::PopQuad(const Operand& dst) {
+void MacroAssembler::PopQuad(Operand dst) {
if (kPointerSize == kInt64Size) {
popq(dst);
} else {
@@ -1612,8 +1345,7 @@ void MacroAssembler::Jump(ExternalReference ext) {
jmp(kScratchRegister);
}
-
-void MacroAssembler::Jump(const Operand& op) {
+void MacroAssembler::Jump(Operand op) {
if (kPointerSize == kInt64Size) {
jmp(op);
} else {
@@ -1634,6 +1366,12 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
+void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
+ Address bytes_address = reinterpret_cast<Address>(stream->bytes());
+ Move(kOffHeapTrampolineRegister, bytes_address, RelocInfo::NONE);
+ jmp(kOffHeapTrampolineRegister);
+}
+
int TurboAssembler::CallSize(ExternalReference ext) {
// Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
return LoadAddressSize(ext) +
@@ -1649,7 +1387,7 @@ void TurboAssembler::Call(ExternalReference ext) {
DCHECK_EQ(end_position, pc_offset());
}
-void TurboAssembler::Call(const Operand& op) {
+void TurboAssembler::Call(Operand op) {
if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
call(op);
} else {
@@ -1751,7 +1489,7 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
}
}
-void TurboAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
DCHECK(imm8 == 0 || imm8 == 1);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
@@ -1781,7 +1519,7 @@ void TurboAssembler::Lzcntl(Register dst, Register src) {
xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
}
-void TurboAssembler::Lzcntl(Register dst, const Operand& src) {
+void TurboAssembler::Lzcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntl(dst, src);
@@ -1809,7 +1547,7 @@ void TurboAssembler::Lzcntq(Register dst, Register src) {
xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
}
-void TurboAssembler::Lzcntq(Register dst, const Operand& src) {
+void TurboAssembler::Lzcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
lzcntq(dst, src);
@@ -1837,7 +1575,7 @@ void TurboAssembler::Tzcntq(Register dst, Register src) {
bind(&not_zero_src);
}
-void TurboAssembler::Tzcntq(Register dst, const Operand& src) {
+void TurboAssembler::Tzcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntq(dst, src);
@@ -1864,7 +1602,7 @@ void TurboAssembler::Tzcntl(Register dst, Register src) {
bind(&not_zero_src);
}
-void TurboAssembler::Tzcntl(Register dst, const Operand& src) {
+void TurboAssembler::Tzcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(BMI1)) {
CpuFeatureScope scope(this, BMI1);
tzcntl(dst, src);
@@ -1886,7 +1624,7 @@ void TurboAssembler::Popcntl(Register dst, Register src) {
UNREACHABLE();
}
-void TurboAssembler::Popcntl(Register dst, const Operand& src) {
+void TurboAssembler::Popcntl(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntl(dst, src);
@@ -1904,7 +1642,7 @@ void TurboAssembler::Popcntq(Register dst, Register src) {
UNREACHABLE();
}
-void TurboAssembler::Popcntq(Register dst, const Operand& src) {
+void TurboAssembler::Popcntq(Register dst, Operand src) {
if (CpuFeatures::IsSupported(POPCNT)) {
CpuFeatureScope scope(this, POPCNT);
popcntq(dst, src);
@@ -2035,29 +1773,13 @@ void TurboAssembler::SlowTruncateToIDelayed(Zone* zone, Register result_reg) {
}
void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch,
- MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan,
- Label* minus_zero, Label::Distance dst) {
+ XMMRegister scratch, Label* lost_precision,
+ Label* is_nan, Label::Distance dst) {
Cvttsd2si(result_reg, input_reg);
Cvtlsi2sd(kScratchDoubleReg, result_reg);
Ucomisd(kScratchDoubleReg, input_reg);
j(not_equal, lost_precision, dst);
j(parity_even, is_nan, dst); // NaN.
- if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- testl(result_reg, result_reg);
- j(not_zero, &done, Label::kNear);
- Movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // jump to minus_zero.
- andl(result_reg, Immediate(1));
- j(not_zero, minus_zero, dst);
- bind(&done);
- }
}
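With the minus-zero path deleted, DoubleToI reduces to a truncate-and-round-trip check: truncate to int, widen back to double, and compare. Inequality means the input was not exactly representable; an unordered compare (the parity flag) means NaN. A self-contained scalar model of the emitted cvttsd2si / cvtlsi2sd / ucomisd sequence, with overflow handled explicitly because the C++ cast, unlike the instruction, has no defined saturation:

    #include <cmath>
    #include <cstdint>

    enum class DtoIResult { kOk, kLostPrecision, kNaN };

    DtoIResult DoubleToIModel(double input, int32_t* out) {
      if (std::isnan(input)) return DtoIResult::kNaN;  // parity_even branch
      // Hardware cvttsd2si yields 0x80000000 on overflow, which then fails
      // the round-trip compare; model that without undefined behavior.
      if (input < -2147483648.0 || input >= 2147483648.0)
        return DtoIResult::kLostPrecision;
      int32_t truncated = static_cast<int32_t>(input);  // cvttsd2si
      if (static_cast<double>(truncated) != input)      // cvtlsi2sd + ucomisd
        return DtoIResult::kLostPrecision;
      *out = truncated;  // note: -0.0 now converts to 0 without a bailout,
      return DtoIResult::kOk;  // which is what dropping the mode argues for
    }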
@@ -2076,8 +1798,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
-
-void MacroAssembler::AssertSmi(const Operand& object) {
+void MacroAssembler::AssertSmi(Operand object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
@@ -2310,6 +2031,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
+ static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
movp(rcx, FieldOperand(function, JSFunction::kCodeOffset));
addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@@ -2387,12 +2109,31 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook;
+ Label skip_hook, call_hook;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ Operand debug_is_active_operand = ExternalOperand(debug_is_active);
+ cmpb(debug_is_active_operand, Immediate(0));
+ j(equal, &skip_hook);
+
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
cmpb(debug_hook_active_operand, Immediate(0));
- j(equal, &skip_hook);
+ j(not_equal, &call_hook);
+
+ movp(kScratchRegister,
+ FieldOperand(fun, JSFunction::kSharedFunctionInfoOffset));
+ movp(kScratchRegister,
+ FieldOperand(kScratchRegister, SharedFunctionInfo::kDebugInfoOffset));
+ JumpIfSmi(kScratchRegister, &skip_hook);
+ movp(kScratchRegister,
+ FieldOperand(kScratchRegister, DebugInfo::kFlagsOffset));
+ SmiToInteger32(kScratchRegister, kScratchRegister);
+ testp(kScratchRegister, Immediate(DebugInfo::kBreakAtEntry));
+ j(zero, &skip_hook);
+
+ bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
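The rewritten check now requires the debugger to be active at all and, when the unconditional hook flag is off, walks to the function's DebugInfo to see whether break-at-entry was requested. A self-contained model of the emitted control flow; every type, field and flag value below is a stand-in for the real V8 objects named in the assembly, not their actual layout:

    #include <cstdint>

    struct DebugInfo {
      uint32_t flags;
      static const uint32_t kBreakAtEntry = 1;  // illustrative value
    };
    struct SharedFunctionInfo { DebugInfo* debug_info; };  // null ~ Smi case
    struct JSFunction { SharedFunctionInfo* shared; };

    bool ShouldCallDebugHook(bool debug_is_active, bool hook_on_call,
                             const JSFunction& fun) {
      if (!debug_is_active) return false;   // new early out -> skip_hook
      if (hook_on_call) return true;        // j not_equal -> call_hook
      DebugInfo* info = fun.shared->debug_info;
      if (info == nullptr) return false;    // JumpIfSmi -> skip_hook
      return (info->flags & DebugInfo::kBreakAtEntry) != 0;
    }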
@@ -2446,13 +2187,13 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
if (type == StackFrame::INTERNAL) {
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
Push(kScratchRegister);
- }
- if (emit_debug_code()) {
- Move(kScratchRegister,
- isolate()->factory()->undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
- cmpp(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
+ // Check at runtime that this code object was patched correctly.
+ if (emit_debug_code()) {
+ Move(kScratchRegister, isolate()->factory()->undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ cmpp(Operand(rsp, 0), kScratchRegister);
+ Check(not_equal, AbortReason::kCodeObjectNotProperlyPatched);
+ }
}
}
@@ -2739,6 +2480,22 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
}
j(cc, condition_met, condition_met_distance);
}
+
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ Label current;
+ bind(&current);
+ int pc = pc_offset();
+ // Load effective address to get the address of the current instruction.
+ leaq(dst, Operand(&current));
+ if (pc != 0) {
+ subq(dst, Immediate(pc));
+ }
+}
+
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ Set(kSpeculationPoisonRegister, -1);
+}
+
} // namespace internal
} // namespace v8
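ComputeCodeStartAddress, added above, recovers the start of the current code object without embedding a CodeObject handle: it binds a label at the current position, leas that label's runtime address into dst, and subtracts the label's known assembly-time offset. The arithmetic it relies on, spelled out:

    #include <cstdint>

    // For a label bound at byte offset pc within the instruction stream:
    //   address_of(label) == code_start + pc
    // so the emitted subq recovers:
    //   code_start == address_of(label) - pc
    uintptr_t ComputeCodeStart(uintptr_t label_address, int pc) {
      return label_address - static_cast<uintptr_t>(pc);
    }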
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 4ceab2cf9c..faa0462cd1 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -20,11 +20,13 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
+constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r12;
constexpr Register kInterpreterBytecodeArrayRegister = r14;
constexpr Register kInterpreterDispatchTableRegister = r15;
constexpr Register kJavaScriptCallArgCountRegister = rax;
+constexpr Register kJavaScriptCallCodeStartRegister = rcx;
constexpr Register kJavaScriptCallNewTargetRegister = rdx;
constexpr Register kRuntimeCallFunctionRegister = rbx;
constexpr Register kRuntimeCallArgCountRegister = rax;
@@ -38,6 +40,7 @@ constexpr Register kRootRegister = r13; // callee save
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
constexpr int kRootRegisterBias = 128;
+constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
@@ -133,51 +136,83 @@ class TurboAssembler : public Assembler {
return code_object_;
}
-#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
- void macro_name(XMMRegister dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, dst, src); \
- } else { \
- name(dst, src); \
- } \
+ template <typename Dst, typename... Args>
+ struct AvxHelper {
+ Assembler* assm;
+ // Call a method where the AVX version expects the dst argument to be
+ // duplicated.
+ template <void (Assembler::*avx)(Dst, Dst, Args...),
+ void (Assembler::*no_avx)(Dst, Args...)>
+ void emit(Dst dst, Args... args) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, dst, args...);
+ } else {
+ (assm->*no_avx)(dst, args...);
+ }
+ }
+
+ // Call a method where the AVX version expects no duplicated dst argument.
+ template <void (Assembler::*avx)(Dst, Args...),
+ void (Assembler::*no_avx)(Dst, Args...)>
+ void emit(Dst dst, Args... args) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, args...);
+ } else {
+ (assm->*no_avx)(dst, args...);
+ }
+ }
+ };
+
+#define AVX_OP(macro_name, name) \
+ template <typename Dst, typename... Args> \
+ void macro_name(Dst dst, Args... args) { \
+ AvxHelper<Dst, Args...>{this} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
}
-#define AVX_OP2_X(macro_name, name) \
- AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
-#define AVX_OP2_O(macro_name, name) \
- AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
-#define AVX_OP2_XO(macro_name, name) \
- AVX_OP2_X(macro_name, name) \
- AVX_OP2_O(macro_name, name)
-
- AVX_OP2_XO(Subsd, subsd)
- AVX_OP2_XO(Divss, divss)
- AVX_OP2_XO(Divsd, divsd)
- AVX_OP2_XO(Xorpd, xorpd)
- AVX_OP2_X(Pcmpeqd, pcmpeqd)
- AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
- AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
-
-#undef AVX_OP2_O
-#undef AVX_OP2_X
-#undef AVX_OP2_XO
-#undef AVX_OP2_WITH_TYPE
-
- void Xorps(XMMRegister dst, XMMRegister src);
- void Xorps(XMMRegister dst, const Operand& src);
-
- void Movd(XMMRegister dst, Register src);
- void Movd(XMMRegister dst, const Operand& src);
- void Movd(Register dst, XMMRegister src);
- void Movq(XMMRegister dst, Register src);
- void Movq(Register dst, XMMRegister src);
-
- void Movsd(XMMRegister dst, XMMRegister src);
- void Movsd(XMMRegister dst, const Operand& src);
- void Movsd(const Operand& dst, XMMRegister src);
- void Movss(XMMRegister dst, XMMRegister src);
- void Movss(XMMRegister dst, const Operand& src);
- void Movss(const Operand& dst, XMMRegister src);
+
+ AVX_OP(Subsd, subsd)
+ AVX_OP(Divss, divss)
+ AVX_OP(Divsd, divsd)
+ AVX_OP(Xorps, xorps)
+ AVX_OP(Xorpd, xorpd)
+ AVX_OP(Movd, movd)
+ AVX_OP(Movq, movq)
+ AVX_OP(Movaps, movaps)
+ AVX_OP(Movapd, movapd)
+ AVX_OP(Movups, movups)
+ AVX_OP(Movmskps, movmskps)
+ AVX_OP(Movmskpd, movmskpd)
+ AVX_OP(Movss, movss)
+ AVX_OP(Movsd, movsd)
+ AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Psllq, psllq)
+ AVX_OP(Psrlq, psrlq)
+ AVX_OP(Addsd, addsd)
+ AVX_OP(Mulsd, mulsd)
+ AVX_OP(Andps, andps)
+ AVX_OP(Andpd, andpd)
+ AVX_OP(Orpd, orpd)
+ AVX_OP(Cmpeqps, cmpeqps)
+ AVX_OP(Cmpltps, cmpltps)
+ AVX_OP(Cmpleps, cmpleps)
+ AVX_OP(Cmpneqps, cmpneqps)
+ AVX_OP(Cmpnltps, cmpnltps)
+ AVX_OP(Cmpnleps, cmpnleps)
+ AVX_OP(Cmpeqpd, cmpeqpd)
+ AVX_OP(Cmpltpd, cmpltpd)
+ AVX_OP(Cmplepd, cmplepd)
+ AVX_OP(Cmpneqpd, cmpneqpd)
+ AVX_OP(Cmpnltpd, cmpnltpd)
+ AVX_OP(Cmpnlepd, cmpnlepd)
+ AVX_OP(Roundss, roundss)
+ AVX_OP(Roundsd, roundsd)
+ AVX_OP(Sqrtsd, sqrtsd)
+ AVX_OP(Ucomiss, ucomiss)
+ AVX_OP(Ucomisd, ucomisd)
+
+#undef AVX_OP
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -190,25 +225,17 @@ class TurboAssembler : public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, intptr_t x);
+ void Set(Operand dst, intptr_t x);
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
- void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
+ void LoadRoot(Operand destination, Heap::RootListIndex index) {
LoadRoot(kScratchRegister, index);
movp(destination, kScratchRegister);
}
- void Movups(XMMRegister dst, XMMRegister src);
- void Movups(XMMRegister dst, const Operand& src);
- void Movups(const Operand& dst, XMMRegister src);
- void Movapd(XMMRegister dst, XMMRegister src);
- void Movaps(XMMRegister dst, XMMRegister src);
- void Movmskpd(Register dst, XMMRegister src);
- void Movmskps(Register dst, XMMRegister src);
-
void Push(Register src);
- void Push(const Operand& src);
+ void Push(Operand src);
void Push(Immediate value);
void Push(Smi* smi);
void Push(Handle<HeapObject> source);
@@ -239,23 +266,23 @@ class TurboAssembler : public Assembler {
Label::Distance condition_met_distance = Label::kFar);
void Cvtss2sd(XMMRegister dst, XMMRegister src);
- void Cvtss2sd(XMMRegister dst, const Operand& src);
+ void Cvtss2sd(XMMRegister dst, Operand src);
void Cvtsd2ss(XMMRegister dst, XMMRegister src);
- void Cvtsd2ss(XMMRegister dst, const Operand& src);
+ void Cvtsd2ss(XMMRegister dst, Operand src);
void Cvttsd2si(Register dst, XMMRegister src);
- void Cvttsd2si(Register dst, const Operand& src);
+ void Cvttsd2si(Register dst, Operand src);
void Cvttsd2siq(Register dst, XMMRegister src);
- void Cvttsd2siq(Register dst, const Operand& src);
+ void Cvttsd2siq(Register dst, Operand src);
void Cvttss2si(Register dst, XMMRegister src);
- void Cvttss2si(Register dst, const Operand& src);
+ void Cvttss2si(Register dst, Operand src);
void Cvttss2siq(Register dst, XMMRegister src);
- void Cvttss2siq(Register dst, const Operand& src);
+ void Cvttss2siq(Register dst, Operand src);
void Cvtqsi2ss(XMMRegister dst, Register src);
- void Cvtqsi2ss(XMMRegister dst, const Operand& src);
+ void Cvtqsi2ss(XMMRegister dst, Operand src);
void Cvtqsi2sd(XMMRegister dst, Register src);
- void Cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtqsi2sd(XMMRegister dst, Operand src);
void Cvtlsi2ss(XMMRegister dst, Register src);
- void Cvtlsi2ss(XMMRegister dst, const Operand& src);
+ void Cvtlsi2ss(XMMRegister dst, Operand src);
void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
@@ -263,35 +290,24 @@ class TurboAssembler : public Assembler {
// hinders register renaming and makes dependence chains longer. So we use
// xorpd to clear the dst register before cvtsi2sd to solve this issue.
void Cvtlsi2sd(XMMRegister dst, Register src);
- void Cvtlsi2sd(XMMRegister dst, const Operand& src);
-
- void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
-
- void Sqrtsd(XMMRegister dst, XMMRegister src);
- void Sqrtsd(XMMRegister dst, const Operand& src);
-
- void Ucomiss(XMMRegister src1, XMMRegister src2);
- void Ucomiss(XMMRegister src1, const Operand& src2);
- void Ucomisd(XMMRegister src1, XMMRegister src2);
- void Ucomisd(XMMRegister src1, const Operand& src2);
+ void Cvtlsi2sd(XMMRegister dst, Operand src);
void Lzcntq(Register dst, Register src);
- void Lzcntq(Register dst, const Operand& src);
+ void Lzcntq(Register dst, Operand src);
void Lzcntl(Register dst, Register src);
- void Lzcntl(Register dst, const Operand& src);
+ void Lzcntl(Register dst, Operand src);
void Tzcntq(Register dst, Register src);
- void Tzcntq(Register dst, const Operand& src);
+ void Tzcntq(Register dst, Operand src);
void Tzcntl(Register dst, Register src);
- void Tzcntl(Register dst, const Operand& src);
+ void Tzcntl(Register dst, Operand src);
void Popcntl(Register dst, Register src);
- void Popcntl(Register dst, const Operand& src);
+ void Popcntl(Register dst, Operand src);
void Popcntq(Register dst, Register src);
- void Popcntq(Register dst, const Operand& src);
+ void Popcntq(Register dst, Operand src);
// Is the value a tagged smi.
Condition CheckSmi(Register src);
- Condition CheckSmi(const Operand& src);
+ Condition CheckSmi(Operand src);
// Jump to label if the value is a tagged smi.
void JumpIfSmi(Register src, Label* on_smi,
@@ -299,7 +315,7 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Smi* source);
- void Move(const Operand& dst, Smi* source) {
+ void Move(Operand dst, Smi* source) {
Register constant = GetSmiConstant(source);
movp(dst, constant);
}
@@ -319,7 +335,7 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
- void Move(const Operand& dst, Handle<HeapObject> source,
+ void Move(Operand dst, Handle<HeapObject> source,
RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
// Loads a pointer into a register with a relocation mode.
@@ -333,13 +349,13 @@ class TurboAssembler : public Assembler {
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
- void SmiToInteger32(Register dst, const Operand& src);
+ void SmiToInteger32(Register dst, Operand src);
// Loads the address of the external reference into the destination
// register.
void LoadAddress(Register destination, ExternalReference source);
- void Call(const Operand& op);
+ void Call(Operand op);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
@@ -365,7 +381,7 @@ class TurboAssembler : public Assembler {
// Opcode: REX_opt FF /2 m64
return (target.high_bit() != 0) ? 3 : 2;
}
- int CallSize(const Operand& target) {
+ int CallSize(Operand target) {
// Opcode: REX_opt FF /2 m64
return (target.requires_rex() ? 2 : 1) + target.operand_size();
}
@@ -377,10 +393,10 @@ class TurboAssembler : public Assembler {
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
+ void CompareRoot(Operand with, Heap::RootListIndex index);
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
@@ -476,6 +492,12 @@ class TurboAssembler : public Assembler {
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ void ResetSpeculationPoisonRegister();
+
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
int smi_count = 0;
@@ -551,8 +573,7 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
- void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
- Label* if_equal,
+ void JumpIfRoot(Operand with, Heap::RootListIndex index, Label* if_equal,
Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
@@ -565,7 +586,7 @@ class MacroAssembler : public TurboAssembler {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
- void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ void JumpIfNotRoot(Operand with, Heap::RootListIndex index,
Label* if_not_equal,
Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
@@ -649,10 +670,6 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag);
-
// ---------------------------------------------------------------------------
// Conversions between tagged smi values and non-tagged integer values.
@@ -668,9 +685,9 @@ class MacroAssembler : public TurboAssembler {
// otherwise use Cmp.
void SmiCompare(Register smi1, Register smi2);
void SmiCompare(Register dst, Smi* src);
- void SmiCompare(Register dst, const Operand& src);
- void SmiCompare(const Operand& dst, Register src);
- void SmiCompare(const Operand& dst, Smi* src);
+ void SmiCompare(Register dst, Operand src);
+ void SmiCompare(Operand dst, Register src);
+ void SmiCompare(Operand dst, Smi* src);
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.
@@ -694,7 +711,7 @@ class MacroAssembler : public TurboAssembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
- void SmiAddConstant(const Operand& dst, Smi* constant);
+ void SmiAddConstant(Operand dst, Smi* constant);
// Specialized operations
@@ -712,13 +729,13 @@ class MacroAssembler : public TurboAssembler {
// Macro instructions.
// Load/store with specific representation.
- void Load(Register dst, const Operand& src, Representation r);
- void Store(const Operand& dst, Register src, Representation r);
+ void Load(Register dst, Operand src, Representation r);
+ void Store(Operand dst, Register src, Representation r);
void Cmp(Register dst, Handle<Object> source);
- void Cmp(const Operand& dst, Handle<Object> source);
+ void Cmp(Operand dst, Handle<Object> source);
void Cmp(Register dst, Smi* src);
- void Cmp(const Operand& dst, Smi* src);
+ void Cmp(Operand dst, Smi* src);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
@@ -729,51 +746,11 @@ class MacroAssembler : public TurboAssembler {
void DropUnderReturnAddress(int stack_elements,
Register scratch = kScratchRegister);
- void PushQuad(const Operand& src);
+ void PushQuad(Operand src);
void PushImm32(int32_t imm32);
void Pop(Register dst);
- void Pop(const Operand& dst);
- void PopQuad(const Operand& dst);
-
-#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
- void macro_name(XMMRegister dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, dst, src); \
- } else { \
- name(dst, src); \
- } \
- }
-#define AVX_OP2_X(macro_name, name) \
- AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
-#define AVX_OP2_O(macro_name, name) \
- AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
-#define AVX_OP2_XO(macro_name, name) \
- AVX_OP2_X(macro_name, name) \
- AVX_OP2_O(macro_name, name)
-
- AVX_OP2_XO(Addsd, addsd)
- AVX_OP2_XO(Mulsd, mulsd)
- AVX_OP2_XO(Andps, andps)
- AVX_OP2_XO(Andpd, andpd)
- AVX_OP2_XO(Orpd, orpd)
- AVX_OP2_XO(Cmpeqps, cmpeqps)
- AVX_OP2_XO(Cmpltps, cmpltps)
- AVX_OP2_XO(Cmpleps, cmpleps)
- AVX_OP2_XO(Cmpneqps, cmpneqps)
- AVX_OP2_XO(Cmpnltps, cmpnltps)
- AVX_OP2_XO(Cmpnleps, cmpnleps)
- AVX_OP2_XO(Cmpeqpd, cmpeqpd)
- AVX_OP2_XO(Cmpltpd, cmpltpd)
- AVX_OP2_XO(Cmplepd, cmplepd)
- AVX_OP2_XO(Cmpneqpd, cmpneqpd)
- AVX_OP2_XO(Cmpnltpd, cmpnltpd)
- AVX_OP2_XO(Cmpnlepd, cmpnlepd)
-
-#undef AVX_OP2_O
-#undef AVX_OP2_X
-#undef AVX_OP2_XO
-#undef AVX_OP2_WITH_TYPE
+ void Pop(Operand dst);
+ void PopQuad(Operand dst);
// ---------------------------------------------------------------------------
// SIMD macros.
@@ -785,9 +762,12 @@ class MacroAssembler : public TurboAssembler {
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
- void Jump(const Operand& op);
+ void Jump(Operand op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(const InstructionStream* stream);
+
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
@@ -806,8 +786,7 @@ class MacroAssembler : public TurboAssembler {
void CmpInstanceType(Register map, InstanceType type);
void DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, MinusZeroMode minus_zero_mode,
- Label* lost_precision, Label* is_nan, Label* minus_zero,
+ XMMRegister scratch, Label* lost_precision, Label* is_nan,
Label::Distance dst = Label::kFar);
template<typename Field>
@@ -825,7 +804,7 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a smi, enabled via --debug-code.
void AssertSmi(Register object);
- void AssertSmi(const Operand& object);
+ void AssertSmi(Operand object);
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
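The AvxHelper rewrite in this header is the most interesting change: instead of one macro per operand-type combination, a single variadic helper takes the AVX and non-AVX member-function pointers as template parameters, and overload selection on the pointer's signature decides whether the destination must be duplicated. A compilable, self-contained model of the pattern with a toy assembler; all names below are stand-ins:

    #include <cstdio>

    struct Reg { int code; };

    struct Asm {
      // "AVX" form with duplicated dst (e.g. vaddsd dst, dst, src).
      void vaddsd(Reg dst, Reg dup, Reg src) {
        std::printf("vaddsd r%d, r%d, r%d\n", dst.code, dup.code, src.code);
      }
      // "SSE" form (e.g. addsd dst, src).
      void addsd(Reg dst, Reg src) {
        std::printf("addsd r%d, r%d\n", dst.code, src.code);
      }
    };

    bool SupportsAVX() { return true; }  // stand-in for CpuFeatures check

    template <typename Dst, typename... Args>
    struct AvxHelper {
      Asm* assm;
      // Selected when the AVX member takes (Dst, Dst, Args...), i.e. the
      // encoding duplicates the destination operand.
      template <void (Asm::*avx)(Dst, Dst, Args...),
                void (Asm::*no_avx)(Dst, Args...)>
      void emit(Dst dst, Args... args) {
        if (SupportsAVX()) {
          (assm->*avx)(dst, dst, args...);
        } else {
          (assm->*no_avx)(dst, args...);
        }
      }
    };

    int main() {
      Asm a;
      AvxHelper<Reg, Reg>{&a}.emit<&Asm::vaddsd, &Asm::addsd>(Reg{0}, Reg{1});
      return 0;
    }

Instantiated through the AVX_OP macro above, one line per mnemonic now covers every operand-type combination that both encodings accept, which is why the long AVX_OP2_* lists elsewhere in this diff can be deleted.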
diff --git a/deps/v8/src/x64/sse-instr.h b/deps/v8/src/x64/sse-instr.h
index 235aa75fcf..a6614c2346 100644
--- a/deps/v8/src/x64/sse-instr.h
+++ b/deps/v8/src/x64/sse-instr.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SSE_INSTR_H_
-#define V8_SSE_INSTR_H_
+#ifndef V8_X64_SSE_INSTR_H_
+#define V8_X64_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
V(packsswb, 66, 0F, 63) \
@@ -70,4 +70,4 @@
V(pmulld, 66, 0F, 38, 40) \
V(ptest, 66, 0F, 38, 17)
-#endif // V8_SSE_INSTR_H_
+#endif // V8_X64_SSE_INSTR_H_
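sse-instr.h is an X-macro table: each V(mnemonic, prefix, escape, opcode) row is expanded by whatever macro the including file passes in, so one list can generate declarations, emitters, and disassembler entries alike. A minimal, runnable sketch of the technique with an abbreviated one-row list; the real table uses bare byte tokens such as 66 and 0F for token pasting, while the hex literals here are just for printing:

    #include <cstdio>

    // Abbreviated stand-in for SSE2_INSTRUCTION_LIST.
    #define DEMO_SSE2_LIST(V) V(packsswb, 0x66, 0x0F, 0x63)

    // One consumer: print an opcode-table row per instruction.
    #define PRINT_ROW(name, prefix, escape, opcode) \
      std::printf(#name ": %02X %02X %02X\n", prefix, escape, opcode);

    int main() {
      DEMO_SSE2_LIST(PRINT_ROW)
      return 0;
    }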
diff --git a/deps/v8/src/zone/accounting-allocator.h b/deps/v8/src/zone/accounting-allocator.h
index 53d30b3826..bf36a7ff95 100644
--- a/deps/v8/src/zone/accounting-allocator.h
+++ b/deps/v8/src/zone/accounting-allocator.h
@@ -6,6 +6,7 @@
#define V8_ZONE_ACCOUNTING_ALLOCATOR_H_
#include "include/v8-platform.h"
+#include "include/v8.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
index a0aaca8b09..229a3f3f40 100644
--- a/deps/v8/src/zone/zone-chunk-list.h
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -8,18 +8,14 @@
#include "src/utils.h"
#include "src/zone/zone.h"
-#ifndef V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
-#define V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
+#ifndef V8_ZONE_ZONE_CHUNK_LIST_H_
+#define V8_ZONE_ZONE_CHUNK_LIST_H_
namespace v8 {
namespace internal {
-template <typename T>
+template <typename T, bool backwards, bool modifiable>
class ZoneChunkListIterator;
-template <typename T>
-class ForwardZoneChunkListIterator;
-template <typename T>
-class ReverseZoneChunkListIterator;
// A zone-backed hybrid of a vector and a linked list. Use it if you need a
// collection that
@@ -38,6 +34,11 @@ class ReverseZoneChunkListIterator;
template <typename T>
class ZoneChunkList : public ZoneObject {
public:
+ using iterator = ZoneChunkListIterator<T, false, true>;
+ using const_iterator = ZoneChunkListIterator<T, false, false>;
+ using reverse_iterator = ZoneChunkListIterator<T, true, true>;
+ using const_reverse_iterator = ZoneChunkListIterator<T, true, false>;
+
enum class StartMode {
// The list will not allocate a starting chunk. Use if you expect your
// list to remain empty in many cases.
@@ -58,7 +59,7 @@ class ZoneChunkList : public ZoneObject {
}
}
- size_t size() const;
+ size_t size() const { return size_; }
T& front() const;
T& back() const;
@@ -78,27 +79,31 @@ class ZoneChunkList : public ZoneObject {
// Quickly scans the list to retrieve the element at the given index. Will
// *not* check bounds.
- ForwardZoneChunkListIterator<T> Find(const size_t index);
- ForwardZoneChunkListIterator<const T> Find(const size_t index) const;
+ iterator Find(const size_t index);
+ const_iterator Find(const size_t index) const;
// TODO(heimbuef): Add 'rFind', seeking from the end and returning a
// reverse iterator.
void CopyTo(T* ptr);
- ForwardZoneChunkListIterator<T> begin();
- ForwardZoneChunkListIterator<T> end();
- ReverseZoneChunkListIterator<T> rbegin();
- ReverseZoneChunkListIterator<T> rend();
- ForwardZoneChunkListIterator<const T> begin() const;
- ForwardZoneChunkListIterator<const T> end() const;
- ReverseZoneChunkListIterator<const T> rbegin() const;
- ReverseZoneChunkListIterator<const T> rend() const;
+ iterator begin() { return iterator::Begin(this); }
+ iterator end() { return iterator::End(this); }
+ reverse_iterator rbegin() { return reverse_iterator::Begin(this); }
+ reverse_iterator rend() { return reverse_iterator::End(this); }
+ const_iterator begin() const { return const_iterator::Begin(this); }
+ const_iterator end() const { return const_iterator::End(this); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator::Begin(this);
+ }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator::End(this);
+ }
private:
- friend class ZoneChunkListIterator<T>;
- friend class ForwardZoneChunkListIterator<T>;
- friend class ReverseZoneChunkListIterator<T>;
- static const uint32_t kMaxChunkCapacity = 256u;
+ template <typename S, bool backwards, bool modifiable>
+ friend class ZoneChunkListIterator;
+
+ static constexpr uint32_t kMaxChunkCapacity = 256u;
STATIC_ASSERT(kMaxChunkCapacity == static_cast<uint32_t>(StartMode::kBig));
@@ -108,6 +113,7 @@ class ZoneChunkList : public ZoneObject {
Chunk* next_ = nullptr;
Chunk* previous_ = nullptr;
T* items() { return reinterpret_cast<T*>(this + 1); }
+ const T* items() const { return reinterpret_cast<const T*>(this + 1); }
};
Chunk* NewChunk(const uint32_t capacity) {
@@ -135,152 +141,108 @@ class ZoneChunkList : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(ZoneChunkList);
};
-template <typename T>
+template <typename T, bool backwards, bool modifiable>
class ZoneChunkListIterator {
+ private:
+ template <typename S>
+ using maybe_const =
+ typename std::conditional<modifiable, S,
+ typename std::add_const<S>::type>::type;
+ using Chunk = maybe_const<typename ZoneChunkList<T>::Chunk>;
+ using ChunkList = maybe_const<ZoneChunkList<T>>;
+
public:
- T& operator*() { return current_->items()[position_]; }
- bool operator==(const ZoneChunkListIterator& other) {
+ maybe_const<T>& operator*() { return current_->items()[position_]; }
+ bool operator==(const ZoneChunkListIterator& other) const {
return other.current_ == current_ && other.position_ == position_;
}
- bool operator!=(const ZoneChunkListIterator& other) {
+ bool operator!=(const ZoneChunkListIterator& other) const {
return !operator==(other);
}
- protected:
- ZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
- size_t position)
- : current_(current), position_(position) {}
-
- void MoveNext() {
- ++position_;
- if (position_ >= current_->capacity_) {
- current_ = current_->next_;
- position_ = 0;
- }
- }
-
- void MoveRNext() {
- if (position_ == 0) {
- current_ = current_->previous_;
- position_ = current_ ? current_->capacity_ - 1 : 0;
- } else {
- --position_;
- }
- }
-
- typename ZoneChunkList<T>::Chunk* current_;
- size_t position_;
-};
-
-template <typename T>
-class ForwardZoneChunkListIterator : public ZoneChunkListIterator<T> {
- using ZoneChunkListIterator<T>::current_;
- using ZoneChunkListIterator<T>::position_;
- using ZoneChunkListIterator<T>::MoveNext;
- using ZoneChunkListIterator<T>::MoveRNext;
-
- public:
- ForwardZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
- size_t position)
- : ZoneChunkListIterator<T>(current, position) {}
-
- ForwardZoneChunkListIterator& operator++() {
- MoveNext();
+ ZoneChunkListIterator& operator++() {
+ Move<backwards>();
return *this;
}
- ForwardZoneChunkListIterator operator++(int) {
- ForwardZoneChunkListIterator<T> clone(*this);
- MoveNext();
+ ZoneChunkListIterator operator++(int) {
+ ZoneChunkListIterator clone(*this);
+ Move<backwards>();
return clone;
}
- ForwardZoneChunkListIterator& operator--() {
- MoveRNext();
+ ZoneChunkListIterator& operator--() {
+ Move<!backwards>();
return *this;
}
- ForwardZoneChunkListIterator operator--(int) {
- ForwardZoneChunkListIterator<T> clone(*this);
- MoveRNext();
+ ZoneChunkListIterator operator--(int) {
+ ZoneChunkListIterator clone(*this);
+ Move<!backwards>();
return clone;
}
private:
friend class ZoneChunkList<T>;
- static ForwardZoneChunkListIterator<T> Begin(ZoneChunkList<T>* list) {
- return ForwardZoneChunkListIterator<T>(list->front_, 0);
- }
- static ForwardZoneChunkListIterator<T> End(ZoneChunkList<T>* list) {
- if (list->back_ == nullptr) return Begin(list);
- DCHECK_LE(list->back_->position_, list->back_->capacity_);
- if (list->back_->position_ == list->back_->capacity_) {
- return ForwardZoneChunkListIterator<T>(nullptr, 0);
- }
+ static ZoneChunkListIterator Begin(ChunkList* list) {
+ // Forward iterator:
+ if (!backwards) return ZoneChunkListIterator(list->front_, 0);
- return ForwardZoneChunkListIterator<T>(list->back_, list->back_->position_);
+ // Backward iterator:
+ if (list->back_ == nullptr) return End(list);
+ if (list->back_->position_ == 0) {
+ if (list->back_->previous_ != nullptr) {
+ return ZoneChunkListIterator(list->back_->previous_,
+ list->back_->previous_->capacity_ - 1);
+ } else {
+ return End(list);
+ }
+ }
+ return ZoneChunkListIterator(list->back_, list->back_->position_ - 1);
}
-};
-template <typename T>
-class ReverseZoneChunkListIterator : public ZoneChunkListIterator<T> {
- using ZoneChunkListIterator<T>::current_;
- using ZoneChunkListIterator<T>::position_;
- using ZoneChunkListIterator<T>::MoveNext;
- using ZoneChunkListIterator<T>::MoveRNext;
+ static ZoneChunkListIterator End(ChunkList* list) {
+ // Backward iterator:
+ if (backwards) return ZoneChunkListIterator(nullptr, 0);
- public:
- ReverseZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
- size_t position)
- : ZoneChunkListIterator<T>(current, position) {}
-
- ReverseZoneChunkListIterator& operator++() {
- MoveRNext();
- return *this;
- }
+ // Forward iterator:
+ if (list->back_ == nullptr) return Begin(list);
- ReverseZoneChunkListIterator operator++(int) {
- ReverseZoneChunkListIterator<T> clone(*this);
- MoveRNext();
- return clone;
- }
+ DCHECK_LE(list->back_->position_, list->back_->capacity_);
+ if (list->back_->position_ == list->back_->capacity_) {
+ return ZoneChunkListIterator(list->back_->next_, 0);
+ }
- ReverseZoneChunkListIterator& operator--() {
- MoveNext();
- return *this;
+ return ZoneChunkListIterator(list->back_, list->back_->position_);
}
- ReverseZoneChunkListIterator operator--(int) {
- ForwardZoneChunkListIterator<T> clone(*this);
- MoveNext();
- return clone;
- }
+ ZoneChunkListIterator(Chunk* current, size_t position)
+ : current_(current), position_(position) {}
- private:
- friend class ZoneChunkList<T>;
- static ReverseZoneChunkListIterator<T> Begin(ZoneChunkList<T>* list) {
- if (list->back_ == nullptr) return End(list);
- if (list->back_->position_ == 0) {
- if (list->back_->previous_ != nullptr) {
- return ReverseZoneChunkListIterator<T>(
- list->back_->previous_, list->back_->previous_->capacity_ - 1);
+ template <bool move_backward>
+ void Move() {
+ if (move_backward) {
+ // Move backwards.
+ if (position_ == 0) {
+ current_ = current_->previous_;
+ position_ = current_ ? current_->capacity_ - 1 : 0;
} else {
- return End(list);
+ --position_;
+ }
+ } else {
+ // Move forwards.
+ ++position_;
+ if (position_ >= current_->capacity_) {
+ current_ = current_->next_;
+ position_ = 0;
}
}
- return ReverseZoneChunkListIterator<T>(list->back_,
- list->back_->position_ - 1);
- }
- static ReverseZoneChunkListIterator<T> End(ZoneChunkList<T>* list) {
- return ReverseZoneChunkListIterator<T>(nullptr, 0);
}
-};
-template <typename T>
-size_t ZoneChunkList<T>::size() const {
- return size_;
-}
+ Chunk* current_;
+ size_t position_;
+};
template <typename T>
T& ZoneChunkList<T>::front() const {
@@ -327,6 +289,7 @@ void ZoneChunkList<T>::pop_back() {
back_ = back_->previous_;
}
--back_->position_;
+ --size_;
}
template <typename T>
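The `--size_;` added to pop_back() above fixes real bookkeeping: the method already unwound the chunk cursor but left the cached element count alone, so size() drifted upward after every pop. A minimal stand-alone sketch of the invariant, using a simplified container rather than the V8 class:

#include <cassert>
#include <cstddef>
#include <vector>

class CountedList {
 public:
  void push_back(int value) {
    items_.push_back(value);
    ++size_;
  }
  void pop_back() {
    items_.pop_back();
    --size_;  // The fix above restores this half of the invariant.
  }
  std::size_t size() const { return size_; }

 private:
  std::vector<int> items_;
  std::size_t size_ = 0;  // Cached count, mirroring ZoneChunkList::size_.
};

int main() {
  CountedList list;
  list.push_back(1);
  list.push_back(2);
  list.pop_back();
  assert(list.size() == 1);  // Over-reports as 2 without the decrement.
  return 0;
}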
@@ -380,18 +343,18 @@ void ZoneChunkList<T>::Rewind(const size_t limit) {
}
template <typename T>
-ForwardZoneChunkListIterator<T> ZoneChunkList<T>::Find(const size_t index) {
+typename ZoneChunkList<T>::iterator ZoneChunkList<T>::Find(const size_t index) {
SeekResult seek_result = SeekIndex(index);
- return ForwardZoneChunkListIterator<T>(seek_result.chunk_,
- seek_result.chunk_index_);
+ return typename ZoneChunkList<T>::iterator(seek_result.chunk_,
+ seek_result.chunk_index_);
}
template <typename T>
-ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::Find(
+typename ZoneChunkList<T>::const_iterator ZoneChunkList<T>::Find(
const size_t index) const {
SeekResult seek_result = SeekIndex(index);
- return ForwardZoneChunkListIterator<const T>(seek_result.chunk_,
- seek_result.chunk_index_);
+ return typename ZoneChunkList<T>::const_iterator(seek_result.chunk_,
+ seek_result.chunk_index_);
}
template <typename T>
@@ -407,47 +370,7 @@ void ZoneChunkList<T>::CopyTo(T* ptr) {
}
}
-template <typename T>
-ForwardZoneChunkListIterator<T> ZoneChunkList<T>::begin() {
- return ForwardZoneChunkListIterator<T>::Begin(this);
-}
-
-template <typename T>
-ForwardZoneChunkListIterator<T> ZoneChunkList<T>::end() {
- return ForwardZoneChunkListIterator<T>::End(this);
-}
-
-template <typename T>
-ReverseZoneChunkListIterator<T> ZoneChunkList<T>::rbegin() {
- return ReverseZoneChunkListIterator<T>::Begin(this);
-}
-
-template <typename T>
-ReverseZoneChunkListIterator<T> ZoneChunkList<T>::rend() {
- return ReverseZoneChunkListIterator<T>::End(this);
-}
-
-template <typename T>
-ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::begin() const {
- return ForwardZoneChunkListIterator<const T>::Begin(this);
-}
-
-template <typename T>
-ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::end() const {
- return ForwardZoneChunkListIterator<const T>::End(this);
-}
-
-template <typename T>
-ReverseZoneChunkListIterator<const T> ZoneChunkList<T>::rbegin() const {
- return ReverseZoneChunkListIterator<const T>::Begin(this);
-}
-
-template <typename T>
-ReverseZoneChunkListIterator<const T> ZoneChunkList<T>::rend() const {
- return ReverseZoneChunkListIterator<const T>::End(this);
-}
-
} // namespace internal
} // namespace v8
-#endif // V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
+#endif // V8_ZONE_ZONE_CHUNK_LIST_H_
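The rewrite above folds ForwardZoneChunkListIterator and ReverseZoneChunkListIterator into a single ZoneChunkListIterator template whose `backwards` flag picks the traversal direction, so operator++ and operator-- become mirror-image calls to one shared Move<bool>() helper. The following self-contained sketch shows the pattern; the Chunk layout and names are simplified stand-ins, not the V8 types.

#include <cstddef>
#include <iostream>

struct Chunk {
  static const std::size_t kCapacity = 4;
  int items[kCapacity];
  std::size_t position = 0;  // Number of slots in use.
  Chunk* next = nullptr;
  Chunk* previous = nullptr;
};

template <bool backwards>
class ChunkIterator {
 public:
  ChunkIterator(Chunk* chunk, std::size_t index)
      : chunk_(chunk), index_(index) {}

  int& operator*() const { return chunk_->items[index_]; }
  bool operator!=(const ChunkIterator& other) const {
    return chunk_ != other.chunk_ || index_ != other.index_;
  }
  ChunkIterator& operator++() {
    Move<backwards>();  // ++ advances in the iterator's natural direction.
    return *this;
  }
  ChunkIterator& operator--() {
    Move<!backwards>();  // -- is simply the mirrored move.
    return *this;
  }

 private:
  template <bool move_backward>
  void Move() {
    if (move_backward) {
      if (index_ == 0) {
        chunk_ = chunk_->previous;
        index_ = chunk_ ? chunk_->position - 1 : 0;
      } else {
        --index_;
      }
    } else {
      ++index_;
      if (index_ >= chunk_->position) {
        chunk_ = chunk_->next;
        index_ = 0;
      }
    }
  }

  Chunk* chunk_;
  std::size_t index_;
};

int main() {
  Chunk a, b;
  a.next = &b;
  b.previous = &a;
  for (int i = 0; i < 4; ++i) a.items[a.position++] = i;  // 0..3
  for (int i = 4; i < 6; ++i) b.items[b.position++] = i;  // 4..5

  for (ChunkIterator<false> it(&a, 0), end(nullptr, 0); it != end; ++it)
    std::cout << *it << ' ';  // Prints: 0 1 2 3 4 5
  std::cout << '\n';

  for (ChunkIterator<true> it(&b, b.position - 1), end(nullptr, 0); it != end;
       ++it)
    std::cout << *it << ' ';  // Prints: 5 4 3 2 1 0
  std::cout << '\n';
  return 0;
}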
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 5e9fd0440a..c899bf340d 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SRC_ZONE_ZONE_CONTAINERS_H_
-#define V8_SRC_ZONE_ZONE_CONTAINERS_H_
+#ifndef V8_ZONE_ZONE_CONTAINERS_H_
+#define V8_ZONE_ZONE_CONTAINERS_H_
#include <deque>
#include <forward_list>
@@ -190,4 +190,4 @@ typedef ZoneVector<int> IntVector;
} // namespace internal
} // namespace v8
-#endif // V8_SRC_ZONE_ZONE_CONTAINERS_H_
+#endif // V8_ZONE_ZONE_CONTAINERS_H_
diff --git a/deps/v8/src/zone/zone-handle-set.h b/deps/v8/src/zone/zone-handle-set.h
index 9abc89a30e..c5297902d8 100644
--- a/deps/v8/src/zone/zone-handle-set.h
+++ b/deps/v8/src/zone/zone-handle-set.h
@@ -195,11 +195,13 @@ class ZoneHandleSet<T>::const_iterator {
typedef std::forward_iterator_tag iterator_category;
typedef std::ptrdiff_t difference_type;
typedef Handle<T> value_type;
+ typedef value_type reference;
+ typedef value_type* pointer;
const_iterator(const const_iterator& other)
: set_(other.set_), current_(other.current_) {}
- Handle<T> operator*() const { return (*set_)[current_]; }
+ reference operator*() const { return (*set_)[current_]; }
bool operator==(const const_iterator& other) const {
return set_ == other.set_ && current_ == other.current_;
}
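The two typedefs added to ZoneHandleSet<T>::const_iterator round out the five nested names std::iterator_traits looks for; with `reference` or `pointer` missing, the traits expose no members at all, which breaks generic code such as std::distance. A hedged, standalone illustration (not V8 code, int stands in for Handle<T>):

#include <cstddef>
#include <iterator>
#include <vector>

class IntSetIterator {
 public:
  // The five nested names std::iterator_traits looks for; before the patch
  // above, the V8 iterator declared only the first three.
  typedef std::forward_iterator_tag iterator_category;
  typedef std::ptrdiff_t difference_type;
  typedef int value_type;
  typedef value_type reference;  // Returned by value, like Handle<T> above.
  typedef value_type* pointer;

  IntSetIterator(const std::vector<int>* set, std::size_t current)
      : set_(set), current_(current) {}

  reference operator*() const { return (*set_)[current_]; }
  IntSetIterator& operator++() {
    ++current_;
    return *this;
  }
  bool operator==(const IntSetIterator& other) const {
    return set_ == other.set_ && current_ == other.current_;
  }
  bool operator!=(const IntSetIterator& other) const {
    return !(*this == other);
  }

 private:
  const std::vector<int>* set_;
  std::size_t current_;
};

int main() {
  std::vector<int> data = {1, 2, 3};
  IntSetIterator begin(&data, 0), end(&data, data.size());
  // std::distance compiles only because iterator_traits can see all five
  // typedefs, difference_type and pointer included.
  return std::distance(begin, end) == 3 ? 0 : 1;
}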
diff --git a/deps/v8/test/benchmarks/benchmarks.gyp b/deps/v8/test/benchmarks/benchmarks.gyp
deleted file mode 100644
index 0822ee4ecb..0000000000
--- a/deps/v8/test/benchmarks/benchmarks.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'benchmarks_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'benchmarks.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 9db4a277d6..53acd19be4 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -36,4 +36,17 @@
'kraken/imaging-gaussian-blur': [PASS, SLOW],
'octane/typescript': [PASS, SLOW],
}], # ALWAYS
+
+['variant == stress', {
+ # Too slow for stress mode.
+ 'octane/mandreel': [SKIP],
+ 'octane/typescript': [SKIP],
+}],
+
+['gc_fuzzer', {
+ # Too slow for gc fuzzing.
+ 'octane/earley-boyer': [PASS, SLOW, ['mode == debug', SKIP]],

+ 'octane/splay': [SKIP],
+ 'octane/typescript': [SKIP],
+}], # 'gc_fuzzer'
]
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index fd956aa765..dbae4e6216 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -34,17 +34,12 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-class VariantsGenerator(testsuite.VariantsGenerator):
- def _get_variants(self, test):
- return self._standard_variant
-
-
class TestSuite(testsuite.TestSuite):
- def __init__(self, name, root):
- super(TestSuite, self).__init__(name, root)
- self.testroot = os.path.join(root, "data")
+ def __init__(self, *args, **kwargs):
+ super(TestSuite, self).__init__(*args, **kwargs)
+ self.testroot = os.path.join(self.root, "data")
- def ListTests(self, context):
+ def ListTests(self):
tests = map(self._create_test, [
"kraken/ai-astar",
"kraken/audio-beat-detection",
@@ -109,15 +104,9 @@ class TestSuite(testsuite.TestSuite):
def _test_class(self):
return TestCase
- def _variants_gen_class(self):
- return VariantsGenerator
-
- def _LegacyVariantsGeneratorFactory(self):
- return testsuite.StandardLegacyVariantsGenerator
-
class TestCase(testcase.TestCase):
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
path = self.path
testroot = self.suite.testroot
files = []
@@ -144,5 +133,5 @@ class TestCase(testcase.TestCase):
return os.path.join(self.suite.testroot, self.path + self._get_suffix())
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
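The testcfg.py change above switches the suite constructor to blanket forwarding, so the subclass no longer hard-codes the base-class signature (name, root) and keeps working when the test runner grows new parameters. The same idea in C++ terms, as a hedged sketch with invented names:

#include <string>
#include <utility>

struct SuiteBase {
  SuiteBase(std::string name, std::string root)
      : name(std::move(name)), root(std::move(root)) {}
  std::string name;
  std::string root;
};

struct BenchmarkSuite : SuiteBase {
  // Forward whatever the base wants instead of restating its signature, so a
  // new base-class parameter does not break this subclass.
  template <typename... Args>
  explicit BenchmarkSuite(Args&&... args)
      : SuiteBase(std::forward<Args>(args)...), testroot(root + "/data") {}
  std::string testroot;
};

int main() {
  BenchmarkSuite suite("benchmarks", "/suite/root");
  return suite.testroot == "/suite/root/data" ? 0 : 1;
}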
diff --git a/deps/v8/test/bot_default.gyp b/deps/v8/test/bot_default.gyp
deleted file mode 100644
index 11223e068f..0000000000
--- a/deps/v8/test/bot_default.gyp
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'bot_default_run',
- 'type': 'none',
- 'dependencies': [
- 'cctest/cctest.gyp:cctest_run',
- 'debugger/debugger.gyp:debugger_run',
- 'fuzzer/fuzzer.gyp:fuzzer_run',
- 'inspector/inspector.gyp:inspector-test_run',
- 'intl/intl.gyp:intl_run',
- 'message/message.gyp:message_run',
- 'mjsunit/mjsunit.gyp:mjsunit_run',
- 'preparser/preparser.gyp:preparser_run',
- 'unittests/unittests.gyp:unittests_run',
- 'wasm-spec-tests/wasm-spec-tests.gyp:wasm_spec_tests_run',
- 'webkit/webkit.gyp:webkit_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'bot_default.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index b6c9109d75..3fed0751af 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -180,6 +180,7 @@ v8_source_set("cctest_sources") {
"test-identity-map.cc",
"test-inobject-slack-tracking.cc",
"test-intl.cc",
+ "test-isolate-independent-builtins.cc",
"test-liveedit.cc",
"test-lockers.cc",
"test-log.cc",
@@ -227,6 +228,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-js.cc",
"wasm/test-run-wasm-module.cc",
"wasm/test-run-wasm-relocation.cc",
+ "wasm/test-run-wasm-sign-extension.cc",
"wasm/test-run-wasm-simd.cc",
"wasm/test-run-wasm.cc",
"wasm/test-streaming-compilation.cc",
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index a5bb7db64a..87396a0535 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,5 +1,6 @@
per-file *-mips*=ivica.bogosavljevic@mips.com
per-file *-mips*=Miran.Karic@mips.com
+per-file *-mips*=sreten.kovacevic@mips.com
per-file *-ppc*=dstence@us.ibm.com
per-file *-ppc*=joransiu@ca.ibm.com
per-file *-ppc*=jyan@ca.ibm.com
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
deleted file mode 100644
index 5124495ee8..0000000000
--- a/deps/v8/test/cctest/cctest.gyp
+++ /dev/null
@@ -1,523 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# The sources are kept automatically in sync with BUILD.gn.
-
-{
- 'variables': {
- 'v8_code': 1,
- 'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
- 'cctest_sources': [ ### gcmole(all) ###
- 'compiler/c-signature.h',
- 'compiler/call-tester.h',
- 'compiler/codegen-tester.cc',
- 'compiler/codegen-tester.h',
- 'compiler/code-assembler-tester.h',
- 'compiler/function-tester.cc',
- 'compiler/function-tester.h',
- 'compiler/graph-builder-tester.h',
- 'compiler/test-basic-block-profiler.cc',
- 'compiler/test-branch-combine.cc',
- 'compiler/test-run-unwinding-info.cc',
- 'compiler/test-gap-resolver.cc',
- 'compiler/test-graph-visualizer.cc',
- 'compiler/test-code-generator.cc',
- 'compiler/test-code-assembler.cc',
- 'compiler/test-instruction.cc',
- 'compiler/test-js-context-specialization.cc',
- 'compiler/test-js-constant-cache.cc',
- 'compiler/test-js-typed-lowering.cc',
- 'compiler/test-jump-threading.cc',
- 'compiler/test-linkage.cc',
- 'compiler/test-loop-analysis.cc',
- 'compiler/test-machine-operator-reducer.cc',
- 'compiler/test-multiple-return.cc',
- 'compiler/test-node.cc',
- 'compiler/test-operator.cc',
- 'compiler/test-representation-change.cc',
- 'compiler/test-run-bytecode-graph-builder.cc',
- 'compiler/test-run-calls-to-external-references.cc',
- 'compiler/test-run-deopt.cc',
- 'compiler/test-run-intrinsics.cc',
- 'compiler/test-run-jsbranches.cc',
- 'compiler/test-run-jscalls.cc',
- 'compiler/test-run-jsexceptions.cc',
- 'compiler/test-run-jsobjects.cc',
- 'compiler/test-run-jsops.cc',
- 'compiler/test-run-load-store.cc',
- 'compiler/test-run-machops.cc',
- 'compiler/test-run-native-calls.cc',
- 'compiler/test-run-retpoline.cc',
- 'compiler/test-run-stackcheck.cc',
- 'compiler/test-run-stubs.cc',
- 'compiler/test-run-tail-calls.cc',
- 'compiler/test-run-variables.cc',
- 'compiler/test-run-wasm-machops.cc',
- 'compiler/value-helper.cc',
- 'compiler/value-helper.h',
- 'cctest.cc',
- 'cctest.h',
- 'expression-type-collector-macros.h',
- 'gay-fixed.cc',
- 'gay-fixed.h',
- 'gay-precision.cc',
- 'gay-precision.h',
- 'gay-shortest.cc',
- 'gay-shortest.h',
- 'heap/heap-tester.h',
- 'heap/heap-utils.cc',
- 'heap/heap-utils.h',
- 'heap/test-alloc.cc',
- 'heap/test-array-buffer-tracker.cc',
- 'heap/test-compaction.cc',
- 'heap/test-concurrent-marking.cc',
- 'heap/test-embedder-tracing.cc',
- 'heap/test-heap.cc',
- 'heap/test-incremental-marking.cc',
- 'heap/test-invalidated-slots.cc',
- 'heap/test-lab.cc',
- 'heap/test-mark-compact.cc',
- 'heap/test-page-promotion.cc',
- 'heap/test-spaces.cc',
- 'interpreter/interpreter-tester.cc',
- 'interpreter/interpreter-tester.h',
- 'interpreter/source-position-matcher.cc',
- 'interpreter/source-position-matcher.h',
- 'interpreter/test-bytecode-generator.cc',
- 'interpreter/test-interpreter.cc',
- 'interpreter/test-interpreter-intrinsics.cc',
- 'interpreter/test-source-positions.cc',
- 'interpreter/bytecode-expectations-printer.cc',
- 'interpreter/bytecode-expectations-printer.h',
- 'libplatform/test-tracing.cc',
- 'libsampler/test-sampler.cc',
- 'parsing/test-parse-decision.cc',
- 'parsing/test-preparser.cc',
- 'parsing/test-scanner-streams.cc',
- 'parsing/test-scanner.cc',
- 'print-extension.cc',
- 'print-extension.h',
- 'profiler-extension.cc',
- 'profiler-extension.h',
- 'scope-test-helper.h',
- 'setup-isolate-for-tests.cc',
- 'setup-isolate-for-tests.h',
- 'test-access-checks.cc',
- 'test-accessor-assembler.cc',
- 'test-accessors.cc',
- 'test-allocation.cc',
- 'test-api.cc',
- 'test-api.h',
- 'test-api-accessors.cc',
- 'test-api-interceptors.cc',
- 'test-array-list.cc',
- 'test-atomicops.cc',
- 'test-bignum.cc',
- 'test-bignum-dtoa.cc',
- 'test-bit-vector.cc',
- 'test-circular-queue.cc',
- 'test-code-layout.cc',
- 'test-code-stub-assembler.cc',
- 'test-compiler.cc',
- 'test-constantpool.cc',
- 'test-conversions.cc',
- 'test-cpu-profiler.cc',
- 'test-date.cc',
- 'test-debug.cc',
- 'test-decls.cc',
- 'test-deoptimization.cc',
- 'test-dictionary.cc',
- 'test-diy-fp.cc',
- 'test-double.cc',
- 'test-dtoa.cc',
- 'test-elements-kind.cc',
- 'test-fast-dtoa.cc',
- 'test-feedback-vector.cc',
- 'test-feedback-vector.h',
- 'test-field-type-tracking.cc',
- 'test-fixed-dtoa.cc',
- 'test-flags.cc',
- 'test-func-name-inference.cc',
- 'test-global-handles.cc',
- 'test-global-object.cc',
- 'test-hashcode.cc',
- 'test-hashmap.cc',
- 'test-heap-profiler.cc',
- 'test-identity-map.cc',
- 'test-intl.cc',
- 'test-inobject-slack-tracking.cc',
- 'test-liveedit.cc',
- 'test-lockers.cc',
- 'test-log.cc',
- 'test-managed.cc',
- 'test-mementos.cc',
- 'test-modules.cc',
- 'test-object.cc',
- 'test-orderedhashtable.cc',
- 'test-parsing.cc',
- 'test-platform.cc',
- 'test-profile-generator.cc',
- 'test-random-number-generator.cc',
- 'test-regexp.cc',
- 'test-representation.cc',
- 'test-sampler-api.cc',
- 'test-serialize.cc',
- 'test-strings.cc',
- 'test-symbols.cc',
- 'test-strtod.cc',
- 'test-thread-termination.cc',
- 'test-threads.cc',
- 'test-trace-event.cc',
- 'test-traced-value.cc',
- 'test-transitions.cc',
- 'test-transitions.h',
- 'test-typedarrays.cc',
- 'test-types.cc',
- 'test-unbound-queue.cc',
- 'test-unboxed-doubles.cc',
- 'test-unscopables-hidden-prototype.cc',
- 'test-usecounters.cc',
- 'test-utils.cc',
- 'test-version.cc',
- 'test-weakmaps.cc',
- 'test-weaksets.cc',
- 'trace-extension.cc',
- 'trace-extension.h',
- 'types-fuzz.h',
- 'unicode-helpers.h',
- 'wasm/test-c-wasm-entry.cc',
- 'wasm/test-streaming-compilation.cc',
- 'wasm/test-run-wasm.cc',
- 'wasm/test-run-wasm-64.cc',
- 'wasm/test-run-wasm-asmjs.cc',
- 'wasm/test-run-wasm-atomics.cc',
- 'wasm/test-run-wasm-interpreter.cc',
- 'wasm/test-run-wasm-js.cc',
- 'wasm/test-run-wasm-module.cc',
- 'wasm/test-run-wasm-relocation.cc',
- 'wasm/test-run-wasm-simd.cc',
- 'wasm/test-wasm-breakpoints.cc',
- "wasm/test-wasm-codegen.cc",
- 'wasm/test-wasm-interpreter-entry.cc',
- 'wasm/test-wasm-stack.cc',
- 'wasm/test-wasm-trap-position.cc',
- 'wasm/wasm-run-utils.cc',
- 'wasm/wasm-run-utils.h',
- ],
- 'cctest_sources_ia32': [ ### gcmole(arch:ia32) ###
- 'test-assembler-ia32.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-code-stubs-ia32.cc',
- 'test-disasm-ia32.cc',
- 'test-log-stack-tracer.cc',
- 'test-run-wasm-relocation-ia32.cc',
- ],
- 'cctest_sources_x64': [ ### gcmole(arch:x64) ###
- 'test-assembler-x64.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-code-stubs-x64.cc',
- 'test-disasm-x64.cc',
- 'test-macro-assembler-x64.cc',
- 'test-log-stack-tracer.cc',
- 'test-run-wasm-relocation-x64.cc',
- ],
- 'cctest_sources_arm': [ ### gcmole(arch:arm) ###
- 'assembler-helper-arm.cc',
- 'assembler-helper-arm.h',
- 'test-assembler-arm.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-code-stubs-arm.cc',
- 'test-disasm-arm.cc',
- 'test-macro-assembler-arm.cc',
- 'test-run-wasm-relocation-arm.cc',
- 'test-sync-primitives-arm.cc',
- ],
- 'cctest_sources_arm64': [ ### gcmole(arch:arm64) ###
- 'test-utils-arm64.cc',
- 'test-utils-arm64.h',
- 'test-assembler-arm64.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-code-stubs-arm64.cc',
- 'test-disasm-arm64.cc',
- 'test-fuzz-arm64.cc',
- 'test-javascript-arm64.cc',
- 'test-js-arm64-variables.cc',
- 'test-run-wasm-relocation-arm64.cc',
- 'test-sync-primitives-arm64.cc',
- ],
- 'cctest_sources_s390': [ ### gcmole(arch:s390) ###
- 'test-assembler-s390.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-disasm-s390.cc',
- ],
- 'cctest_sources_ppc': [ ### gcmole(arch:ppc) ###
- 'test-assembler-ppc.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-disasm-ppc.cc',
- ],
- 'cctest_sources_mips': [ ### gcmole(arch:mips) ###
- 'test-assembler-mips.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-code-stubs-mips.cc',
- 'test-disasm-mips.cc',
- 'test-macro-assembler-mips.cc',
- ],
- 'cctest_sources_mipsel': [ ### gcmole(arch:mipsel) ###
- 'test-assembler-mips.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-code-stubs-mips.cc',
- 'test-disasm-mips.cc',
- 'test-macro-assembler-mips.cc',
- ],
- 'cctest_sources_mips64': [ ### gcmole(arch:mips64) ###
- 'test-assembler-mips64.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-code-stubs-mips64.cc',
- 'test-disasm-mips64.cc',
- 'test-macro-assembler-mips64.cc',
- ],
- 'cctest_sources_mips64el': [ ### gcmole(arch:mips64el) ###
- 'test-assembler-mips64.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs.h',
- 'test-code-stubs-mips64.cc',
- 'test-disasm-mips64.cc',
- 'test-macro-assembler-mips64.cc',
- ],
- },
- 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
- 'targets': [
- {
- 'target_name': 'cctest',
- 'type': 'executable',
- 'dependencies': [
- 'resources',
- '../../src/v8.gyp:v8_libbase',
- '../../src/v8.gyp:v8_libplatform',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- '../common/wasm/flag-utils.h',
- '../common/wasm/test-signatures.h',
- '../common/wasm/wasm-macro-gen.h',
- '../common/wasm/wasm-module-runner.cc',
- '../common/wasm/wasm-module-runner.h',
- '<@(cctest_sources)',
- '<(generated_file)',
- ],
- 'conditions': [
- ['v8_target_arch=="ia32"', {
- 'sources': [
- '<@(cctest_sources_ia32)',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'sources': [
- '<@(cctest_sources_x64)',
- ],
- }],
- ['v8_target_arch=="arm"', {
- 'sources': [
- '<@(cctest_sources_arm)',
- ],
- }],
- ['v8_target_arch=="arm64"', {
- 'sources': [
- '<@(cctest_sources_arm64)',
- ],
- }],
- ['v8_target_arch=="s390"', {
- 'sources': [
- '<@(cctest_sources_s390)',
- ],
- }],
- ['v8_target_arch=="s390x"', {
- 'sources': [
- '<@(cctest_sources_s390)',
- ],
- }],
- ['v8_target_arch=="ppc"', {
- 'sources': [
- '<@(cctest_sources_ppc)',
- ],
- }],
- ['v8_target_arch=="ppc64"', {
- 'sources': [
- '<@(cctest_sources_ppc)',
- ],
- }],
- ['v8_target_arch=="mips"', {
- 'sources': [
- '<@(cctest_sources_mips)',
- ],
- }],
- ['v8_target_arch=="mipsel"', {
- 'sources': [
- '<@(cctest_sources_mipsel)',
- ],
- }],
- ['v8_target_arch=="mips64"', {
- 'sources': [
- '<@(cctest_sources_mips64)',
- ],
- }],
- ['v8_target_arch=="mips64el"', {
- 'sources': [
- '<@(cctest_sources_mips64el)',
- ],
- }],
- [ 'OS=="win"', {
- 'msvs_settings': {
- 'VCCLCompilerTool': {
- # MSVS wants this for gay-{precision,shortest}.cc.
- 'AdditionalOptions': ['/bigobj'],
- },
- },
- }],
- ['v8_target_arch=="ppc" or v8_target_arch=="ppc64" \
- or v8_target_arch=="arm" or v8_target_arch=="arm64" \
- or v8_target_arch=="s390" or v8_target_arch=="s390x" \
- or v8_target_arch=="mips" or v8_target_arch=="mips64" \
- or v8_target_arch=="mipsel" or v8_target_arch=="mips64el"', {
- # disable fmadd/fmsub so that expected results match generated code in
- # RunFloat64MulAndFloat64Add1 and friends.
- 'cflags': ['-ffp-contract=off'],
- }],
- ['OS=="aix"', {
- 'ldflags': [ '-Wl,-bbigtoc' ],
- }],
- ['component=="shared_library"', {
- # cctest can't be built against a shared library, so we need to
- # depend on the underlying static target in that case.
- 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
- 'defines': [ 'BUILDING_V8_SHARED', ]
- }, {
- 'dependencies': ['../../src/v8.gyp:v8'],
- }],
- ['v8_use_snapshot=="true"', {
- 'dependencies': ['../../src/v8.gyp:v8_initializers'],
- }],
- ],
- },
- {
- 'target_name': 'resources',
- 'type': 'none',
- 'variables': {
- 'file_list': [
- '../../tools/splaytree.js',
- '../../tools/codemap.js',
- '../../tools/csvparser.js',
- '../../tools/consarray.js',
- '../../tools/profile.js',
- '../../tools/profile_view.js',
- '../../tools/arguments.js',
- '../../tools/logreader.js',
- 'log-eq-of-logging-and-traversal.js',
- ],
- },
- 'actions': [
- {
- 'action_name': 'js2c',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(file_list)',
- ],
- 'outputs': [
- '<(generated_file)',
- ],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<@(_outputs)',
- 'TEST', # type
- '<@(file_list)',
- ],
- }
- ],
- },
- {
- 'target_name': 'generate-bytecode-expectations',
- 'type': 'executable',
- 'dependencies': [
- '../../src/v8.gyp:v8',
- '../../src/v8.gyp:v8_libbase',
- '../../src/v8.gyp:v8_libplatform',
- ],
- 'include_dirs+': [
- '../..',
- ],
- 'sources': [
- 'interpreter/bytecode-expectations-printer.cc',
- 'interpreter/bytecode-expectations-printer.h',
- 'interpreter/generate-bytecode-expectations.cc',
- ],
- },
- ],
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'cctest_exe_run',
- 'type': 'none',
- 'dependencies': [
- 'cctest',
- ],
- 'includes': [
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'cctest_exe.isolate',
- ],
- },
- {
- 'target_name': 'cctest_run',
- 'type': 'none',
- 'dependencies': [
- 'cctest_exe_run',
- ],
- 'includes': [
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'cctest.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 37c4c0cfac..72c7a2f78c 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -454,25 +454,6 @@ static inline v8::Local<v8::Value> CompileRun(
}
-static inline v8::Local<v8::Value> ParserCacheCompileRun(const char* source) {
- // Compile once just to get the preparse data, then compile the second time
- // using the data.
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::ScriptCompiler::Source script_source(v8_str(source));
- v8::ScriptCompiler::Compile(context, &script_source,
- v8::ScriptCompiler::kProduceParserCache)
- .ToLocalChecked();
-
- // Check whether we received cached data, and if so use it.
- v8::ScriptCompiler::CompileOptions options =
- script_source.GetCachedData() ? v8::ScriptCompiler::kConsumeParserCache
- : v8::ScriptCompiler::kNoCompileOptions;
-
- return CompileRun(context, &script_source, options);
-}
-
-
// Helper functions that compile and run the source with given origin.
static inline v8::Local<v8::Value> CompileRunWithOrigin(const char* source,
const char* origin_url,
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 2840bccf8c..22757e0664 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -85,6 +85,7 @@
'test-cpu-profiler/JsNativeJsSample': [SKIP],
'test-cpu-profiler/NativeAccessorUninitializedIC': [SKIP],
'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
+ 'test-cpu-profiler/TracingCpuProfiler': [SKIP],
'test-sampler/LibSamplerCollectSample': [SKIP],
# BUG(7202). The test is flaky.
@@ -172,10 +173,6 @@
['arch == s390 or arch == s390x or arch == ppc or arch == ppc64', {
'test-multiple-return/*': [SKIP],
}],
-# TODO(ahaas): Port multiple return values to ARM, MIPS, S390 and PPC
-['arch == mips or arch == mips64 or arch == mipsel or arch == mips64el or arch == s390 or arch == s390x or arch == ppc or arch == ppc64', {
- 'test-multiple-return/ReturnLastValue*': [SKIP],
-}],
##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
@@ -197,9 +194,6 @@
'test-cpu-profiler/TickEvents': [SKIP],
# BUG(v8:6924). The test allocates a lot of memory.
'test-api/NewStringRangeError': [PASS, NO_VARIANTS],
-
- # BUG(chromium:794911).
- 'test-cpu-profiler/TracingCpuProfiler': [SKIP],
}], # 'tsan == True'
##############################################################################
@@ -322,12 +316,58 @@
}], # 'arch == mips64el or arch == mips64'
##############################################################################
-['arch == mips or arch == mipsel or arch == mips64 or arch == mips64el', {
- # For now skip wasm SIMD tests that fail when MSA instr. extension is not
- # available (currently simd-scalar-lowering mechanism doesn't work properly
- # for all SIMD operations)
- 'test-run-wasm-simd/*': [SKIP],
-}], # 'arch == mips or arch == mipsel or arch == mips64 or arch == mips64el'
+['(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips', {
+ # Skip tests that fail on MIPS architectures that don't support SIMD,
+ # because the simd-scalar-lowering mechanism doesn't work properly there.
+ 'test-run-wasm-simd/RunWasm_F32x4RecipApprox_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_F32x4RecipSqrtApprox_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_I32x4ConvertI16x8_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_I16x8ConvertI8x16_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_I16x8ConvertI32x4_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_I8x16ConvertI16x8_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4Select_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8Select_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16Select_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4NonCanonicalSelect_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8NonCanonicalSelect_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16NonCanonicalSelect_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_I16x8AddHoriz_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_F32x4AddHoriz_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4Dup_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4ZipLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4ZipRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4UnzipLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4UnzipRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4TransposeLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4TransposeRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x2Reverse_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S32x4Irregular_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8Dup_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8ZipLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8ZipRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8UnzipLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8UnzipRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8TransposeLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8TransposeRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x4Reverse_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x2Reverse_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S16x8Irregular_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16Dup_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16ZipLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16ZipRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16UnzipLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16UnzipRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16TransposeLeft_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16TransposeRight_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x8Reverse_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x4Reverse_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x2Reverse_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16Irregular_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_S8x16Concat_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_ReductionTest4_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_ReductionTest8_compiled': [SKIP],
+ 'test-run-wasm-simd/RunWasm_ReductionTest16_compiled': [SKIP],
+}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
##############################################################################
['arch == android_arm or arch == android_ia32', {
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 7e7e4be0b1..b44c9fca3e 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -62,7 +62,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
virtual byte* Generate() {
if (code_.is_null()) {
Schedule* schedule = this->Export();
- CallDescriptor* call_descriptor = this->call_descriptor();
+ auto call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
CompilationInfo info(ArrayVector("testing"), main_zone(), Code::STUB);
code_ = Pipeline::GenerateCodeForTesting(
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 2455d9f5b9..db50fa7f1c 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -5,7 +5,6 @@
#include "test/cctest/compiler/function-tester.h"
#include "src/api.h"
-#include "src/ast/ast-numbering.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/compiler/linkage.h"
@@ -150,7 +149,7 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
CHECK(function->is_compiled() ||
Compiler::Compile(function, Compiler::CLEAR_EXCEPTION));
CHECK(info.shared_info()->HasBytecodeArray());
- JSFunction::EnsureLiterals(function);
+ JSFunction::EnsureFeedbackVector(function);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, function->GetIsolate());
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index 24c2877938..66cdc8613a 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -247,11 +247,11 @@ class GraphBuilderTester : public HandleAndZoneScope,
virtual byte* Generate() {
if (code_.is_null()) {
Zone* zone = graph()->zone();
- CallDescriptor* desc =
+ auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
CompilationInfo info(ArrayVector("testing"), main_zone(), Code::STUB);
- code_ = Pipeline::GenerateCodeForTesting(&info, main_isolate(), desc,
- graph());
+ code_ = Pipeline::GenerateCodeForTesting(&info, main_isolate(),
+ call_descriptor, graph());
#ifdef ENABLE_DISASSEMBLER
if (!code_.is_null() && FLAG_print_opt_code) {
OFStream os(stdout);
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index 8661a5eb5c..32548601ec 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -33,7 +33,7 @@ Node* UndefinedConstant(CodeAssembler& m) {
return m.LoadRoot(Heap::kUndefinedValueRootIndex);
}
-Node* SmiFromWord32(CodeAssembler& m, Node* value) {
+Node* SmiFromInt32(CodeAssembler& m, Node* value) {
value = m.ChangeInt32ToIntPtr(value);
return m.BitcastWordToTaggedSigned(
m.WordShl(value, kSmiShiftSize + kSmiTagSize));
@@ -505,7 +505,7 @@ TEST(GotoIfExceptionMultiple) {
error.Bind(UndefinedConstant(m));
string = m.CallStub(to_string, context, second_value);
m.GotoIfException(string, &exception_handler2, &error);
- m.Return(SmiFromWord32(m, return_value.value()));
+ m.Return(SmiFromInt32(m, return_value.value()));
// try { ToString(param3); return 7 & ~2; } catch (e) { return e; }
m.Bind(&exception_handler2);
@@ -513,7 +513,7 @@ TEST(GotoIfExceptionMultiple) {
error.Bind(UndefinedConstant(m));
string = m.CallStub(to_string, context, third_value);
m.GotoIfException(string, &exception_handler3, &error);
- m.Return(SmiFromWord32(
+ m.Return(SmiFromInt32(
m, m.Word32And(return_value.value(),
m.Word32Xor(m.Int32Constant(2), m.Int32Constant(-1)))));
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 10158c2741..46c2255f13 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -41,7 +41,8 @@ int GetSlotSizeInBytes(MachineRepresentation rep) {
}
// Forward declaration.
-Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
+Handle<Code> BuildTeardownFunction(Isolate* isolate,
+ CallDescriptor* call_descriptor,
std::vector<AllocatedOperand> parameters);
// Build the `setup` function. It takes a code object and a FixedArray as
@@ -69,15 +70,16 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
// | | | results into lanes of a new |
// | | | 128-bit vector. |
//
-Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
+Handle<Code> BuildSetupFunction(Isolate* isolate,
+ CallDescriptor* call_descriptor,
std::vector<AllocatedOperand> parameters) {
CodeAssemblerTester tester(isolate, 2);
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
params.push_back(__ Parameter(0));
- params.push_back(
- __ HeapConstant(BuildTeardownFunction(isolate, descriptor, parameters)));
+ params.push_back(__ HeapConstant(
+ BuildTeardownFunction(isolate, call_descriptor, parameters)));
// First allocate the FixedArray which will hold the final results. Here we
// should take care of all allocations, meaning we allocate HeapNumbers and
// FixedArrays representing Simd128 values.
@@ -123,7 +125,7 @@ Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
tester.raw_assembler_for_testing()->machine()->I32x4Splat(),
__ Int32Constant(0));
for (int lane = 0; lane < 4; lane++) {
- Node* lane_value = __ SmiToWord32(
+ Node* lane_value = __ SmiToInt32(
__ LoadFixedArrayElement(element, __ IntPtrConstant(lane)));
vector = tester.raw_assembler_for_testing()->AddNode(
tester.raw_assembler_for_testing()->machine()->I32x4ReplaceLane(
@@ -140,7 +142,7 @@ Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
params.push_back(element);
}
__ Return(tester.raw_assembler_for_testing()->AddNode(
- tester.raw_assembler_for_testing()->common()->Call(descriptor),
+ tester.raw_assembler_for_testing()->common()->Call(call_descriptor),
static_cast<int>(params.size()), params.data()));
return tester.GenerateCodeCloseAndEscape();
}
@@ -187,9 +189,10 @@ Handle<Code> BuildSetupFunction(Isolate* isolate, CallDescriptor* descriptor,
// SKIP_WRITE_BARRIER. The reason for this is that `RecordWrite` may clobber the
// top 64 bits of Simd128 registers. This is the case on x64, ia32 and Arm64 for
// example.
-Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
+Handle<Code> BuildTeardownFunction(Isolate* isolate,
+ CallDescriptor* call_descriptor,
std::vector<AllocatedOperand> parameters) {
- CodeAssemblerTester tester(isolate, descriptor);
+ CodeAssemblerTester tester(isolate, call_descriptor);
CodeStubAssembler assembler(tester.state());
Node* result_array = __ Parameter(1);
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
@@ -203,7 +206,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
case MachineRepresentation::kFloat32:
param =
tester.raw_assembler_for_testing()->ChangeFloat32ToFloat64(param);
- // Fallthrough
+ V8_FALLTHROUGH;
case MachineRepresentation::kFloat64:
__ StoreObjectFieldNoWriteBarrier(
__ LoadFixedArrayElement(result_array, i), HeapNumber::kValueOffset,
@@ -213,7 +216,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate, CallDescriptor* descriptor,
Node* vector = __ LoadFixedArrayElement(result_array, i);
for (int lane = 0; lane < 4; lane++) {
Node* lane_value =
- __ SmiFromWord32(tester.raw_assembler_for_testing()->AddNode(
+ __ SmiFromInt32(tester.raw_assembler_for_testing()->AddNode(
tester.raw_assembler_for_testing()
->machine()
->I32x4ExtractLane(lane),
@@ -275,13 +278,7 @@ void PrintStateValue(std::ostream& os, Isolate* isolate, Handle<Object> value,
}
bool TestSimd128Moves() {
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
- // TODO(mips): Implement support for the kSimd128 representation in
- // AssembleMove and AssembleSwap on MIPS.
- return false;
-#else
return CpuFeatures::SupportsWasmSimd128();
-#endif
}
} // namespace
@@ -962,7 +959,7 @@ class CodeGeneratorTester {
generator_(environment->main_zone(), &frame_, &linkage_,
environment->code(), &info_, environment->main_isolate(),
base::Optional<OsrHelper>(), kNoSourcePosition, nullptr,
- nullptr) {
+ nullptr, LoadPoisoning::kDontPoison) {
// Force a frame to be created.
generator_.frame_access_state()->MarkHasFrame(true);
generator_.AssembleConstructFrame();
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 3ae652e869..012218974d 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -702,6 +702,7 @@ TEST(RemoveToNumberEffects) {
break;
case 2:
effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
+ break;
case 3:
effect_use = R.graph.NewNode(R.javascript.Add(R.binop_hints), ton, ton,
R.context(), frame_state, ton, R.start());
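Two switch-statement fixes above are complementary: test-code-generator.cc replaces a bare `// Fallthrough` comment with V8_FALLTHROUGH so intentional fallthrough is machine-checked, while test-js-typed-lowering.cc adds a `break` that was genuinely missing (case 2 silently fell into case 3 and overwrote effect_use). A minimal sketch of both; the macro definition here is an assumption modeled on common implementations, not copied from V8's headers:

#if defined(__clang__)
#define V8_FALLTHROUGH [[clang::fallthrough]]
#else
#define V8_FALLTHROUGH
#endif

int Widen(int bits) {
  int steps = 0;
  switch (bits) {
    case 32:
      ++steps;         // Extra conversion for the narrow representation...
      V8_FALLTHROUGH;  // ...then deliberately share the 64-bit path.
    case 64:
      ++steps;
      break;  // The missing-break bug fixed above is exactly this line left out.
    default:
      break;
  }
  return steps;
}

int main() { return Widen(32) == 2 && Widen(64) == 1 ? 0 : 1; }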
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 13f493e82d..f3f09a99e8 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -35,11 +35,9 @@ static Handle<JSFunction> Compile(const char* source) {
.ToHandleChecked();
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
- source_code, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
- MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
+ source_code, Compiler::ScriptDetails(), v8::ScriptOriginOptions(),
nullptr, nullptr, v8::ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE,
- MaybeHandle<FixedArray>())
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
@@ -52,8 +50,8 @@ TEST(TestLinkageCreate) {
Handle<SharedFunctionInfo> shared(function->shared());
CompilationInfo info(handles.main_zone(), function->GetIsolate(), shared,
function);
- CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
- CHECK(descriptor);
+ auto call_descriptor = Linkage::ComputeIncoming(info.zone(), &info);
+ CHECK(call_descriptor);
}
@@ -69,13 +67,13 @@ TEST(TestLinkageJSFunctionIncoming) {
Handle<SharedFunctionInfo> shared(function->shared());
CompilationInfo info(handles.main_zone(), function->GetIsolate(), shared,
function);
- CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
- CHECK(descriptor);
+ auto call_descriptor = Linkage::ComputeIncoming(info.zone(), &info);
+ CHECK(call_descriptor);
- CHECK_EQ(1 + i, static_cast<int>(descriptor->JSParameterCount()));
- CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
- CHECK_EQ(Operator::kNoProperties, descriptor->properties());
- CHECK_EQ(true, descriptor->IsJSFunctionCall());
+ CHECK_EQ(1 + i, static_cast<int>(call_descriptor->JSParameterCount()));
+ CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
+ CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
+ CHECK_EQ(true, call_descriptor->IsJSFunctionCall());
}
}
@@ -88,13 +86,13 @@ TEST(TestLinkageJSCall) {
function);
for (int i = 0; i < 32; i++) {
- CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
+ auto call_descriptor = Linkage::GetJSCallDescriptor(
info.zone(), false, i, CallDescriptor::kNoFlags);
- CHECK(descriptor);
- CHECK_EQ(i, static_cast<int>(descriptor->JSParameterCount()));
- CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
- CHECK_EQ(Operator::kNoProperties, descriptor->properties());
- CHECK_EQ(true, descriptor->IsJSFunctionCall());
+ CHECK(call_descriptor);
+ CHECK_EQ(i, static_cast<int>(call_descriptor->JSParameterCount()));
+ CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
+ CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
+ CHECK_EQ(true, call_descriptor->IsJSFunctionCall());
}
}
@@ -109,14 +107,14 @@ TEST(TestLinkageStubCall) {
Zone zone(isolate->allocator(), ZONE_NAME);
Callable callable = Builtins::CallableFor(isolate, Builtins::kToNumber);
CompilationInfo info(ArrayVector("test"), &zone, Code::STUB);
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate, &zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
- CHECK(descriptor);
- CHECK_EQ(0, static_cast<int>(descriptor->StackParameterCount()));
- CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
- CHECK_EQ(Operator::kNoProperties, descriptor->properties());
- CHECK_EQ(false, descriptor->IsJSFunctionCall());
+ CHECK(call_descriptor);
+ CHECK_EQ(0, static_cast<int>(call_descriptor->StackParameterCount()));
+ CHECK_EQ(1, static_cast<int>(call_descriptor->ReturnCount()));
+ CHECK_EQ(Operator::kNoProperties, call_descriptor->properties());
+ CHECK_EQ(false, call_descriptor->IsJSFunctionCall());
// TODO(titzer): test linkage creation for outgoing stub calls.
}
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 08cd73f4ce..4f663ed658 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -429,7 +429,7 @@ TEST(SignednessInWord32) {
CheckChange(IrOpcode::kCheckedTruncateTaggedToWord32,
MachineRepresentation::kTagged, Type::NonInternal(),
MachineRepresentation::kWord32,
- UseInfo::CheckedNumberOrOddballAsWord32());
+ UseInfo::CheckedNumberOrOddballAsWord32(VectorSlotPair()));
CheckTwoChanges(IrOpcode::kChangeInt32ToFloat64,
IrOpcode::kTruncateFloat64ToFloat32,
@@ -452,11 +452,13 @@ static void TestMinusZeroCheck(IrOpcode::Value expected, Type* from_type) {
expected, MachineRepresentation::kFloat64, from_type,
UseInfo::CheckedSignedSmallAsWord32(kIdentifyZeros, VectorSlotPair()));
- CheckChange(expected, MachineRepresentation::kFloat64, from_type,
- UseInfo::CheckedSigned32AsWord32(kDistinguishZeros));
+ CheckChange(
+ expected, MachineRepresentation::kFloat64, from_type,
+ UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, VectorSlotPair()));
- CheckChange(expected, MachineRepresentation::kFloat64, from_type,
- UseInfo::CheckedSigned32AsWord32(kDistinguishZeros));
+ CheckChange(
+ expected, MachineRepresentation::kFloat64, from_type,
+ UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, VectorSlotPair()));
}
TEST(MinusZeroCheck) {
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index ffcf0527a8..a093be400b 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -2971,7 +2971,6 @@ class CountBreakDebugDelegate : public v8::debug::DebugDelegate {
public:
void BreakProgramRequested(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<int>&) override {
debug_break_count++;
}
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 7f752b8872..0fc4d66e99 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -6742,7 +6742,7 @@ TEST(RunComputedCodeObject) {
CallDescriptor* c = Linkage::GetSimplifiedCDescriptor(r.zone(), &sig);
LinkageLocation ret[] = {c->GetReturnLocation(0)};
Signature<LinkageLocation> loc(1, 0, ret);
- CallDescriptor* desc = new (r.zone()) CallDescriptor( // --
+ auto call_descriptor = new (r.zone()) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
MachineType::AnyTagged(), // target_type
c->GetInputLocation(0), // target_loc
@@ -6753,7 +6753,7 @@ TEST(RunComputedCodeObject) {
c->CalleeSavedFPRegisters(), // callee saved FP
CallDescriptor::kNoFlags, // flags
"c-call-as-code");
- Node* call = r.AddNode(r.common()->Call(desc), phi);
+ Node* call = r.AddNode(r.common()->Call(call_descriptor), phi);
r.Return(call);
CHECK_EQ(33, r.Call(1));
@@ -6916,7 +6916,7 @@ TEST(Regression6640) {
// TODO(6792): No longer needed once WebAssembly code is off heap.
CodeSpaceMemoryModificationScope modification_scope(code->GetHeap());
it.rinfo()->update_wasm_function_table_size_reference(
- code->GetIsolate(), old_value, new_value, FLUSH_ICACHE_IF_NEEDED);
+ old_value, new_value, FLUSH_ICACHE_IF_NEEDED);
}
CHECK(m.Call());
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 42196e6454..c9c327553c 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -252,13 +252,12 @@ class Int32Signature : public MachineSignature {
}
};
-
-Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
- Schedule* schedule = nullptr) {
+Handle<Code> CompileGraph(const char* name, CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
CompilationInfo info(ArrayVector("testing"), graph->zone(), Code::STUB);
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, isolate, desc, graph, schedule);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &info, isolate, call_descriptor, graph, schedule);
CHECK(!code.is_null());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
@@ -269,10 +268,10 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
return code;
}
-
-Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
+Handle<Code> WrapWithCFunction(Handle<Code> inner,
+ CallDescriptor* call_descriptor) {
Zone zone(inner->GetIsolate()->allocator(), ZONE_NAME);
- int param_count = static_cast<int>(desc->ParameterCount());
+ int param_count = static_cast<int>(call_descriptor->ParameterCount());
GraphAndBuilders caller(&zone);
{
GraphAndBuilders& b = caller;
@@ -292,15 +291,15 @@ Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
args[index++] = start; // control.
// Build the call and return nodes.
- Node* call =
- b.graph()->NewNode(b.common()->Call(desc), param_count + 3, args);
+ Node* call = b.graph()->NewNode(b.common()->Call(call_descriptor),
+ param_count + 3, args);
Node* zero = b.graph()->NewNode(b.common()->Int32Constant(0));
Node* ret =
b.graph()->NewNode(b.common()->Return(), zero, call, call, start);
b.graph()->SetEnd(ret);
}
- MachineSignature* msig = desc->GetMachineSignature(&zone);
+ MachineSignature* msig = call_descriptor->GetMachineSignature(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig);
return CompileGraph("wrapper", cdesc, caller.graph());
@@ -419,9 +418,8 @@ void ArgsBuffer<float64>::Mutate() {
seed_++;
}
-
-int ParamCount(CallDescriptor* desc) {
- return static_cast<int>(desc->ParameterCount());
+int ParamCount(CallDescriptor* call_descriptor) {
+ return static_cast<int>(call_descriptor->ParameterCount());
}
@@ -638,8 +636,7 @@ static void Test_RunInt32SubWithRet(int retreg) {
Allocator params(parray, 2, nullptr, 0);
Allocator rets(rarray, 1, nullptr, 0);
RegisterConfig config(params, rets);
- CallDescriptor* desc = config.Create(&zone, &sig);
- TestInt32Sub(desc);
+ TestInt32Sub(config.Create(&zone, &sig));
}
}
@@ -687,8 +684,7 @@ TEST(Run_Int32Sub_all_allocatable_single) {
Allocator params(parray, 1, nullptr, 0);
Allocator rets(rarray, 1, nullptr, 0);
RegisterConfig config(params, rets);
- CallDescriptor* desc = config.Create(&zone, &sig);
- TestInt32Sub(desc);
+ TestInt32Sub(config.Create(&zone, &sig));
}
}
@@ -705,8 +701,7 @@ TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
Allocator params(parray, 2, nullptr, 0);
Allocator rets(rarray, 1, nullptr, 0);
RegisterConfig config(params, rets);
- CallDescriptor* desc = config.Create(&zone, &sig);
- CopyTwentyInt32(desc);
+ CopyTwentyInt32(config.Create(&zone, &sig));
}
}
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index 152ed448ef..4e5f99f413 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -20,10 +20,10 @@ namespace {
// Function that takes a number of pointer-sized integer arguments, calculates a
// weighted sum of them and returns it.
-Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* descriptor) {
- CodeAssemblerTester tester(isolate, descriptor, "callee");
+Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
+ CodeAssemblerTester tester(isolate, call_descriptor, "callee");
CodeStubAssembler assembler(tester.state());
- int param_count = static_cast<int>(descriptor->StackParameterCount());
+ int param_count = static_cast<int>(call_descriptor->StackParameterCount());
Node* sum = __ IntPtrConstant(0);
for (int i = 0; i < param_count; ++i) {
Node* product = __ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1));
@@ -35,9 +35,9 @@ Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* descriptor) {
// Function that tail-calls another function with a number of pointer-sized
// integer arguments.
-Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* descriptor,
+Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
CallDescriptor* callee_descriptor, bool tail) {
- CodeAssemblerTester tester(isolate, descriptor, "caller");
+ CodeAssemblerTester tester(isolate, call_descriptor, "caller");
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index 2fbc90d46d..34d6212d73 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -21,10 +21,10 @@ namespace {
// Function that takes a number of pointer-sized integer arguments, calculates a
// weighted sum of them and returns it.
-Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* descriptor) {
- CodeAssemblerTester tester(isolate, descriptor, "callee");
+Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
+ CodeAssemblerTester tester(isolate, call_descriptor, "callee");
CodeStubAssembler assembler(tester.state());
- int param_count = static_cast<int>(descriptor->StackParameterCount());
+ int param_count = static_cast<int>(call_descriptor->StackParameterCount());
Node* sum = __ IntPtrConstant(0);
for (int i = 0; i < param_count; ++i) {
Node* product = __ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1));
@@ -36,9 +36,9 @@ Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* descriptor) {
// Function that tail-calls another function with a number of pointer-sized
// integer arguments.
-Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* descriptor,
+Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
CallDescriptor* callee_descriptor) {
- CodeAssemblerTester tester(isolate, descriptor, "caller");
+ CodeAssemblerTester tester(isolate, call_descriptor, "caller");
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
diff --git a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
index 2f1536433f..30bdad59e6 100644
--- a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
@@ -32,14 +32,12 @@ static void UpdateFunctionTableSizeReferences(Handle<Code> code,
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsWasmFunctionTableSizeReference(mode)) {
- it.rinfo()->update_wasm_function_table_size_reference(isolate, old_size,
- new_size);
+ it.rinfo()->update_wasm_function_table_size_reference(old_size, new_size);
modified = true;
}
}
if (modified) {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
}
}
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index 5ee99c6320..d90d13cac5 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -44,8 +44,8 @@ TEST(ConcurrentMarking) {
new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
- concurrent_marking->WaitForTasks();
- concurrent_marking->EnsureCompleted();
+ concurrent_marking->Stop(
+ ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
delete concurrent_marking;
}
@@ -66,12 +66,39 @@ TEST(ConcurrentMarkingReschedule) {
new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->ScheduleTasks();
- concurrent_marking->WaitForTasks();
- concurrent_marking->EnsureCompleted();
+ concurrent_marking->Stop(
+ ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
PublishSegment(&shared, heap->undefined_value());
concurrent_marking->RescheduleTasksIfNeeded();
- concurrent_marking->WaitForTasks();
- concurrent_marking->EnsureCompleted();
+ concurrent_marking->Stop(
+ ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+ delete concurrent_marking;
+}
+
+TEST(ConcurrentMarkingPreemptAndReschedule) {
+ if (!i::FLAG_concurrent_marking) return;
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ CcTest::CollectAllGarbage();
+ if (!heap->incremental_marking()->IsStopped()) return;
+ MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+
+ ConcurrentMarking::MarkingWorklist shared, bailout, on_hold;
+ WeakObjects weak_objects;
+ ConcurrentMarking* concurrent_marking =
+ new ConcurrentMarking(heap, &shared, &bailout, &on_hold, &weak_objects);
+ for (int i = 0; i < 5000; i++)
+ PublishSegment(&shared, heap->undefined_value());
+ concurrent_marking->ScheduleTasks();
+ concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
+ for (int i = 0; i < 5000; i++)
+ PublishSegment(&shared, heap->undefined_value());
+ concurrent_marking->RescheduleTasksIfNeeded();
+ concurrent_marking->Stop(
+ ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
delete concurrent_marking;
}
@@ -85,8 +112,8 @@ TEST(ConcurrentMarkingMarkedBytes) {
CcTest::CollectAllGarbage();
if (!heap->incremental_marking()->IsStopped()) return;
heap::SimulateIncrementalMarking(heap, false);
- heap->concurrent_marking()->WaitForTasks();
- heap->concurrent_marking()->EnsureCompleted();
+ heap->concurrent_marking()->Stop(
+ ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
CHECK_GE(heap->concurrent_marking()->TotalMarkedBytes(), root->Size());
}
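
These hunks replace the WaitForTasks()/EnsureCompleted() pair with a single ConcurrentMarking::Stop(StopRequest) call. Three request kinds appear in the tests; the names come from the diff, while the one-line glosses below are inferred from how each test uses them (the authoritative declaration lives with ConcurrentMarking itself and is not part of this excerpt):

  // Inferred semantics, per usage in the tests above:
  enum class StopRequest {
    PREEMPT_TASKS,               // stop ASAP, even with marking work pending
    COMPLETE_ONGOING_TASKS,      // let already-running tasks finish, then stop
    COMPLETE_TASKS_FOR_TESTING,  // drain all published work before returning
  };

TEST(ConcurrentMarkingPreemptAndReschedule) exercises the full cycle: preempt mid-marking, publish more segments, then rely on RescheduleTasksIfNeeded() plus a final COMPLETE_TASKS_FOR_TESTING stop to pick up the remainder.
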
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index d14d39b9ee..8b735a2d70 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -112,8 +112,6 @@ TEST(ContextMaps) {
Context::STRING_FUNCTION_INDEX);
VerifyStoredPrototypeMap(isolate, Context::REGEXP_PROTOTYPE_MAP_INDEX,
Context::REGEXP_FUNCTION_INDEX);
- VerifyStoredPrototypeMap(isolate, Context::PROMISE_PROTOTYPE_MAP_INDEX,
- Context::PROMISE_FUNCTION_INDEX);
}
TEST(InitialObjects) {
@@ -1333,35 +1331,45 @@ TEST(CompilationCacheCachingBehavior) {
}
// The script should be in the cache now.
- InfoVectorPair pair = compilation_cache->LookupScript(
- source, Handle<Object>(), 0, 0, v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
- CHECK(pair.has_shared());
-
- // Check that the code cache entry survives at least on GC.
- // (Unless --optimize-for-size, in which case it might get collected
- // immediately.)
- if (!FLAG_optimize_for_size) {
- CcTest::CollectAllGarbage();
- pair = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
- CHECK(pair.has_shared());
+ {
+ v8::HandleScope scope(CcTest::isolate());
+ MaybeHandle<SharedFunctionInfo> cached_script =
+ compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
+ v8::ScriptOriginOptions(true, false),
+ native_context, language_mode);
+ CHECK(!cached_script.is_null());
}
- // Progress code age until it's old and ready for GC.
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- CHECK(pair.shared()->HasBytecodeArray());
- pair.shared()->bytecode_array()->MakeOlder();
+ // Check that the code cache entry survives at least one GC.
+ {
+ CcTest::CollectAllGarbage();
+ v8::HandleScope scope(CcTest::isolate());
+ MaybeHandle<SharedFunctionInfo> cached_script =
+ compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
+ v8::ScriptOriginOptions(true, false),
+ native_context, language_mode);
+ CHECK(!cached_script.is_null());
+
+ // Progress code age until it's old and ready for GC.
+ Handle<SharedFunctionInfo> shared = cached_script.ToHandleChecked();
+ CHECK(shared->HasBytecodeArray());
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ shared->bytecode_array()->MakeOlder();
+ }
}
CcTest::CollectAllGarbage();
- // Ensure code aging cleared the entry from the cache.
- pair = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(true, false),
- native_context, language_mode);
- CHECK(!pair.has_shared());
+
+ {
+ v8::HandleScope scope(CcTest::isolate());
+ // Ensure code aging cleared the entry from the cache.
+ MaybeHandle<SharedFunctionInfo> cached_script =
+ compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
+ v8::ScriptOriginOptions(true, false),
+ native_context, language_mode);
+ CHECK(cached_script.is_null());
+ }
}
@@ -3031,14 +3039,8 @@ static void CheckVectorIC(Handle<JSFunction> f, int slot_index,
Handle<FeedbackVector> vector = Handle<FeedbackVector>(f->feedback_vector());
FeedbackVectorHelper helper(vector);
FeedbackSlot slot = helper.slot(slot_index);
- if (vector->IsLoadIC(slot)) {
- LoadICNexus nexus(vector, slot);
- CHECK(nexus.StateFromFeedback() == desired_state);
- } else {
- CHECK(vector->IsKeyedLoadIC(slot));
- KeyedLoadICNexus nexus(vector, slot);
- CHECK(nexus.StateFromFeedback() == desired_state);
- }
+ FeedbackNexus nexus(vector, slot);
+ CHECK(nexus.StateFromFeedback() == desired_state);
}
TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
@@ -3408,7 +3410,8 @@ TEST(LargeObjectSlotRecording) {
class DummyVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, Object** start, Object** end) override {}
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {}
};
@@ -4238,7 +4241,7 @@ void CheckIC(Handle<JSFunction> function, int slot_index,
InlineCacheState state) {
FeedbackVector* vector = function->feedback_vector();
FeedbackSlot slot(slot_index);
- LoadICNexus nexus(vector, slot);
+ FeedbackNexus nexus(vector, slot);
CHECK_EQ(nexus.StateFromFeedback(), state);
}
@@ -5803,8 +5806,7 @@ Handle<Code> GenerateDummyImmovableCode(Isolate* isolate) {
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::STUB, Handle<Code>(), Builtins::kNoBuiltinId,
- HandlerTable::Empty(isolate), MaybeHandle<ByteArray>(),
- DeoptimizationData::Empty(isolate), kImmovable);
+ MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate), kImmovable);
CHECK(code->IsCode());
return code;
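
The test-heap.cc hunks above each track an API migration in this roll: CompilationCache::LookupScript() now yields a MaybeHandle<SharedFunctionInfo> rather than an InfoVectorPair, the LoadICNexus/KeyedLoadICNexus split collapses into a single FeedbackNexus, RootVisitor::VisitRootPointers() gains a description argument, and Factory::NewCode() no longer takes a HandlerTable. A hedged sketch of the MaybeHandle probe the cache test now performs (argument list copied from the hunk; ToHandle() is the standard non-throwing accessor):

  MaybeHandle<SharedFunctionInfo> cached = compilation_cache->LookupScript(
      source, Handle<Object>(), 0, 0, v8::ScriptOriginOptions(true, false),
      native_context, language_mode);
  Handle<SharedFunctionInfo> shared;
  if (cached.ToHandle(&shared)) {
    // Cache hit: `shared` is valid. A miss leaves the MaybeHandle null,
    // which is what the post-aging CHECK(cached_script.is_null()) asserts.
  }
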
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index bec9f978e8..38665486fa 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -80,12 +80,10 @@ class TestCodeRangeScope {
DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};
-static void VerifyMemoryChunk(Isolate* isolate,
- Heap* heap,
- CodeRange* code_range,
- size_t reserve_area_size,
- size_t commit_area_size,
- Executability executable) {
+static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
+ CodeRange* code_range, size_t reserve_area_size,
+ size_t commit_area_size, Executability executable,
+ Space* space) {
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
{
@@ -99,7 +97,7 @@ static void VerifyMemoryChunk(Isolate* isolate,
(executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
- reserve_area_size, commit_area_size, executable, nullptr);
+ reserve_area_size, commit_area_size, executable, space);
size_t alignment = code_range != nullptr && code_range->valid()
? MemoryChunk::kAlignment
: CommitPageSize();
@@ -178,36 +176,22 @@ TEST(MemoryChunk) {
const size_t code_range_size = 32 * MB;
if (!code_range->SetUp(code_range_size)) return;
- VerifyMemoryChunk(isolate,
- heap,
- code_range,
- reserve_area_size,
- initial_commit_area_size,
- EXECUTABLE);
-
- VerifyMemoryChunk(isolate,
- heap,
- code_range,
- reserve_area_size,
- initial_commit_area_size,
- NOT_EXECUTABLE);
+ VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
+ initial_commit_area_size, EXECUTABLE, heap->code_space());
+
+ VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
+ initial_commit_area_size, NOT_EXECUTABLE,
+ heap->old_space());
delete code_range;
// Without a valid CodeRange, i.e., omitting SetUp.
code_range = new CodeRange(isolate);
- VerifyMemoryChunk(isolate,
- heap,
- code_range,
- reserve_area_size,
- initial_commit_area_size,
- EXECUTABLE);
-
- VerifyMemoryChunk(isolate,
- heap,
- code_range,
- reserve_area_size,
- initial_commit_area_size,
- NOT_EXECUTABLE);
+ VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
+ initial_commit_area_size, EXECUTABLE, heap->code_space());
+
+ VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
+ initial_commit_area_size, NOT_EXECUTABLE,
+ heap->old_space());
delete code_range;
}
}
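
VerifyMemoryChunk() grows a trailing Space* parameter here because MemoryAllocator::AllocateChunk() now takes the owning space where the test previously passed nullptr. The call sites pair each executability with its natural owner, so chunks are verified against the space that would actually hold them:

  // Per the hunk: EXECUTABLE chunks verify against the code space,
  // NOT_EXECUTABLE ones against the old space.
  VerifyMemoryChunk(isolate, heap, code_range, reserve_area_size,
                    initial_commit_area_size, EXECUTABLE, heap->code_space());
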
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index d6a93fdda2..802b588a1a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -348,11 +348,11 @@ void BytecodeExpectationsPrinter::PrintCodeSnippet(
void BytecodeExpectationsPrinter::PrintHandlers(
std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
stream << "handlers: [\n";
- HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
- for (int i = 0, num_entries = table->NumberOfRangeEntries(); i < num_entries;
+ HandlerTable table(*bytecode_array);
+ for (int i = 0, num_entries = table.NumberOfRangeEntries(); i < num_entries;
++i) {
- stream << " [" << table->GetRangeStart(i) << ", " << table->GetRangeEnd(i)
- << ", " << table->GetRangeHandler(i) << "],\n";
+ stream << " [" << table.GetRangeStart(i) << ", " << table.GetRangeEnd(i)
+ << ", " << table.GetRangeHandler(i) << "],\n";
}
stream << "]\n";
}
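
Per this hunk, HandlerTable is no longer a heap object cast out of the bytecode array's handler_table field; it is constructed as a local view over the BytecodeArray, which is why every accessor switches from -> to . syntax. Side by side:

  // Before: heap-object cast, pointer access.
  HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
  int n = table->NumberOfRangeEntries();

  // After: stack-allocated view over the same data, value access.
  HandlerTable table(*bytecode_array);
  int n = table.NumberOfRangeEntries();
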
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 50d084fbc1..17d3498738 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -12,120 +12,109 @@ snippet: "
async function* f() { }
f();
"
-frame size: 9
+frame size: 8
parameter count: 1
-bytecode array length: 216
+bytecode array length: 192
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(0),
- B(Star), R(1),
- B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(Mov), R(closure), R(2),
- B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
+ B(Mov), R(closure), R(1),
+ B(Mov), R(this), R(2),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 17 E> */ B(StackCheck),
+ B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
- B(Mov), R(context), R(5),
B(Ldar), R(0),
- /* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(0),
- /* 22 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(6),
- B(Star), R(6),
+ /* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(6),
+ B(Ldar), R(5),
/* 17 E> */ B(Throw),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(6), R(3),
- B(Jump), U8(100),
+ B(Star), R(1),
+ B(Mov), R(5), R(2),
+ B(Jump), U8(98),
B(LdaUndefined),
- B(Star), R(7),
- B(Mov), R(0), R(6),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(6), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(6), U8(1),
- /* 22 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
+ B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(7),
+ B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(7),
+ B(TestEqualStrictNoFeedback), R(6),
B(JumpIfTrue), U8(5),
- B(Ldar), R(6),
+ B(Ldar), R(5),
B(ReThrow),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(6), R(3),
+ B(Star), R(1),
+ B(Mov), R(5), R(2),
B(Jump), U8(55),
B(Jump), U8(39),
- B(Star), R(6),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(6), U8(4), U8(5),
B(Star), R(5),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(5), U8(4), U8(5),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(5),
- B(PushContext), R(6),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(8),
- B(Mov), R(0), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(7), U8(2),
- B(PopContext), R(6),
- B(Star), R(3),
- B(LdaSmi), I8(1),
+ B(Star), R(7),
+ B(Mov), R(0), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
+ B(PopContext), R(5),
B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(3),
B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(8),
- B(Star), R(3),
- B(LdaSmi), I8(2),
B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(4),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
- B(Ldar), R(4),
+ B(Ldar), R(3),
B(SetPendingMessage),
- B(Ldar), R(2),
+ B(Ldar), R(1),
B(SwitchOnSmiNoFeedback), U8(6), U8(3), I8(0),
B(Jump), U8(22),
B(LdaTrue),
- B(Star), R(7),
- B(Mov), R(0), R(5),
- B(Mov), R(3), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(5), U8(3),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(Mov), R(2), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
/* 22 S> */ B(Return),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 22 S> */ B(Return),
- B(Ldar), R(3),
+ B(Ldar), R(2),
B(ReThrow),
B(LdaUndefined),
/* 22 S> */ B(Return),
]
constant pool: [
- Smi [37],
- Smi [79],
+ Smi [30],
+ Smi [70],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [20],
Smi [23],
]
handlers: [
- [40, 161, 169],
- [43, 122, 124],
+ [20, 137, 145],
+ [23, 98, 100],
]
---
@@ -133,141 +122,129 @@ snippet: "
async function* f() { yield 42 }
f();
"
-frame size: 9
+frame size: 8
parameter count: 1
-bytecode array length: 262
+bytecode array length: 236
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(0),
- B(Star), R(1),
- B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(Mov), R(closure), R(2),
- B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(3),
+ B(Mov), R(closure), R(1),
+ B(Mov), R(this), R(2),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 17 E> */ B(StackCheck),
+ B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
- B(Mov), R(context), R(5),
B(Ldar), R(0),
- /* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(0),
- /* 31 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(6),
- B(Star), R(6),
+ /* 17 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
- B(Ldar), R(6),
+ B(Ldar), R(5),
/* 17 E> */ B(Throw),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(6), R(3),
- B(Jump), U8(146),
+ B(Star), R(1),
+ B(Mov), R(5), R(2),
+ B(Jump), U8(142),
/* 22 S> */ B(LdaSmi), I8(42),
- B(Star), R(7),
- B(LdaFalse),
- B(Star), R(8),
- B(Mov), R(0), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(6), U8(3),
- B(SuspendGenerator), R(0), R(0), U8(6), U8(1),
- /* 31 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
+ B(LdaFalse),
+ B(Star), R(7),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(5), U8(3),
+ /* 22 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
- B(Ldar), R(6),
+ B(Ldar), R(5),
/* 22 E> */ B(Throw),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(6), R(3),
- B(Jump), U8(100),
+ B(Star), R(1),
+ B(Mov), R(5), R(2),
+ B(Jump), U8(98),
B(LdaUndefined),
- B(Star), R(7),
- B(Mov), R(0), R(6),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(6), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(6), U8(2),
- /* 31 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
+ B(SuspendGenerator), R(0), R(0), U8(5), U8(2),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(7),
+ B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(7),
+ B(TestEqualStrictNoFeedback), R(6),
B(JumpIfTrue), U8(5),
- B(Ldar), R(6),
+ B(Ldar), R(5),
B(ReThrow),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(6), R(3),
+ B(Star), R(1),
+ B(Mov), R(5), R(2),
B(Jump), U8(55),
B(Jump), U8(39),
- B(Star), R(6),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(6), U8(7), U8(8),
B(Star), R(5),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(5), U8(7), U8(8),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(5),
- B(PushContext), R(6),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(8),
- B(Mov), R(0), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(7), U8(2),
- B(PopContext), R(6),
- B(Star), R(3),
- B(LdaSmi), I8(1),
+ B(Star), R(7),
+ B(Mov), R(0), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
+ B(PopContext), R(5),
B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(3),
B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(8),
- B(Star), R(3),
- B(LdaSmi), I8(2),
B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(4),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
- B(Ldar), R(4),
+ B(Ldar), R(3),
B(SetPendingMessage),
- B(Ldar), R(2),
+ B(Ldar), R(1),
B(SwitchOnSmiNoFeedback), U8(9), U8(3), I8(0),
B(Jump), U8(22),
B(LdaTrue),
- B(Star), R(7),
- B(Mov), R(0), R(5),
- B(Mov), R(3), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(5), U8(3),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(Mov), R(2), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
/* 31 S> */ B(Return),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 31 S> */ B(Return),
- B(Ldar), R(3),
+ B(Ldar), R(2),
B(ReThrow),
B(LdaUndefined),
/* 31 S> */ B(Return),
]
constant pool: [
- Smi [37],
- Smi [83],
- Smi [125],
+ Smi [30],
+ Smi [74],
+ Smi [114],
Smi [15],
Smi [7],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [20],
Smi [23],
]
handlers: [
- [40, 207, 215],
- [43, 168, 170],
+ [20, 181, 189],
+ [23, 142, 144],
]
---
@@ -275,69 +252,53 @@ snippet: "
async function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 23
+frame size: 22
parameter count: 1
-bytecode array length: 536
+bytecode array length: 496
bytecodes: [
- B(Ldar), R(2),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(12),
- B(RestoreGeneratorState), R(2),
- B(Star), R(11),
- B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(Mov), R(closure), R(11),
+ B(Mov), R(this), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(11), U8(2),
B(Star), R(2),
/* 17 E> */ B(StackCheck),
+ B(Mov), R(context), R(13),
B(Mov), R(context), R(14),
- B(Mov), R(context), R(15),
B(Ldar), R(2),
- /* 17 E> */ B(SuspendGenerator), R(2), R(0), U8(16), U8(0),
- /* 50 S> */ B(Return),
- B(ResumeGenerator), R(2), R(11), R(0), U8(16),
- B(Star), R(16),
+ /* 17 E> */ B(SuspendGenerator), R(2), R(0), U8(15), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(15),
+ B(Star), R(15),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
- B(Ldar), R(16),
+ B(Ldar), R(15),
/* 17 E> */ B(Throw),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(16), R(13),
- B(JumpConstant), U8(20),
+ B(Star), R(11),
+ B(Mov), R(15), R(12),
+ B(JumpConstant), U8(19),
B(LdaZero),
B(Star), R(7),
+ B(Mov), R(context), R(17),
B(Mov), R(context), R(18),
- B(Mov), R(context), R(19),
/* 36 S> */ B(CreateArrayLiteral), U8(5), U8(0), U8(37),
+ B(Star), R(19),
+ B(LdaNamedProperty), R(19), U8(6), U8(1),
B(Star), R(20),
- B(LdaNamedProperty), R(20), U8(6), U8(1),
- B(Star), R(21),
- B(CallProperty0), R(21), R(20), U8(3),
+ B(CallProperty0), R(20), R(19), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
/* 36 E> */ B(LdaNamedProperty), R(4), U8(7), U8(5),
B(Star), R(5),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(8), U8(1), I8(1),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 31 S> */ B(CallProperty0), R(5), R(4), U8(7),
B(Star), R(6),
/* 31 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(9), U8(9),
- B(JumpIfToBooleanTrue), U8(70),
- B(LdaNamedProperty), R(6), U8(10), U8(11),
+ B(LdaNamedProperty), R(6), U8(8), U8(9),
+ B(JumpIfToBooleanTrue), U8(68),
+ B(LdaNamedProperty), R(6), U8(9), U8(11),
B(Star), R(8),
B(LdaSmi), I8(2),
B(Star), R(7),
@@ -345,57 +306,56 @@ bytecodes: [
/* 22 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 42 S> */ B(LdaFalse),
- B(Star), R(22),
- B(Mov), R(2), R(20),
- B(Mov), R(0), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
- /* 50 S> */ B(Return),
- B(ResumeGenerator), R(2), R(11), R(0), U8(20),
- B(Star), R(20),
+ B(Star), R(21),
+ B(Mov), R(2), R(19),
+ B(Mov), R(0), R(20),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(19), U8(3),
+ /* 42 E> */ B(SuspendGenerator), R(2), R(0), U8(19), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(19),
+ B(Star), R(19),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
- B(Ldar), R(20),
+ B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
+ B(Ldar), R(19),
/* 42 E> */ B(Throw),
B(LdaZero),
- B(Star), R(16),
- B(Mov), R(20), R(17),
+ B(Star), R(15),
+ B(Mov), R(19), R(16),
B(Jump), U8(62),
B(LdaZero),
B(Star), R(7),
- B(JumpLoop), U8(103), I8(0),
+ B(JumpLoop), U8(87), I8(0),
B(Jump), U8(40),
- B(Star), R(20),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(20), U8(13), U8(14),
B(Star), R(19),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(19), U8(12), U8(13),
+ B(Star), R(18),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(19),
- B(PushContext), R(20),
+ B(Ldar), R(18),
+ B(PushContext), R(19),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(7), U8(13),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
- B(PopContext), R(20),
+ B(Star), R(20),
+ B(CallRuntime), U16(Runtime::kReThrow), R(20), U8(1),
+ B(PopContext), R(19),
B(LdaSmi), I8(-1),
- B(Star), R(17),
B(Star), R(16),
+ B(Star), R(15),
B(Jump), U8(8),
- B(Star), R(17),
- B(LdaSmi), I8(1),
B(Star), R(16),
+ B(LdaSmi), I8(1),
+ B(Star), R(15),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(18),
+ B(Star), R(17),
B(LdaZero),
B(TestEqualStrict), R(7), U8(14),
B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(15), U8(15),
+ B(LdaNamedProperty), R(4), U8(14), U8(15),
B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
@@ -407,138 +367,136 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
+ B(Star), R(18),
+ B(LdaConstant), U8(15),
B(Star), R(19),
- B(LdaConstant), U8(16),
- B(Star), R(20),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
B(Throw),
- B(Mov), R(context), R(19),
- B(Mov), R(9), R(20),
- B(Mov), R(4), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
+ B(Mov), R(context), R(18),
+ B(Mov), R(9), R(19),
+ B(Mov), R(4), R(20),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(19),
+ B(Ldar), R(18),
B(Jump), U8(27),
- B(Mov), R(9), R(19),
- B(Mov), R(4), R(20),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
+ B(Mov), R(9), R(18),
+ B(Mov), R(4), R(19),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
B(Star), R(10),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(18),
+ B(Ldar), R(17),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(SwitchOnSmiNoFeedback), U8(17), U8(2), I8(0),
+ B(Ldar), R(15),
+ B(SwitchOnSmiNoFeedback), U8(16), U8(2), I8(0),
B(Jump), U8(13),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(17), R(13),
- B(Jump), U8(103),
- B(Ldar), R(17),
+ B(Star), R(11),
+ B(Mov), R(16), R(12),
+ B(Jump), U8(101),
+ B(Ldar), R(16),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(17),
- B(Mov), R(2), R(16),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(16), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(16), U8(2),
- /* 50 S> */ B(Return),
- B(ResumeGenerator), R(2), R(11), R(0), U8(16),
B(Star), R(16),
+ B(Mov), R(2), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(15), U8(2),
+ B(SuspendGenerator), R(2), R(0), U8(15), U8(2),
+ B(ResumeGenerator), R(2), R(0), U8(15),
+ B(Star), R(15),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(17),
+ B(Star), R(16),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(17),
+ B(TestEqualStrictNoFeedback), R(16),
B(JumpIfTrue), U8(5),
- B(Ldar), R(16),
+ B(Ldar), R(15),
B(ReThrow),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(16), R(13),
+ B(Star), R(11),
+ B(Mov), R(15), R(12),
B(Jump), U8(55),
B(Jump), U8(39),
- B(Star), R(16),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(16), U8(13), U8(19),
B(Star), R(15),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(15), U8(12), U8(18),
+ B(Star), R(14),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
- B(PushContext), R(16),
+ B(Ldar), R(14),
+ B(PushContext), R(15),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(Mov), R(2), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(17), U8(2),
- B(PopContext), R(16),
- B(Star), R(13),
- B(LdaSmi), I8(1),
+ B(Star), R(17),
+ B(Mov), R(2), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(16), U8(2),
+ B(PopContext), R(15),
B(Star), R(12),
+ B(LdaSmi), I8(1),
+ B(Star), R(11),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(13),
B(Star), R(12),
+ B(Star), R(11),
B(Jump), U8(8),
- B(Star), R(13),
- B(LdaSmi), I8(2),
B(Star), R(12),
+ B(LdaSmi), I8(2),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(14),
+ B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(2), U8(1),
- B(Ldar), R(14),
+ B(Ldar), R(13),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(21), U8(3), I8(0),
+ B(Ldar), R(11),
+ B(SwitchOnSmiNoFeedback), U8(20), U8(3), I8(0),
B(Jump), U8(22),
B(LdaTrue),
- B(Star), R(17),
- B(Mov), R(2), R(15),
- B(Mov), R(13), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(15), U8(3),
+ B(Star), R(16),
+ B(Mov), R(2), R(14),
+ B(Mov), R(12), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(14), U8(3),
/* 50 S> */ B(Return),
- B(Ldar), R(13),
+ B(Ldar), R(12),
/* 50 S> */ B(Return),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(ReThrow),
B(LdaUndefined),
/* 50 S> */ B(Return),
]
constant pool: [
- Smi [37],
- Smi [103],
- Smi [399],
+ Smi [30],
+ Smi [154],
+ Smi [374],
Smi [15],
Smi [7],
TUPLE2_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [72],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [14],
- FIXED_ARRAY_TYPE,
- Smi [420],
+ SCOPE_INFO_TYPE,
+ Smi [402],
Smi [6],
Smi [20],
Smi [23],
]
handlers: [
- [40, 481, 489],
- [43, 442, 444],
- [83, 263, 271],
- [86, 223, 225],
- [332, 342, 344],
+ [20, 441, 449],
+ [23, 402, 404],
+ [61, 225, 233],
+ [64, 185, 187],
+ [294, 304, 306],
]
---
@@ -547,237 +505,214 @@ snippet: "
async function* f() { yield* g() }
f();
"
-frame size: 18
+frame size: 17
parameter count: 1
-bytecode array length: 526
+bytecode array length: 482
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(0),
- B(Star), R(1),
- B(SwitchOnSmiNoFeedback), U8(0), U8(5), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(Mov), R(closure), R(2),
- B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(5),
+ B(Mov), R(closure), R(1),
+ B(Mov), R(this), R(2),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 44 E> */ B(StackCheck),
+ B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
- B(Mov), R(context), R(5),
B(Ldar), R(0),
- /* 44 E> */ B(SuspendGenerator), R(0), R(0), U8(6), U8(0),
- /* 60 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(6),
- B(Star), R(6),
+ /* 44 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
- B(Ldar), R(6),
+ B(Ldar), R(5),
/* 44 E> */ B(Throw),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(6), R(3),
- B(JumpConstant), U8(22),
+ B(Star), R(1),
+ B(Mov), R(5), R(2),
+ B(JumpConstant), U8(19),
/* 49 S> */ B(LdaGlobal), U8(7), U8(0),
- B(Star), R(13),
- /* 56 E> */ B(CallUndefinedReceiver0), R(13), U8(2),
- B(Star), R(11),
- B(LdaNamedProperty), R(11), U8(8), U8(4),
+ B(Star), R(12),
+ /* 56 E> */ B(CallUndefinedReceiver0), R(12), U8(2),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(8), U8(4),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(12),
- B(CallProperty0), R(12), R(11), U8(6),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(10), U8(6),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(11), U8(9), U8(8),
- B(Star), R(12),
- B(CallProperty0), R(12), R(11), U8(10),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(12), U8(1),
- B(Star), R(8),
- B(LdaNamedProperty), R(8), U8(10), U8(12),
- B(Star), R(10),
- B(LdaUndefined),
+ B(LdaNamedProperty), R(10), U8(9), U8(8),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(10), U8(10),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(11), U8(1),
+ B(Star), R(7),
+ B(LdaNamedProperty), R(7), U8(10), U8(12),
B(Star), R(9),
+ B(LdaUndefined),
+ B(Star), R(8),
B(LdaZero),
- B(Star), R(7),
- B(Ldar), R(1),
- B(SwitchOnSmiNoFeedback), U8(11), U8(3), I8(1),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
- B(Ldar), R(7),
- B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(1),
- B(CallProperty1), R(10), R(8), R(9), U8(14),
- B(Jump), U8(112),
- B(LdaNamedProperty), R(8), U8(16), U8(16),
+ B(Star), R(6),
+ B(Ldar), R(6),
+ B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(1),
+ B(CallProperty1), R(9), R(7), R(8), U8(14),
+ B(Jump), U8(110),
+ B(LdaNamedProperty), R(7), U8(13), U8(16),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(13),
- B(CallProperty1), R(13), R(8), R(9), U8(18),
- B(Jump), U8(95),
+ B(Star), R(12),
+ B(CallProperty1), R(12), R(7), R(8), U8(18),
+ B(Jump), U8(93),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(9), R(3),
- B(JumpConstant), U8(23),
- B(LdaNamedProperty), R(8), U8(17), U8(20),
+ B(Star), R(1),
+ B(Mov), R(8), R(2),
+ B(JumpConstant), U8(20),
+ B(LdaNamedProperty), R(7), U8(14), U8(20),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(13),
- B(CallProperty1), R(13), R(8), R(9), U8(22),
- B(Jump), U8(70),
- B(LdaNamedProperty), R(8), U8(16), U8(24),
- B(JumpIfUndefined), U8(59),
- B(JumpIfNull), U8(57),
- B(Star), R(13),
- B(CallProperty0), R(13), R(8), U8(26),
+ B(Star), R(12),
+ B(CallProperty1), R(12), R(7), R(8), U8(22),
+ B(Jump), U8(68),
+ B(LdaNamedProperty), R(7), U8(13), U8(24),
+ B(JumpIfUndefined), U8(57),
+ B(JumpIfNull), U8(55),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(7), U8(26),
B(Jump), U8(2),
- B(Star), R(14),
- B(Mov), R(0), R(13),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(13), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(13), U8(2),
- /* 60 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(13),
B(Star), R(13),
+ B(Mov), R(0), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
+ /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(12),
+ B(Star), R(12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(14),
+ B(Star), R(13),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(14),
+ B(TestEqualStrictNoFeedback), R(13),
B(JumpIfTrue), U8(5),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(ReThrow),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(JumpIfJSReceiver), U8(9),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(15), U8(1),
- B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
B(Star), R(14),
- B(Mov), R(0), R(13),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(13), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(13), U8(3),
- /* 60 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(13),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
B(Star), R(13),
+ B(Mov), R(0), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
+ /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(2),
+ B(ResumeGenerator), R(0), R(0), U8(12),
+ B(Star), R(12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(14),
+ B(Star), R(13),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(14),
+ B(TestEqualStrictNoFeedback), R(13),
B(JumpIfTrue), U8(5),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(ReThrow),
- B(Ldar), R(13),
- B(Mov), R(13), R(6),
+ B(Ldar), R(12),
+ B(Mov), R(12), R(5),
B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(18), U8(28),
- B(JumpIfToBooleanTrue), U8(40),
- B(LdaNamedProperty), R(6), U8(19), U8(30),
- B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
+ B(LdaNamedProperty), R(5), U8(15), U8(28),
+ B(JumpIfToBooleanTrue), U8(38),
+ B(LdaNamedProperty), R(5), U8(16), U8(30),
+ B(Star), R(15),
B(LdaFalse),
- B(Star), R(17),
- B(Mov), R(0), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(15), U8(3),
- B(SuspendGenerator), R(0), R(0), U8(15), U8(1),
- /* 60 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(15),
- B(Star), R(9),
+ B(Star), R(16),
+ B(Mov), R(0), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(14), U8(3),
+ /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(3),
+ B(ResumeGenerator), R(0), R(0), U8(14),
+ B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Star), R(6),
+ B(JumpLoop), U8(206), I8(0),
+ B(LdaNamedProperty), R(5), U8(16), U8(32),
B(Star), R(7),
- B(JumpLoop), U8(226), I8(0),
- B(LdaNamedProperty), R(6), U8(19), U8(32),
- B(Star), R(8),
B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(7),
+ B(TestEqualStrictNoFeedback), R(6),
B(JumpIfFalse), U8(10),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(8), R(3),
- B(Jump), U8(100),
+ B(Star), R(1),
+ B(Mov), R(7), R(2),
+ B(Jump), U8(98),
B(LdaUndefined),
- B(Star), R(7),
- B(Mov), R(0), R(6),
- B(CallJSRuntime), U8(%async_generator_await_uncaught), R(6), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(6), U8(4),
- /* 60 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(6),
B(Star), R(6),
+ B(Mov), R(0), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
+ B(SuspendGenerator), R(0), R(0), U8(5), U8(4),
+ B(ResumeGenerator), R(0), R(0), U8(5),
+ B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(7),
+ B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(7),
+ B(TestEqualStrictNoFeedback), R(6),
B(JumpIfTrue), U8(5),
- B(Ldar), R(6),
+ B(Ldar), R(5),
B(ReThrow),
B(LdaZero),
- B(Star), R(2),
- B(Mov), R(6), R(3),
+ B(Star), R(1),
+ B(Mov), R(5), R(2),
B(Jump), U8(55),
B(Jump), U8(39),
- B(Star), R(6),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(6), U8(20), U8(21),
B(Star), R(5),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(5), U8(17), U8(18),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(5),
- B(PushContext), R(6),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(8),
- B(Mov), R(0), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(7), U8(2),
- B(PopContext), R(6),
- B(Star), R(3),
- B(LdaSmi), I8(1),
+ B(Star), R(7),
+ B(Mov), R(0), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorReject), R(6), U8(2),
+ B(PopContext), R(5),
B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(3),
B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(8),
- B(Star), R(3),
- B(LdaSmi), I8(2),
B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(4),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(0), U8(1),
- B(Ldar), R(4),
+ B(Ldar), R(3),
B(SetPendingMessage),
- B(Ldar), R(2),
- B(SwitchOnSmiNoFeedback), U8(24), U8(3), I8(0),
+ B(Ldar), R(1),
+ B(SwitchOnSmiNoFeedback), U8(21), U8(3), I8(0),
B(Jump), U8(22),
B(LdaTrue),
- B(Star), R(7),
- B(Mov), R(0), R(5),
- B(Mov), R(3), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(5), U8(3),
+ B(Star), R(6),
+ B(Mov), R(0), R(4),
+ B(Mov), R(2), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorResolve), R(4), U8(3),
/* 60 S> */ B(Return),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 60 S> */ B(Return),
- B(Ldar), R(3),
+ B(Ldar), R(2),
B(ReThrow),
B(LdaUndefined),
/* 60 S> */ B(Return),
]
constant pool: [
- Smi [37],
- Smi [124],
- Smi [124],
- Smi [124],
- Smi [389],
+ Smi [30],
+ Smi [201],
+ Smi [251],
+ Smi [310],
+ Smi [360],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [211],
- Smi [98],
- Smi [150],
Smi [11],
Smi [36],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
@@ -785,15 +720,15 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
- Smi [410],
- Smi [297],
+ SCOPE_INFO_TYPE,
+ Smi [388],
+ Smi [289],
Smi [6],
Smi [20],
Smi [23],
]
handlers: [
- [40, 471, 479],
- [43, 432, 434],
+ [20, 427, 435],
+ [23, 388, 390],
]
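
All four AsyncGenerators goldens shrink for the same reasons, visible in the '-'/'+' pairs above: the hand-rolled resume prologue collapses into a single bytecode, SuspendGenerator no longer carries an inline B(Return) after it, the await helper moves from CallJSRuntime U8(%async_generator_await_uncaught) to InvokeIntrinsic U8(Runtime::k_AsyncGeneratorAwaitUncaught), and catch contexts reference SCOPE_INFO_TYPE constants instead of FIXED_ARRAY_TYPE. The prologue change accounts for most of the byte savings; from the first snippet:

  Before (the '-' lines):
    B(Ldar), R(0),
    B(JumpIfUndefined), U8(18),
    B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
    B(PushContext), R(2),
    B(RestoreGeneratorState), R(0),
    B(Star), R(1),
    B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
    B(Abort), U8(15),
    B(LdaSmi), I8(-2),
    B(Star), R(1),

  After (one fused dispatch):
    B(SwitchOnGeneratorState), R(0), U8(0), U8(2),

The BasicLoops, BreakableBlocks, and ContextVariables goldens below change only the FIXED_ARRAY_TYPE -> SCOPE_INFO_TYPE constant-pool entry, while ForAwaitOf additionally swaps the CallJSRuntime promise helpers (%async_function_await_uncaught/_caught, %promise_resolve, %promise_internal_reject) for their InvokeIntrinsic counterparts (Runtime::k_AsyncFunctionAwaitUncaught/Caught, k_ResolvePromise, k_RejectPromise).
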
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index 951e4b5408..e4185c1178 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -723,7 +723,7 @@ bytecodes: [
/* 137 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index 2ddfd5512d..cd9416495e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -121,7 +121,7 @@ bytecodes: [
/* 103 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
@@ -174,7 +174,7 @@ bytecodes: [
/* 162 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
index 3d262e4b83..1664f865c3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -122,7 +122,7 @@ bytecodes: [
/* 101 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index fd83a29fd9..ebf33ccf3d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -14,80 +14,64 @@ snippet: "
}
f();
"
-frame size: 24
+frame size: 23
parameter count: 1
-bytecode array length: 554
+bytecode array length: 514
bytecodes: [
- B(Ldar), R(2),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(13),
- B(RestoreGeneratorState), R(2),
- B(Star), R(12),
- B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(12),
- B(Mov), R(closure), R(13),
- B(Mov), R(this), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(Mov), R(closure), R(12),
+ B(Mov), R(this), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(11),
+ B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
- B(Mov), R(context), R(16),
B(LdaZero),
B(Star), R(7),
+ B(Mov), R(context), R(18),
B(Mov), R(context), R(19),
- B(Mov), R(context), R(20),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(21),
- B(LdaNamedProperty), R(21), U8(4), U8(1),
+ B(Star), R(20),
+ B(LdaNamedProperty), R(20), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(22),
- B(CallProperty0), R(22), R(21), U8(3),
+ B(Star), R(21),
+ B(CallProperty0), R(21), R(20), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(21), U8(5), U8(5),
- B(Star), R(22),
- B(CallProperty0), R(22), R(21), U8(7),
- B(Star), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(22), U8(1),
+ B(LdaNamedProperty), R(20), U8(5), U8(5),
+ B(Star), R(21),
+ B(CallProperty0), R(21), R(20), U8(7),
+ B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
B(Star), R(4),
/* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
B(Star), R(5),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(12),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
- B(Star), R(22),
- B(Mov), R(2), R(21),
- B(Mov), R(11), R(23),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(21), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(21), U8(0),
- /* 57 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(21),
B(Star), R(21),
+ B(Mov), R(2), R(20),
+ B(Mov), R(11), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(20), U8(3),
+ /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(20),
+ B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(22),
+ B(Star), R(21),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(22),
+ B(TestEqualStrictNoFeedback), R(21),
B(JumpIfTrue), U8(5),
- B(Ldar), R(21),
+ B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(21), R(6),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(21), U8(1),
+ B(Mov), R(20), R(6),
+ /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(8), U8(13),
+ B(LdaNamedProperty), R(6), U8(7), U8(13),
B(JumpIfToBooleanTrue), U8(25),
- B(LdaNamedProperty), R(6), U8(9), U8(15),
+ B(LdaNamedProperty), R(6), U8(8), U8(15),
B(Star), R(8),
B(LdaSmi), I8(2),
B(Star), R(7),
@@ -96,91 +80,66 @@ bytecodes: [
B(Mov), R(3), R(0),
B(LdaZero),
B(Star), R(7),
- B(JumpLoop), U8(98), I8(0),
+ B(JumpLoop), U8(82), I8(0),
B(Jump), U8(40),
- B(Star), R(21),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(21), U8(10), U8(11),
B(Star), R(20),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(20), U8(9), U8(10),
+ B(Star), R(19),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(20),
- B(PushContext), R(21),
+ B(Ldar), R(19),
+ B(PushContext), R(20),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(7), U8(17),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(22),
- B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
- B(PopContext), R(21),
+ B(Star), R(21),
+ B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
+ B(PopContext), R(20),
B(LdaSmi), I8(-1),
- B(Star), R(18),
B(Star), R(17),
+ B(Star), R(16),
B(Jump), U8(7),
- B(Star), R(18),
- B(LdaZero),
B(Star), R(17),
+ B(LdaZero),
+ B(Star), R(16),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaZero),
B(TestEqualStrict), R(7), U8(18),
- B(JumpIfTrue), U8(171),
- B(LdaNamedProperty), R(4), U8(12), U8(19),
+ B(JumpIfTrue), U8(167),
+ B(LdaNamedProperty), R(4), U8(11), U8(19),
B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(160),
+ B(Jump), U8(156),
B(LdaSmi), I8(1),
B(TestEqualStrict), R(7), U8(21),
- B(JumpIfFalse), U8(88),
+ B(JumpIfFalse), U8(86),
B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
+ B(Star), R(19),
+ B(LdaConstant), U8(12),
B(Star), R(20),
- B(LdaConstant), U8(13),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
- B(Mov), R(context), R(20),
- B(Mov), R(9), R(21),
- B(Mov), R(4), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
- B(Star), R(22),
- B(Mov), R(2), R(21),
- B(Mov), R(11), R(23),
- B(CallJSRuntime), U8(%async_function_await_caught), R(21), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(21), U8(1),
- /* 57 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(21),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(22),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(22),
- B(JumpIfTrue), U8(5),
- B(Ldar), R(21),
- B(ReThrow),
- B(Ldar), R(21),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(20),
- B(Jump), U8(67),
+ B(Mov), R(context), R(19),
B(Mov), R(9), R(20),
B(Mov), R(4), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
B(Mov), R(2), R(20),
B(Mov), R(11), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(2),
- /* 57 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(20),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitCaught), R(20), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(20),
B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(Star), R(21),
@@ -189,94 +148,116 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(20), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
+ B(Ldar), R(20),
+ B(Jump), U8(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(19),
+ B(Jump), U8(65),
+ B(Mov), R(9), R(19),
+ B(Mov), R(4), R(20),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
+ B(Star), R(20),
+ B(Mov), R(2), R(19),
+ B(Mov), R(11), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(19), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
+ B(ResumeGenerator), R(2), R(0), U8(19),
+ B(Star), R(19),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(20),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(20),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(19),
+ B(ReThrow),
+ B(Mov), R(19), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(19),
+ B(Ldar), R(18),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(17),
+ B(TestEqualStrictNoFeedback), R(16),
B(JumpIfFalse), U8(5),
- B(Ldar), R(18),
+ B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(11), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
+ B(Star), R(17),
+ B(Mov), R(11), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(11), R(14),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
B(Jump), U8(58),
B(Jump), U8(42),
- B(Star), R(17),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(10), U8(14),
B(Star), R(16),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(16), U8(9), U8(13),
+ B(Star), R(15),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
+ B(Ldar), R(15),
+ B(PushContext), R(16),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaFalse),
- B(Star), R(20),
- B(Mov), R(11), R(18),
- B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
- B(PopContext), R(17),
+ B(Star), R(19),
+ B(Mov), R(11), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
+ B(PopContext), R(16),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(11), R(14),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(14),
B(Star), R(13),
+ B(Star), R(12),
B(Jump), U8(8),
- B(Star), R(14),
- B(LdaSmi), I8(1),
B(Star), R(13),
+ B(LdaSmi), I8(1),
+ B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
+ B(Star), R(14),
B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(14),
+ B(Ldar), R(13),
/* 57 S> */ B(Return),
- B(Ldar), R(14),
+ B(Ldar), R(13),
B(ReThrow),
B(LdaUndefined),
/* 57 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [332],
- Smi [391],
+ Smi [110],
+ Smi [309],
+ Smi [366],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [34],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [46, 513, 521],
- [49, 471, 473],
- [55, 250, 258],
- [58, 210, 212],
- [318, 369, 371],
+ [26, 473, 481],
+ [29, 431, 433],
+ [35, 214, 222],
+ [38, 174, 176],
+ [282, 331, 333],
]
---
@@ -286,80 +267,64 @@ snippet: "
}
f();
"
-frame size: 24
+frame size: 23
parameter count: 1
-bytecode array length: 583
+bytecode array length: 543
bytecodes: [
- B(Ldar), R(2),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(13),
- B(RestoreGeneratorState), R(2),
- B(Star), R(12),
- B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(12),
- B(Mov), R(closure), R(13),
- B(Mov), R(this), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(Mov), R(closure), R(12),
+ B(Mov), R(this), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(11),
+ B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
- B(Mov), R(context), R(16),
B(LdaZero),
B(Star), R(7),
+ B(Mov), R(context), R(18),
B(Mov), R(context), R(19),
- B(Mov), R(context), R(20),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(21),
- B(LdaNamedProperty), R(21), U8(4), U8(1),
+ B(Star), R(20),
+ B(LdaNamedProperty), R(20), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(22),
- B(CallProperty0), R(22), R(21), U8(3),
+ B(Star), R(21),
+ B(CallProperty0), R(21), R(20), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(21), U8(5), U8(5),
- B(Star), R(22),
- B(CallProperty0), R(22), R(21), U8(7),
- B(Star), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(22), U8(1),
+ B(LdaNamedProperty), R(20), U8(5), U8(5),
+ B(Star), R(21),
+ B(CallProperty0), R(21), R(20), U8(7),
+ B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
B(Star), R(4),
/* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
B(Star), R(5),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(12),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
- B(Star), R(22),
- B(Mov), R(2), R(21),
- B(Mov), R(11), R(23),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(21), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(21), U8(0),
- /* 68 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(21),
B(Star), R(21),
+ B(Mov), R(2), R(20),
+ B(Mov), R(11), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(20), U8(3),
+ /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(20),
+ B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(22),
+ B(Star), R(21),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(22),
+ B(TestEqualStrictNoFeedback), R(21),
B(JumpIfTrue), U8(5),
- B(Ldar), R(21),
+ B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(21), R(6),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(21), U8(1),
+ B(Mov), R(20), R(6),
+ /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(8), U8(13),
+ B(LdaNamedProperty), R(6), U8(7), U8(13),
B(JumpIfToBooleanTrue), U8(27),
- B(LdaNamedProperty), R(6), U8(9), U8(15),
+ B(LdaNamedProperty), R(6), U8(8), U8(15),
B(Star), R(8),
B(LdaSmi), I8(2),
B(Star), R(7),
@@ -367,93 +332,68 @@ bytecodes: [
/* 23 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 56 S> */ B(LdaZero),
- B(Star), R(17),
- B(Mov), R(8), R(18),
+ B(Star), R(16),
+ B(Mov), R(8), R(17),
B(Jump), U8(56),
B(Jump), U8(40),
- B(Star), R(21),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(21), U8(10), U8(11),
B(Star), R(20),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(20), U8(9), U8(10),
+ B(Star), R(19),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(20),
- B(PushContext), R(21),
+ B(Ldar), R(19),
+ B(PushContext), R(20),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(7), U8(17),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(22),
- B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
- B(PopContext), R(21),
+ B(Star), R(21),
+ B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
+ B(PopContext), R(20),
B(LdaSmi), I8(-1),
- B(Star), R(18),
B(Star), R(17),
+ B(Star), R(16),
B(Jump), U8(8),
- B(Star), R(18),
- B(LdaSmi), I8(1),
B(Star), R(17),
+ B(LdaSmi), I8(1),
+ B(Star), R(16),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaZero),
B(TestEqualStrict), R(7), U8(18),
- B(JumpIfTrue), U8(171),
- B(LdaNamedProperty), R(4), U8(12), U8(19),
+ B(JumpIfTrue), U8(167),
+ B(LdaNamedProperty), R(4), U8(11), U8(19),
B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(160),
+ B(Jump), U8(156),
B(LdaSmi), I8(1),
B(TestEqualStrict), R(7), U8(21),
- B(JumpIfFalse), U8(88),
+ B(JumpIfFalse), U8(86),
B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
+ B(Star), R(19),
+ B(LdaConstant), U8(12),
B(Star), R(20),
- B(LdaConstant), U8(13),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
- B(Mov), R(context), R(20),
- B(Mov), R(9), R(21),
- B(Mov), R(4), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
- B(Star), R(22),
- B(Mov), R(2), R(21),
- B(Mov), R(11), R(23),
- B(CallJSRuntime), U8(%async_function_await_caught), R(21), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(21), U8(1),
- /* 68 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(21),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(22),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(22),
- B(JumpIfTrue), U8(5),
- B(Ldar), R(21),
- B(ReThrow),
- B(Ldar), R(21),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(20),
- B(Jump), U8(67),
+ B(Mov), R(context), R(19),
B(Mov), R(9), R(20),
B(Mov), R(4), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
B(Mov), R(2), R(20),
B(Mov), R(11), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(2),
- /* 68 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(20),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitCaught), R(20), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(20),
B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(Star), R(21),
@@ -462,106 +402,128 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(20), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
+ B(Ldar), R(20),
+ B(Jump), U8(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(19),
+ B(Jump), U8(65),
+ B(Mov), R(9), R(19),
+ B(Mov), R(4), R(20),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
+ B(Star), R(20),
+ B(Mov), R(2), R(19),
+ B(Mov), R(11), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(19), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
+ B(ResumeGenerator), R(2), R(0), U8(19),
+ B(Star), R(19),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(20),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(20),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(19),
+ B(ReThrow),
+ B(Mov), R(19), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(19),
+ B(Ldar), R(18),
B(SetPendingMessage),
- B(Ldar), R(17),
- B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
+ B(Ldar), R(16),
+ B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
B(Jump), U8(13),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(18), R(14),
+ B(Star), R(12),
+ B(Mov), R(17), R(13),
B(Jump), U8(81),
- B(Ldar), R(18),
+ B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(11), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
+ B(Star), R(17),
+ B(Mov), R(11), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
B(LdaSmi), I8(1),
- B(Star), R(13),
- B(Mov), R(11), R(14),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
B(Jump), U8(59),
B(Jump), U8(43),
- B(Star), R(17),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(10), U8(16),
B(Star), R(16),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(16), U8(9), U8(15),
+ B(Star), R(15),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
+ B(Ldar), R(15),
+ B(PushContext), R(16),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaFalse),
- B(Star), R(20),
- B(Mov), R(11), R(18),
- B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
- B(PopContext), R(17),
+ B(Star), R(19),
+ B(Mov), R(11), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
+ B(PopContext), R(16),
B(LdaSmi), I8(1),
- B(Star), R(13),
- B(Mov), R(11), R(14),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(14),
B(Star), R(13),
+ B(Star), R(12),
B(Jump), U8(8),
- B(Star), R(14),
- B(LdaSmi), I8(2),
B(Star), R(13),
+ B(LdaSmi), I8(2),
+ B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
+ B(Star), R(14),
B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(17), U8(3), I8(0),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(16), U8(3), I8(0),
B(Jump), U8(21),
- B(Mov), R(11), R(16),
- B(Mov), R(14), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(16), U8(2),
+ B(Mov), R(11), R(15),
+ B(Mov), R(13), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(15), U8(2),
B(Ldar), R(11),
/* 68 S> */ B(Return),
- B(Ldar), R(14),
+ B(Ldar), R(13),
/* 68 S> */ B(Return),
- B(Ldar), R(14),
+ B(Ldar), R(13),
B(ReThrow),
B(LdaUndefined),
/* 68 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [335],
- Smi [394],
+ Smi [110],
+ Smi [312],
+ Smi [369],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [34],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [14],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [19],
Smi [22],
]
handlers: [
- [46, 529, 537],
- [49, 486, 488],
- [55, 252, 260],
- [58, 212, 214],
- [321, 372, 374],
+ [26, 489, 497],
+ [29, 446, 448],
+ [35, 216, 224],
+ [38, 176, 178],
+ [285, 334, 336],
]
---
@@ -574,80 +536,64 @@ snippet: "
}
f();
"
-frame size: 24
+frame size: 23
parameter count: 1
-bytecode array length: 572
+bytecode array length: 532
bytecodes: [
- B(Ldar), R(2),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(13),
- B(RestoreGeneratorState), R(2),
- B(Star), R(12),
- B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(12),
- B(Mov), R(closure), R(13),
- B(Mov), R(this), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(3),
+ B(Mov), R(closure), R(12),
+ B(Mov), R(this), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(11),
+ B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
- B(Mov), R(context), R(16),
B(LdaZero),
B(Star), R(7),
+ B(Mov), R(context), R(18),
B(Mov), R(context), R(19),
- B(Mov), R(context), R(20),
/* 43 S> */ B(CreateArrayLiteral), U8(3), U8(0), U8(37),
- B(Star), R(21),
- B(LdaNamedProperty), R(21), U8(4), U8(1),
+ B(Star), R(20),
+ B(LdaNamedProperty), R(20), U8(4), U8(1),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(22),
- B(CallProperty0), R(22), R(21), U8(3),
+ B(Star), R(21),
+ B(CallProperty0), R(21), R(20), U8(3),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(21), U8(5), U8(5),
- B(Star), R(22),
- B(CallProperty0), R(22), R(21), U8(7),
- B(Star), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(22), U8(1),
+ B(LdaNamedProperty), R(20), U8(5), U8(5),
+ B(Star), R(21),
+ B(CallProperty0), R(21), R(20), U8(7),
+ B(Star), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(21), U8(1),
B(Star), R(4),
/* 43 E> */ B(LdaNamedProperty), R(4), U8(6), U8(9),
B(Star), R(5),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(12),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 40 S> */ B(CallProperty0), R(5), R(4), U8(11),
- B(Star), R(22),
- B(Mov), R(2), R(21),
- B(Mov), R(11), R(23),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(21), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(21), U8(0),
- /* 114 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(21),
B(Star), R(21),
+ B(Mov), R(2), R(20),
+ B(Mov), R(11), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(20), U8(3),
+ /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(20), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(20),
+ B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(22),
+ B(Star), R(21),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(22),
+ B(TestEqualStrictNoFeedback), R(21),
B(JumpIfTrue), U8(5),
- B(Ldar), R(21),
+ B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(21), R(6),
- /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(21), U8(1),
+ B(Mov), R(20), R(6),
+ /* 40 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(8), U8(13),
+ B(LdaNamedProperty), R(6), U8(7), U8(13),
B(JumpIfToBooleanTrue), U8(43),
- B(LdaNamedProperty), R(6), U8(9), U8(15),
+ B(LdaNamedProperty), R(6), U8(8), U8(15),
B(Star), R(8),
B(LdaSmi), I8(2),
B(Star), R(7),
@@ -664,91 +610,66 @@ bytecodes: [
/* 103 S> */ B(Jump), U8(8),
B(LdaZero),
B(Star), R(7),
- B(JumpLoop), U8(116), I8(0),
+ B(JumpLoop), U8(100), I8(0),
B(Jump), U8(40),
- B(Star), R(21),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(21), U8(10), U8(11),
B(Star), R(20),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(20), U8(9), U8(10),
+ B(Star), R(19),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(20),
- B(PushContext), R(21),
+ B(Ldar), R(19),
+ B(PushContext), R(20),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(7), U8(19),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(22),
- B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
- B(PopContext), R(21),
+ B(Star), R(21),
+ B(CallRuntime), U16(Runtime::kReThrow), R(21), U8(1),
+ B(PopContext), R(20),
B(LdaSmi), I8(-1),
- B(Star), R(18),
B(Star), R(17),
+ B(Star), R(16),
B(Jump), U8(7),
- B(Star), R(18),
- B(LdaZero),
B(Star), R(17),
+ B(LdaZero),
+ B(Star), R(16),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaZero),
B(TestEqualStrict), R(7), U8(20),
- B(JumpIfTrue), U8(171),
- B(LdaNamedProperty), R(4), U8(12), U8(21),
+ B(JumpIfTrue), U8(167),
+ B(LdaNamedProperty), R(4), U8(11), U8(21),
B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(Jump), U8(160),
+ B(Jump), U8(156),
B(LdaSmi), I8(1),
B(TestEqualStrict), R(7), U8(23),
- B(JumpIfFalse), U8(88),
+ B(JumpIfFalse), U8(86),
B(Ldar), R(9),
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
+ B(Star), R(19),
+ B(LdaConstant), U8(12),
B(Star), R(20),
- B(LdaConstant), U8(13),
- B(Star), R(21),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
- B(Mov), R(context), R(20),
- B(Mov), R(9), R(21),
- B(Mov), R(4), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
- B(Star), R(22),
- B(Mov), R(2), R(21),
- B(Mov), R(11), R(23),
- B(CallJSRuntime), U8(%async_function_await_caught), R(21), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(21), U8(1),
- /* 114 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(21),
- B(Star), R(21),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(22),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(22),
- B(JumpIfTrue), U8(5),
- B(Ldar), R(21),
- B(ReThrow),
- B(Ldar), R(21),
- B(Jump), U8(6),
- B(LdaTheHole),
- B(SetPendingMessage),
- B(Ldar), R(20),
- B(Jump), U8(67),
+ B(Mov), R(context), R(19),
B(Mov), R(9), R(20),
B(Mov), R(4), R(21),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(21),
B(Mov), R(2), R(20),
B(Mov), R(11), R(22),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(20), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(20), U8(2),
- /* 114 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(20),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitCaught), R(20), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(20), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(20),
B(Star), R(20),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(Star), R(21),
@@ -757,94 +678,116 @@ bytecodes: [
B(JumpIfTrue), U8(5),
B(Ldar), R(20),
B(ReThrow),
- B(Mov), R(20), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(20), U8(1),
+ B(Ldar), R(20),
+ B(Jump), U8(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(19),
+ B(Jump), U8(65),
+ B(Mov), R(9), R(19),
+ B(Mov), R(4), R(20),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(19), U8(2),
+ B(Star), R(20),
+ B(Mov), R(2), R(19),
+ B(Mov), R(11), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(19), U8(3),
+ B(SuspendGenerator), R(2), R(0), U8(19), U8(2),
+ B(ResumeGenerator), R(2), R(0), U8(19),
+ B(Star), R(19),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
+ B(Star), R(20),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(20),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(19),
+ B(ReThrow),
+ B(Mov), R(19), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(19), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(19),
+ B(Ldar), R(18),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(17),
+ B(TestEqualStrictNoFeedback), R(16),
B(JumpIfFalse), U8(5),
- B(Ldar), R(18),
+ B(Ldar), R(17),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(18),
- B(Mov), R(11), R(17),
- B(CallJSRuntime), U8(%promise_resolve), R(17), U8(2),
+ B(Star), R(17),
+ B(Mov), R(11), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(16), U8(2),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(11), R(14),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
B(Jump), U8(58),
B(Jump), U8(42),
- B(Star), R(17),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(10), U8(14),
B(Star), R(16),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(16), U8(9), U8(13),
+ B(Star), R(15),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
- B(PushContext), R(17),
+ B(Ldar), R(15),
+ B(PushContext), R(16),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
+ B(Star), R(18),
B(LdaFalse),
- B(Star), R(20),
- B(Mov), R(11), R(18),
- B(CallJSRuntime), U8(%promise_internal_reject), R(18), U8(3),
- B(PopContext), R(17),
+ B(Star), R(19),
+ B(Mov), R(11), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(17), U8(3),
+ B(PopContext), R(16),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(11), R(14),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(14),
B(Star), R(13),
+ B(Star), R(12),
B(Jump), U8(8),
- B(Star), R(14),
- B(LdaSmi), I8(1),
B(Star), R(13),
+ B(LdaSmi), I8(1),
+ B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
+ B(Star), R(14),
B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(14),
+ B(Ldar), R(13),
/* 114 S> */ B(Return),
- B(Ldar), R(14),
+ B(Ldar), R(13),
B(ReThrow),
B(LdaUndefined),
/* 114 S> */ B(Return),
]
constant pool: [
- Smi [95],
- Smi [350],
- Smi [409],
+ Smi [110],
+ Smi [327],
+ Smi [384],
TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [34],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [46, 531, 539],
- [49, 489, 491],
- [55, 268, 276],
- [58, 228, 230],
- [336, 387, 389],
+ [26, 491, 499],
+ [29, 449, 451],
+ [35, 232, 240],
+ [38, 192, 194],
+ [300, 349, 351],
]
---
@@ -943,7 +886,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(17),
B(LdaConstant), U8(10),
B(Star), R(18),
@@ -980,7 +923,7 @@ bytecodes: [
B(LdaUndefined),
B(Star), R(15),
B(Mov), R(9), R(14),
- B(CallJSRuntime), U8(%promise_resolve), R(14), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(14), U8(2),
B(LdaSmi), I8(1),
B(Star), R(10),
B(Mov), R(9), R(11),
@@ -999,7 +942,7 @@ bytecodes: [
B(LdaFalse),
B(Star), R(17),
B(Mov), R(9), R(15),
- B(CallJSRuntime), U8(%promise_internal_reject), R(15), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(15), U8(3),
B(PopContext), R(14),
B(LdaSmi), I8(1),
B(Star), R(10),
@@ -1023,7 +966,7 @@ bytecodes: [
B(Jump), U8(21),
B(Mov), R(9), R(13),
B(Mov), R(11), R(14),
- B(CallJSRuntime), U8(%promise_resolve), R(13), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(13), U8(2),
B(Ldar), R(9),
/* 96 S> */ B(Return),
B(Ldar), R(11),
@@ -1042,12 +985,12 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [14],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [19],
Smi [22],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 46e62ed891..ea75bf6665 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -86,7 +86,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(12),
B(LdaConstant), U8(8),
B(Star), R(13),
@@ -126,7 +126,7 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
@@ -220,7 +220,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(13),
B(LdaConstant), U8(8),
B(Star), R(14),
@@ -262,7 +262,7 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
@@ -366,7 +366,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(12),
B(LdaConstant), U8(8),
B(Star), R(13),
@@ -406,7 +406,7 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
@@ -502,7 +502,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(11),
B(LdaConstant), U8(10),
B(Star), R(12),
@@ -546,7 +546,7 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 1ea568ac21..31cf55dad5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -90,7 +90,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(14),
B(LdaConstant), U8(7),
B(Star), R(15),
@@ -129,7 +129,7 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
@@ -261,7 +261,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(14),
B(LdaConstant), U8(11),
B(Star), R(15),
@@ -296,16 +296,16 @@ bytecodes: [
/* 54 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["1"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
@@ -408,7 +408,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(12),
B(LdaConstant), U8(9),
B(Star), R(13),
@@ -446,10 +446,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
@@ -503,7 +503,7 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(6),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(74),
+ B(LdaSmi), I8(76),
B(Star), R(18),
B(LdaConstant), U8(4),
B(Star), R(19),
@@ -559,7 +559,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
@@ -600,7 +600,7 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
@@ -617,48 +617,38 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 20
+frame size: 19
parameter count: 2
-bytecode array length: 320
+bytecode array length: 298
bytecodes: [
- B(Ldar), R(3),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(3), U8(1),
- B(PushContext), R(13),
- B(RestoreGeneratorState), R(3),
- B(Star), R(12),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(12),
+ B(SwitchOnGeneratorState), R(3), U8(0), U8(1),
B(CreateFunctionContext), U8(1),
- B(PushContext), R(13),
+ B(PushContext), R(12),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(closure), R(14),
- B(Mov), R(this), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(14), U8(2),
+ B(Mov), R(closure), R(13),
+ B(Mov), R(this), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
B(Star), R(3),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(3), R(0), U8(14), U8(0),
- /* 55 S> */ B(Return),
- B(ResumeGenerator), R(3), R(12), R(0), U8(14),
- B(Star), R(14),
+ /* 11 E> */ B(SuspendGenerator), R(3), R(0), U8(13), U8(0),
+ B(ResumeGenerator), R(3), R(0), U8(13),
+ B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
- B(Ldar), R(14),
+ B(Ldar), R(13),
/* 11 E> */ B(Throw),
- B(Ldar), R(14),
+ B(Ldar), R(13),
/* 55 S> */ B(Return),
B(LdaZero),
B(Star), R(8),
+ B(Mov), R(context), R(15),
B(Mov), R(context), R(16),
- B(Mov), R(context), R(17),
/* 35 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(17),
+ B(LdaNamedProperty), R(17), U8(3), U8(0),
B(Star), R(18),
- B(LdaNamedProperty), R(18), U8(3), U8(0),
- B(Star), R(19),
- B(CallProperty0), R(19), R(18), U8(2),
+ B(CallProperty0), R(18), R(17), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(5),
@@ -684,30 +674,30 @@ bytecodes: [
B(Star), R(8),
B(JumpLoop), U8(47), I8(0),
B(Jump), U8(36),
- B(Star), R(18),
- B(Ldar), R(closure),
- /* 50 E> */ B(CreateCatchContext), R(18), U8(7), U8(8),
- B(PushContext), R(18),
B(Star), R(17),
+ B(Ldar), R(closure),
+ /* 50 E> */ B(CreateCatchContext), R(17), U8(7), U8(8),
+ B(PushContext), R(17),
+ B(Star), R(16),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(8), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(8),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(19),
- B(CallRuntime), U16(Runtime::kReThrow), R(19), U8(1),
- B(PopContext), R(18),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kReThrow), R(18), U8(1),
+ B(PopContext), R(17),
B(LdaSmi), I8(-1),
- B(Star), R(15),
B(Star), R(14),
+ B(Star), R(13),
B(Jump), U8(7),
- B(Star), R(15),
- B(LdaZero),
B(Star), R(14),
+ B(LdaZero),
+ B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(16),
+ B(Star), R(15),
B(LdaZero),
B(TestEqualStrict), R(8), U8(13),
B(JumpIfTrue), U8(90),
@@ -723,41 +713,41 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
- B(Star), R(17),
+ B(Wide), B(LdaSmi), I16(146),
+ B(Star), R(16),
B(LdaConstant), U8(10),
- B(Star), R(18),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
- B(Mov), R(context), R(17),
- B(Mov), R(10), R(18),
- B(Mov), R(5), R(19),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(18), U8(2),
+ B(Mov), R(context), R(16),
+ B(Mov), R(10), R(17),
+ B(Mov), R(5), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(17),
+ B(Ldar), R(16),
B(Jump), U8(27),
- B(Mov), R(10), R(17),
- B(Mov), R(5), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
+ B(Mov), R(10), R(16),
+ B(Mov), R(5), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
B(Star), R(11),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Ldar), R(16),
+ B(Ldar), R(15),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(14),
+ B(TestEqualStrictNoFeedback), R(13),
B(JumpIfFalse), U8(5),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(ReThrow),
B(LdaUndefined),
/* 55 S> */ B(Return),
]
constant pool: [
- Smi [37],
+ Smi [30],
Smi [10],
Smi [7],
SYMBOL_TYPE,
@@ -765,14 +755,14 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
]
handlers: [
- [78, 196, 204],
- [81, 160, 162],
- [264, 274, 276],
+ [56, 174, 182],
+ [59, 138, 140],
+ [242, 252, 254],
]
---
@@ -782,68 +772,52 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 19
+frame size: 18
parameter count: 2
-bytecode array length: 380
+bytecode array length: 342
bytecodes: [
- B(Ldar), R(2),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(12),
- B(RestoreGeneratorState), R(2),
- B(Star), R(11),
- B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
B(CreateFunctionContext), U8(1),
- B(PushContext), R(12),
+ B(PushContext), R(11),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(closure), R(13),
- B(Mov), R(this), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
+ B(Mov), R(closure), R(12),
+ B(Mov), R(this), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
B(Star), R(2),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(13), U8(0),
- /* 49 S> */ B(Return),
- B(ResumeGenerator), R(2), R(11), R(0), U8(13),
- B(Star), R(13),
+ /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(12), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(12),
+ B(Star), R(12),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(13),
+ B(Ldar), R(12),
/* 11 E> */ B(Throw),
- B(Ldar), R(13),
+ B(Ldar), R(12),
/* 49 S> */ B(Return),
B(LdaZero),
B(Star), R(7),
+ B(Mov), R(context), R(14),
B(Mov), R(context), R(15),
- B(Mov), R(context), R(16),
/* 35 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(16),
+ B(LdaNamedProperty), R(16), U8(4), U8(0),
B(Star), R(17),
- B(LdaNamedProperty), R(17), U8(4), U8(0),
- B(Star), R(18),
- B(CallProperty0), R(18), R(17), U8(2),
+ B(CallProperty0), R(17), R(16), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
/* 35 E> */ B(LdaNamedProperty), R(4), U8(5), U8(4),
B(Star), R(5),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(6), U8(1), I8(1),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 30 S> */ B(CallProperty0), R(5), R(4), U8(6),
B(Star), R(6),
/* 30 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(7), U8(8),
- B(JumpIfToBooleanTrue), U8(67),
- B(LdaNamedProperty), R(6), U8(8), U8(10),
+ B(LdaNamedProperty), R(6), U8(6), U8(8),
+ B(JumpIfToBooleanTrue), U8(65),
+ B(LdaNamedProperty), R(6), U8(7), U8(10),
B(Star), R(8),
B(LdaSmi), I8(2),
B(Star), R(7),
@@ -851,53 +825,52 @@ bytecodes: [
/* 21 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 40 S> */ B(LdaFalse),
- B(Star), R(18),
- B(Mov), R(0), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(17), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(17), U8(1),
- /* 49 S> */ B(Return),
- B(ResumeGenerator), R(2), R(11), R(0), U8(17),
B(Star), R(17),
+ B(Mov), R(0), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
+ /* 40 E> */ B(SuspendGenerator), R(2), R(0), U8(16), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(16),
+ B(Star), R(16),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
- B(Ldar), R(17),
+ B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(0),
+ B(Ldar), R(16),
/* 40 E> */ B(Throw),
B(LdaZero),
- B(Star), R(13),
- B(Mov), R(17), R(14),
+ B(Star), R(12),
+ B(Mov), R(16), R(13),
B(Jump), U8(58),
B(LdaZero),
B(Star), R(7),
- B(JumpLoop), U8(100), I8(0),
+ B(JumpLoop), U8(84), I8(0),
B(Jump), U8(36),
- B(Star), R(17),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(17), U8(11), U8(12),
- B(PushContext), R(17),
B(Star), R(16),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(16), U8(10), U8(11),
+ B(PushContext), R(16),
+ B(Star), R(15),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(7), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(18),
- B(CallRuntime), U16(Runtime::kReThrow), R(18), U8(1),
- B(PopContext), R(17),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kReThrow), R(17), U8(1),
+ B(PopContext), R(16),
B(LdaSmi), I8(-1),
- B(Star), R(14),
B(Star), R(13),
+ B(Star), R(12),
B(Jump), U8(8),
- B(Star), R(14),
- B(LdaSmi), I8(1),
B(Star), R(13),
+ B(LdaSmi), I8(1),
+ B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(15),
+ B(Star), R(14),
B(LdaZero),
B(TestEqualStrict), R(7), U8(13),
B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(13), U8(14),
+ B(LdaNamedProperty), R(4), U8(12), U8(14),
B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
@@ -909,64 +882,63 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
+ B(Star), R(15),
+ B(LdaConstant), U8(13),
B(Star), R(16),
- B(LdaConstant), U8(14),
- B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(16),
- B(Mov), R(9), R(17),
- B(Mov), R(4), R(18),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
+ B(Mov), R(context), R(15),
+ B(Mov), R(9), R(16),
+ B(Mov), R(4), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(16),
+ B(Ldar), R(15),
B(Jump), U8(27),
- B(Mov), R(9), R(16),
- B(Mov), R(4), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
+ B(Mov), R(9), R(15),
+ B(Mov), R(4), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
B(Star), R(10),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
+ B(Ldar), R(12),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(14),
+ B(Ldar), R(13),
/* 49 S> */ B(Return),
- B(Ldar), R(14),
+ B(Ldar), R(13),
B(ReThrow),
B(LdaUndefined),
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [37],
- Smi [96],
+ Smi [30],
+ Smi [144],
Smi [10],
Smi [7],
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [69],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
- [78, 249, 257],
- [81, 213, 215],
- [318, 328, 330],
+ [56, 211, 219],
+ [59, 175, 177],
+ [280, 290, 292],
]
---
@@ -1065,7 +1037,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
B(Star), R(21),
B(LdaConstant), U8(7),
B(Star), R(22),
@@ -1098,7 +1070,7 @@ bytecodes: [
B(LdaUndefined),
B(Star), R(19),
B(Mov), R(12), R(18),
- B(CallJSRuntime), U8(%promise_resolve), R(18), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(18), U8(2),
B(LdaZero),
B(Star), R(14),
B(Mov), R(12), R(15),
@@ -1117,7 +1089,7 @@ bytecodes: [
B(LdaFalse),
B(Star), R(21),
B(Mov), R(12), R(19),
- B(CallJSRuntime), U8(%promise_internal_reject), R(19), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(19), U8(3),
B(PopContext), R(18),
B(LdaZero),
B(Star), R(14),
@@ -1152,10 +1124,10 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
]
@@ -1174,118 +1146,102 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 25
+frame size: 24
parameter count: 2
-bytecode array length: 459
+bytecode array length: 423
bytecodes: [
- B(Ldar), R(2),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(13),
- B(RestoreGeneratorState), R(2),
- B(Star), R(12),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(12),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(1),
B(CreateFunctionContext), U8(1),
- B(PushContext), R(13),
+ B(PushContext), R(12),
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(closure), R(14),
- B(Mov), R(this), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(14), U8(2),
+ B(Mov), R(closure), R(13),
+ B(Mov), R(this), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(13), U8(2),
B(Star), R(2),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(11),
+ B(Mov), R(context), R(15),
B(Mov), R(context), R(16),
- B(Mov), R(context), R(17),
B(LdaZero),
B(Star), R(7),
+ B(Mov), R(context), R(19),
B(Mov), R(context), R(20),
- B(Mov), R(context), R(21),
/* 40 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(21),
+ B(LdaNamedProperty), R(21), U8(1), U8(0),
B(Star), R(22),
- B(LdaNamedProperty), R(22), U8(1), U8(0),
- B(Star), R(23),
- B(CallProperty0), R(23), R(22), U8(2),
+ B(CallProperty0), R(22), R(21), U8(2),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
/* 40 E> */ B(LdaNamedProperty), R(4), U8(2), U8(4),
B(Star), R(5),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(3), U8(1), I8(0),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(12),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 35 S> */ B(CallProperty0), R(5), R(4), U8(6),
B(Star), R(6),
/* 35 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(4), U8(8),
- B(JumpIfToBooleanTrue), U8(65),
- B(LdaNamedProperty), R(6), U8(5), U8(10),
+ B(LdaNamedProperty), R(6), U8(3), U8(8),
+ B(JumpIfToBooleanTrue), U8(63),
+ B(LdaNamedProperty), R(6), U8(4), U8(10),
B(Star), R(8),
B(LdaSmi), I8(2),
B(Star), R(7),
B(Mov), R(8), R(3),
/* 26 E> */ B(StackCheck),
B(Mov), R(3), R(0),
- /* 45 S> */ B(Mov), R(2), R(22),
- B(Mov), R(0), R(23),
- B(Mov), R(11), R(24),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(22), U8(3),
- B(SuspendGenerator), R(2), R(0), U8(22), U8(0),
- /* 54 S> */ B(Return),
- B(ResumeGenerator), R(2), R(12), R(0), U8(22),
- B(Star), R(22),
+ /* 45 S> */ B(Mov), R(2), R(21),
+ B(Mov), R(0), R(22),
+ B(Mov), R(11), R(23),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(21), U8(3),
+ /* 45 E> */ B(SuspendGenerator), R(2), R(0), U8(21), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(21),
+ B(Star), R(21),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(Star), R(23),
+ B(Star), R(22),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(23),
+ B(TestEqualStrictNoFeedback), R(22),
B(JumpIfTrue), U8(5),
- B(Ldar), R(22),
+ B(Ldar), R(21),
B(ReThrow),
B(LdaZero),
B(Star), R(7),
- B(JumpLoop), U8(98), I8(0),
+ B(JumpLoop), U8(82), I8(0),
B(Jump), U8(40),
- B(Star), R(22),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(22), U8(6), U8(7),
B(Star), R(21),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(21), U8(5), U8(6),
+ B(Star), R(20),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(21),
- B(PushContext), R(22),
+ B(Ldar), R(20),
+ B(PushContext), R(21),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(7), U8(12),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(23),
- B(CallRuntime), U16(Runtime::kReThrow), R(23), U8(1),
- B(PopContext), R(22),
+ B(Star), R(22),
+ B(CallRuntime), U16(Runtime::kReThrow), R(22), U8(1),
+ B(PopContext), R(21),
B(LdaSmi), I8(-1),
- B(Star), R(19),
B(Star), R(18),
+ B(Star), R(17),
B(Jump), U8(7),
- B(Star), R(19),
- B(LdaZero),
B(Star), R(18),
+ B(LdaZero),
+ B(Star), R(17),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(20),
+ B(Star), R(19),
B(LdaZero),
B(TestEqualStrict), R(7), U8(13),
B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(8), U8(14),
+ B(LdaNamedProperty), R(4), U8(7), U8(14),
B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
@@ -1297,107 +1253,106 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
+ B(Star), R(20),
+ B(LdaConstant), U8(8),
B(Star), R(21),
- B(LdaConstant), U8(9),
- B(Star), R(22),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(21), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(20), U8(2),
B(Throw),
- B(Mov), R(context), R(21),
- B(Mov), R(9), R(22),
- B(Mov), R(4), R(23),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(22), U8(2),
+ B(Mov), R(context), R(20),
+ B(Mov), R(9), R(21),
+ B(Mov), R(4), R(22),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(21),
+ B(Ldar), R(20),
B(Jump), U8(27),
- B(Mov), R(9), R(21),
- B(Mov), R(4), R(22),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(21), U8(2),
+ B(Mov), R(9), R(20),
+ B(Mov), R(4), R(21),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(20), U8(2),
B(Star), R(10),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(20),
+ B(Ldar), R(19),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(18),
+ B(TestEqualStrictNoFeedback), R(17),
B(JumpIfFalse), U8(5),
- B(Ldar), R(19),
+ B(Ldar), R(18),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(19),
- B(Mov), R(11), R(18),
- B(CallJSRuntime), U8(%promise_resolve), R(18), U8(2),
+ B(Star), R(18),
+ B(Mov), R(11), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(17), U8(2),
B(LdaZero),
- B(Star), R(14),
- B(Mov), R(11), R(15),
+ B(Star), R(13),
+ B(Mov), R(11), R(14),
B(Jump), U8(58),
B(Jump), U8(42),
- B(Star), R(18),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(18), U8(6), U8(10),
B(Star), R(17),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(17), U8(5), U8(9),
+ B(Star), R(16),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(17),
- B(PushContext), R(18),
+ B(Ldar), R(16),
+ B(PushContext), R(17),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(20),
+ B(Star), R(19),
B(LdaFalse),
- B(Star), R(21),
- B(Mov), R(11), R(19),
- B(CallJSRuntime), U8(%promise_internal_reject), R(19), U8(3),
- B(PopContext), R(18),
+ B(Star), R(20),
+ B(Mov), R(11), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(18), U8(3),
+ B(PopContext), R(17),
B(LdaZero),
- B(Star), R(14),
- B(Mov), R(11), R(15),
+ B(Star), R(13),
+ B(Mov), R(11), R(14),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(15),
B(Star), R(14),
+ B(Star), R(13),
B(Jump), U8(8),
- B(Star), R(15),
- B(LdaSmi), I8(1),
B(Star), R(14),
+ B(LdaSmi), I8(1),
+ B(Star), R(13),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(16),
+ B(Star), R(15),
B(CallJSRuntime), U8(%async_function_promise_release), R(11), U8(1),
- B(Ldar), R(16),
+ B(Ldar), R(15),
B(SetPendingMessage),
- B(Ldar), R(14),
- B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(0),
+ B(Ldar), R(13),
+ B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(15),
+ B(Ldar), R(14),
/* 54 S> */ B(Return),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(ReThrow),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [81],
+ Smi [134],
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [72],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [54, 418, 426],
- [57, 376, 378],
- [63, 236, 244],
- [66, 196, 198],
- [304, 314, 316],
+ [34, 382, 390],
+ [37, 340, 342],
+ [43, 200, 208],
+ [46, 160, 162],
+ [268, 278, 280],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index f09cd9fd00..7401d4d1c0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -11,40 +11,30 @@ snippet: "
function* f() { }
f();
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 66
+bytecode array length: 44
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(0),
- B(Star), R(1),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(Mov), R(closure), R(2),
- B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
+ B(Mov), R(closure), R(1),
+ B(Mov), R(this), R(2),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
- /* 16 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(2),
- B(Star), R(2),
+ /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(1),
+ B(Star), R(1),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
- B(Ldar), R(2),
+ B(Ldar), R(1),
/* 11 E> */ B(Throw),
- B(Ldar), R(2),
+ B(Ldar), R(1),
/* 16 S> */ B(Return),
B(LdaUndefined),
/* 16 S> */ B(Return),
]
constant pool: [
- Smi [29],
+ Smi [22],
Smi [10],
Smi [7],
]
@@ -56,56 +46,45 @@ snippet: "
function* f() { yield 42 }
f();
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 104
+bytecode array length: 80
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(0),
- B(Star), R(1),
- B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(Mov), R(closure), R(2),
- B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
+ B(Mov), R(closure), R(1),
+ B(Mov), R(this), R(2),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
- /* 25 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(2),
- B(Star), R(2),
+ /* 11 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(1),
+ B(Star), R(1),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(2),
+ B(Ldar), R(1),
/* 11 E> */ B(Throw),
- B(Ldar), R(2),
+ B(Ldar), R(1),
/* 25 S> */ B(Return),
/* 16 S> */ B(LdaSmi), I8(42),
- B(Star), R(2),
+ B(Star), R(1),
B(LdaFalse),
- B(Star), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(2), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(2), U8(1),
- /* 25 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(2),
B(Star), R(2),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(1), U8(2),
+ /* 16 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(1),
+ B(Star), R(1),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
- B(Ldar), R(2),
+ B(Ldar), R(1),
/* 16 E> */ B(Throw),
- B(Ldar), R(2),
+ B(Ldar), R(1),
/* 25 S> */ B(Return),
B(LdaUndefined),
/* 25 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [67],
+ Smi [22],
+ Smi [58],
Smi [10],
Smi [7],
Smi [10],
@@ -119,64 +98,48 @@ snippet: "
function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 18
+frame size: 17
parameter count: 1
-bytecode array length: 374
+bytecode array length: 336
bytecodes: [
- B(Ldar), R(2),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(12),
- B(RestoreGeneratorState), R(2),
- B(Star), R(11),
- B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(11),
- B(Mov), R(closure), R(12),
- B(Mov), R(this), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(12), U8(2),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(2),
+ B(Mov), R(closure), R(11),
+ B(Mov), R(this), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(11), U8(2),
B(Star), R(2),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(12), U8(0),
- /* 44 S> */ B(Return),
- B(ResumeGenerator), R(2), R(11), R(0), U8(12),
- B(Star), R(12),
+ /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(11), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(11),
+ B(Star), R(11),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(12),
+ B(Ldar), R(11),
/* 11 E> */ B(Throw),
- B(Ldar), R(12),
+ B(Ldar), R(11),
/* 44 S> */ B(Return),
B(LdaZero),
B(Star), R(7),
+ B(Mov), R(context), R(13),
B(Mov), R(context), R(14),
- B(Mov), R(context), R(15),
/* 30 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
+ B(Star), R(15),
+ B(LdaNamedProperty), R(15), U8(5), U8(1),
B(Star), R(16),
- B(LdaNamedProperty), R(16), U8(5), U8(1),
- B(Star), R(17),
- B(CallProperty0), R(17), R(16), U8(3),
+ B(CallProperty0), R(16), R(15), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(4),
/* 30 E> */ B(LdaNamedProperty), R(4), U8(6), U8(5),
B(Star), R(5),
- B(Ldar), R(11),
- B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(1),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 25 S> */ B(CallProperty0), R(5), R(4), U8(7),
B(Star), R(6),
/* 25 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(LdaNamedProperty), R(6), U8(8), U8(9),
- B(JumpIfToBooleanTrue), U8(67),
- B(LdaNamedProperty), R(6), U8(9), U8(11),
+ B(LdaNamedProperty), R(6), U8(7), U8(9),
+ B(JumpIfToBooleanTrue), U8(65),
+ B(LdaNamedProperty), R(6), U8(8), U8(11),
B(Star), R(8),
B(LdaSmi), I8(2),
B(Star), R(7),
@@ -184,53 +147,52 @@ bytecodes: [
/* 16 E> */ B(StackCheck),
B(Mov), R(3), R(0),
/* 36 S> */ B(LdaFalse),
- B(Star), R(17),
- B(Mov), R(0), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
- B(SuspendGenerator), R(2), R(0), U8(16), U8(1),
- /* 44 S> */ B(Return),
- B(ResumeGenerator), R(2), R(11), R(0), U8(16),
B(Star), R(16),
+ B(Mov), R(0), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
+ /* 36 E> */ B(SuspendGenerator), R(2), R(0), U8(15), U8(1),
+ B(ResumeGenerator), R(2), R(0), U8(15),
+ B(Star), R(15),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
- B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
- B(Ldar), R(16),
+ B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
+ B(Ldar), R(15),
/* 36 E> */ B(Throw),
B(LdaZero),
- B(Star), R(12),
- B(Mov), R(16), R(13),
+ B(Star), R(11),
+ B(Mov), R(15), R(12),
B(Jump), U8(58),
B(LdaZero),
B(Star), R(7),
- B(JumpLoop), U8(100), I8(0),
+ B(JumpLoop), U8(84), I8(0),
B(Jump), U8(36),
- B(Star), R(16),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(16), U8(12), U8(13),
- B(PushContext), R(16),
B(Star), R(15),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(15), U8(11), U8(12),
+ B(PushContext), R(15),
+ B(Star), R(14),
B(LdaSmi), I8(2),
B(TestEqualStrict), R(7), U8(13),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(17),
- B(CallRuntime), U16(Runtime::kReThrow), R(17), U8(1),
- B(PopContext), R(16),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kReThrow), R(16), U8(1),
+ B(PopContext), R(15),
B(LdaSmi), I8(-1),
- B(Star), R(13),
B(Star), R(12),
+ B(Star), R(11),
B(Jump), U8(8),
- B(Star), R(13),
- B(LdaSmi), I8(1),
B(Star), R(12),
+ B(LdaSmi), I8(1),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(14),
+ B(Star), R(13),
B(LdaZero),
B(TestEqualStrict), R(7), U8(14),
B(JumpIfTrue), U8(90),
- B(LdaNamedProperty), R(4), U8(14), U8(15),
+ B(LdaNamedProperty), R(4), U8(13), U8(15),
B(Star), R(9),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
@@ -242,65 +204,64 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(144),
+ B(Wide), B(LdaSmi), I16(146),
+ B(Star), R(14),
+ B(LdaConstant), U8(14),
B(Star), R(15),
- B(LdaConstant), U8(15),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
- B(Mov), R(context), R(15),
- B(Mov), R(9), R(16),
- B(Mov), R(4), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
+ B(Mov), R(context), R(14),
+ B(Mov), R(9), R(15),
+ B(Mov), R(4), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
B(Jump), U8(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(15),
+ B(Ldar), R(14),
B(Jump), U8(27),
- B(Mov), R(9), R(15),
- B(Mov), R(4), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(15), U8(2),
+ B(Mov), R(9), R(14),
+ B(Mov), R(4), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
B(Star), R(10),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
- B(Ldar), R(14),
+ B(Ldar), R(13),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(SwitchOnSmiNoFeedback), U8(16), U8(2), I8(0),
+ B(Ldar), R(11),
+ B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(13),
+ B(Ldar), R(12),
/* 44 S> */ B(Return),
- B(Ldar), R(13),
+ B(Ldar), R(12),
B(ReThrow),
B(LdaUndefined),
/* 44 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [90],
+ Smi [22],
+ Smi [138],
Smi [10],
Smi [7],
TUPLE2_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [69],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
Smi [15],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
Smi [6],
Smi [9],
]
handlers: [
- [70, 243, 251],
- [73, 207, 209],
- [312, 322, 324],
+ [48, 205, 213],
+ [51, 169, 171],
+ [274, 284, 286],
]
---
@@ -309,117 +270,99 @@ snippet: "
function* f() { yield* g() }
f();
"
-frame size: 10
+frame size: 9
parameter count: 1
-bytecode array length: 255
+bytecode array length: 217
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(2),
- B(RestoreGeneratorState), R(0),
- B(Star), R(1),
- B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(Mov), R(closure), R(2),
- B(Mov), R(this), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
+ B(Mov), R(closure), R(1),
+ B(Mov), R(this), R(2),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
B(Star), R(0),
/* 38 E> */ B(StackCheck),
- /* 38 E> */ B(SuspendGenerator), R(0), R(0), U8(2), U8(0),
- /* 54 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(2),
- B(Star), R(2),
+ /* 38 E> */ B(SuspendGenerator), R(0), R(0), U8(1), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(1),
+ B(Star), R(1),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(2),
+ B(Ldar), R(1),
/* 38 E> */ B(Throw),
- B(Ldar), R(2),
+ B(Ldar), R(1),
/* 54 S> */ B(Return),
/* 43 S> */ B(LdaGlobal), U8(4), U8(0),
- B(Star), R(9),
- /* 50 E> */ B(CallUndefinedReceiver0), R(9), U8(2),
- B(Star), R(7),
- B(LdaNamedProperty), R(7), U8(5), U8(4),
B(Star), R(8),
- B(CallProperty0), R(8), R(7), U8(6),
+ /* 50 E> */ B(CallUndefinedReceiver0), R(8), U8(2),
+ B(Star), R(6),
+ B(LdaNamedProperty), R(6), U8(5), U8(4),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(6), U8(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- B(Star), R(4),
- B(LdaNamedProperty), R(4), U8(6), U8(8),
- B(Star), R(6),
- B(LdaUndefined),
+ B(Star), R(3),
+ B(LdaNamedProperty), R(3), U8(6), U8(8),
B(Star), R(5),
+ B(LdaUndefined),
+ B(Star), R(4),
B(LdaZero),
- B(Star), R(3),
- B(Ldar), R(1),
- B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(1),
- B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
- B(Ldar), R(3),
- B(SwitchOnSmiNoFeedback), U8(8), U8(2), I8(1),
- B(CallProperty1), R(6), R(4), R(5), U8(10),
+ B(Star), R(2),
+ B(Ldar), R(2),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(1),
+ B(CallProperty1), R(5), R(3), R(4), U8(10),
B(Jump), U8(69),
- B(LdaNamedProperty), R(4), U8(10), U8(12),
+ B(LdaNamedProperty), R(3), U8(9), U8(12),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(9),
- B(CallProperty1), R(9), R(4), R(5), U8(14),
+ B(Star), R(8),
+ B(CallProperty1), R(8), R(3), R(4), U8(14),
B(Jump), U8(52),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 54 S> */ B(Return),
- B(LdaNamedProperty), R(4), U8(11), U8(16),
+ B(LdaNamedProperty), R(3), U8(10), U8(16),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(9),
- B(CallProperty1), R(9), R(4), R(5), U8(18),
+ B(Star), R(8),
+ B(CallProperty1), R(8), R(3), R(4), U8(18),
B(Jump), U8(32),
- B(LdaNamedProperty), R(4), U8(10), U8(20),
+ B(LdaNamedProperty), R(3), U8(9), U8(20),
B(JumpIfUndefined), U8(21),
B(JumpIfNull), U8(19),
- B(Star), R(9),
- B(CallProperty0), R(9), R(4), U8(22),
+ B(Star), R(8),
+ B(CallProperty0), R(8), R(3), U8(22),
B(Jump), U8(2),
B(JumpIfJSReceiver), U8(9),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
- B(Star), R(2),
+ B(Star), R(1),
B(JumpIfJSReceiver), U8(7),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
- B(LdaNamedProperty), R(2), U8(12), U8(24),
- B(JumpIfToBooleanTrue), U8(26),
- B(Ldar), R(2),
- B(SuspendGenerator), R(0), R(0), U8(9), U8(1),
- /* 54 S> */ B(Return),
- B(ResumeGenerator), R(0), R(1), R(0), U8(9),
- B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(1), U8(1),
+ B(LdaNamedProperty), R(1), U8(11), U8(24),
+ B(JumpIfToBooleanTrue), U8(24),
+ B(Ldar), R(1),
+ /* 43 E> */ B(SuspendGenerator), R(0), R(0), U8(8), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(8),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Star), R(2),
+ B(JumpLoop), U8(114), I8(0),
+ B(LdaNamedProperty), R(1), U8(12), U8(26),
B(Star), R(3),
- B(JumpLoop), U8(130), I8(0),
- B(LdaNamedProperty), R(2), U8(13), U8(26),
- B(Star), R(4),
B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(3),
+ B(TestEqualStrictNoFeedback), R(2),
B(JumpIfFalse), U8(5),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 54 S> */ B(Return),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [91],
+ Smi [22],
+ Smi [185],
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
- Smi [115],
Smi [11],
Smi [31],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
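The yield* hunks above dispatch on the generator's resume mode through a two-entry
SwitchOnSmiNoFeedback, with a fall-through for the normal path. A minimal sketch of
the three delegation paths that switch covers, reusing the snippet's g:

    // Sketch: the three resume paths the yield* bytecode forwards to g()'s iterator.
    function* g() { yield 1; yield 2; }
    function* f() { yield* g(); }
    const it = f();
    it.next();       // normal path: invokes the inner iterator's "next"
    it.return(0);    // return path: looks up "return" (the LdaNamedProperty above)
    // it.throw(e);  // throw path: a missing "throw" ends in kThrowThrowMethodMissing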
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 9c876157ad..d6db2f43c3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -11,47 +11,37 @@ top level: yes
snippet: "
import \"bar\";
"
-frame size: 6
+frame size: 5
parameter count: 2
-bytecode array length: 84
+bytecode array length: 62
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(3),
- B(RestoreGeneratorState), R(0),
- B(Star), R(2),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(3),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- /* 13 S> */ B(Return),
- B(ResumeGenerator), R(0), R(2), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 13 S> */ B(Return),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(Ldar), R(1),
/* 13 S> */ B(Return),
]
constant pool: [
- Smi [43],
- FIXED_ARRAY_TYPE,
+ Smi [36],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
]
@@ -62,47 +52,37 @@ handlers: [
snippet: "
import {foo} from \"bar\";
"
-frame size: 6
+frame size: 5
parameter count: 2
-bytecode array length: 84
+bytecode array length: 62
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(3),
- B(RestoreGeneratorState), R(0),
- B(Star), R(2),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(3),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- /* 24 S> */ B(Return),
- B(ResumeGenerator), R(0), R(2), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 24 S> */ B(Return),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(Ldar), R(1),
/* 24 S> */ B(Return),
]
constant pool: [
- Smi [43],
- FIXED_ARRAY_TYPE,
+ Smi [36],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
]
@@ -115,60 +95,50 @@ snippet: "
goo(42);
{ let x; { goo(42) } };
"
-frame size: 7
+frame size: 6
parameter count: 2
-bytecode array length: 114
+bytecode array length: 92
bytecodes: [
- B(Ldar), R(1),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(4),
- B(RestoreGeneratorState), R(1),
- B(Star), R(3),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(4),
- B(Mov), R(this), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(1),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(5), U8(0),
- /* 64 S> */ B(Return),
- B(ResumeGenerator), R(1), R(3), R(0), U8(5),
- B(Star), R(5),
+ /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(1), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 64 S> */ B(Return),
/* 32 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(4),
- B(Star), R(5),
+ B(Star), R(4),
B(LdaSmi), I8(42),
- B(Star), R(6),
- /* 32 E> */ B(CallUndefinedReceiver1), R(5), R(6), U8(0),
+ B(Star), R(5),
+ /* 32 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(0),
/* 47 S> */ B(LdaUndefined),
B(Star), R(0),
/* 52 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(ThrowReferenceErrorIfHole), U8(4),
- B(Star), R(5),
+ B(Star), R(4),
B(LdaSmi), I8(42),
- B(Star), R(6),
- /* 52 E> */ B(CallUndefinedReceiver1), R(5), R(6), U8(2),
+ B(Star), R(5),
+ /* 52 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(2),
B(Star), R(2),
/* 64 S> */ B(Return),
]
constant pool: [
- Smi [43],
- FIXED_ARRAY_TYPE,
+ Smi [36],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
@@ -182,39 +152,29 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 7
+frame size: 6
parameter count: 2
-bytecode array length: 112
+bytecode array length: 90
bytecodes: [
- B(Ldar), R(1),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(4),
- B(RestoreGeneratorState), R(1),
- B(Star), R(3),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(4),
- B(Mov), R(this), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(1),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(5), U8(0),
- /* 49 S> */ B(Return),
- B(ResumeGenerator), R(1), R(3), R(0), U8(5),
- B(Star), R(5),
+ /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(1), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 49 S> */ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
@@ -225,16 +185,16 @@ bytecodes: [
B(Star), R(0),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(5),
+ B(Star), R(4),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(Mov), R(5), R(2),
+ B(Mov), R(4), R(2),
B(Ldar), R(2),
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [43],
- FIXED_ARRAY_TYPE,
+ Smi [36],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
]
@@ -247,42 +207,32 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 7
+frame size: 6
parameter count: 2
-bytecode array length: 118
+bytecode array length: 96
bytecodes: [
- B(Ldar), R(1),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(4),
- B(RestoreGeneratorState), R(1),
- B(Star), R(3),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(4),
- B(Mov), R(this), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(1),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
B(Ldar), R(1),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(5), U8(0),
- /* 49 S> */ B(Return),
- B(ResumeGenerator), R(1), R(3), R(0), U8(5),
- B(Star), R(5),
+ /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(1), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 49 S> */ B(Return),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
@@ -293,16 +243,16 @@ bytecodes: [
B(Star), R(0),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(5),
+ B(Star), R(4),
B(Inc), U8(1),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(Mov), R(5), R(2),
+ B(Mov), R(4), R(2),
B(Ldar), R(2),
/* 49 S> */ B(Return),
]
constant pool: [
- Smi [49],
- FIXED_ARRAY_TYPE,
+ Smi [42],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
]
@@ -315,42 +265,32 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 7
+frame size: 6
parameter count: 2
-bytecode array length: 122
+bytecode array length: 100
bytecodes: [
- B(Ldar), R(1),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(4),
- B(RestoreGeneratorState), R(1),
- B(Star), R(3),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(4),
- B(Mov), R(this), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(1),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
B(Ldar), R(1),
- /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(5), U8(0),
- /* 51 S> */ B(Return),
- B(ResumeGenerator), R(1), R(3), R(0), U8(5),
- B(Star), R(5),
+ /* 0 E> */ B(SuspendGenerator), R(1), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(1), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 51 S> */ B(Return),
/* 19 S> */ B(LdaSmi), I8(42),
/* 19 E> */ B(StaModuleVariable), I8(1), U8(0),
@@ -361,16 +301,16 @@ bytecodes: [
B(Star), R(0),
/* 41 S> */ B(LdaModuleVariable), I8(1), U8(0),
B(ToNumeric), U8(1),
- B(Star), R(5),
+ B(Star), R(4),
B(Inc), U8(1),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Mov), R(5), R(2),
+ B(Mov), R(4), R(2),
B(Ldar), R(2),
/* 51 S> */ B(Return),
]
constant pool: [
- Smi [49],
- FIXED_ARRAY_TYPE,
+ Smi [42],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
]
@@ -381,52 +321,42 @@ handlers: [
snippet: "
export default (function () {});
"
-frame size: 6
+frame size: 5
parameter count: 2
-bytecode array length: 97
+bytecode array length: 75
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(3),
- B(RestoreGeneratorState), R(0),
- B(Star), R(2),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(3),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- /* 32 S> */ B(Return),
- B(ResumeGenerator), R(0), R(2), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 32 S> */ B(Return),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(CreateClosure), U8(4), U8(0), U8(0),
B(StaModuleVariable), I8(1), U8(0),
B(Ldar), R(1),
/* 32 S> */ B(Return),
]
constant pool: [
- Smi [49],
- FIXED_ARRAY_TYPE,
+ Smi [42],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
SHARED_FUNCTION_INFO_TYPE,
@@ -438,61 +368,51 @@ handlers: [
snippet: "
export default (class {});
"
-frame size: 8
+frame size: 7
parameter count: 2
-bytecode array length: 118
+bytecode array length: 96
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(3),
- B(RestoreGeneratorState), R(0),
- B(Star), R(2),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(3),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- /* 26 S> */ B(Return),
- B(ResumeGenerator), R(0), R(2), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 26 S> */ B(Return),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(LdaTheHole),
- B(Star), R(7),
+ B(Star), R(6),
B(CreateClosure), U8(5), U8(0), U8(0),
- B(Star), R(4),
+ B(Star), R(3),
B(LdaConstant), U8(4),
- B(Star), R(5),
- B(Mov), R(4), R(6),
- B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(3),
- B(Star), R(5),
- B(Ldar), R(6),
+ B(Star), R(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
+ B(Ldar), R(5),
B(StaModuleVariable), I8(1), U8(0),
B(Ldar), R(1),
/* 26 S> */ B(Return),
]
constant pool: [
- Smi [49],
- FIXED_ARRAY_TYPE,
+ Smi [42],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
FIXED_ARRAY_TYPE,
@@ -505,47 +425,37 @@ handlers: [
snippet: "
export {foo as goo} from \"bar\"
"
-frame size: 6
+frame size: 5
parameter count: 2
-bytecode array length: 84
+bytecode array length: 62
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(3),
- B(RestoreGeneratorState), R(0),
- B(Star), R(2),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(3),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- /* 30 S> */ B(Return),
- B(ResumeGenerator), R(0), R(2), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 30 S> */ B(Return),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(Ldar), R(1),
/* 30 S> */ B(Return),
]
constant pool: [
- Smi [43],
- FIXED_ARRAY_TYPE,
+ Smi [36],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
]
@@ -556,47 +466,37 @@ handlers: [
snippet: "
export * from \"bar\"
"
-frame size: 6
+frame size: 5
parameter count: 2
-bytecode array length: 84
+bytecode array length: 62
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(3),
- B(RestoreGeneratorState), R(0),
- B(Star), R(2),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(3),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
- /* 19 S> */ B(Return),
- B(ResumeGenerator), R(0), R(2), R(0), U8(4),
- B(Star), R(4),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 0 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 19 S> */ B(Return),
- B(Mov), R(4), R(1),
+ B(Mov), R(3), R(1),
B(Ldar), R(1),
/* 19 S> */ B(Return),
]
constant pool: [
- Smi [43],
- FIXED_ARRAY_TYPE,
+ Smi [36],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
]
@@ -608,56 +508,46 @@ snippet: "
import * as foo from \"bar\"
foo.f(foo, foo.x);
"
-frame size: 9
+frame size: 8
parameter count: 2
-bytecode array length: 111
+bytecode array length: 89
bytecodes: [
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(0), U8(1),
- B(PushContext), R(4),
- B(RestoreGeneratorState), R(0),
- B(Star), R(3),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(LdaConstant), U8(1),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(4),
- B(Mov), R(this), R(6),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(5), U8(2),
+ B(Star), R(5),
+ B(Mov), R(arg0), R(3),
+ B(Mov), R(closure), R(4),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
+ B(PushContext), R(3),
+ B(Mov), R(this), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
B(Star), R(0),
B(LdaZero),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(5), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(4), U8(1),
B(Star), R(1),
/* 0 E> */ B(StackCheck),
B(Ldar), R(0),
- /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(5), U8(0),
- /* 45 S> */ B(Return),
- B(ResumeGenerator), R(0), R(3), R(0), U8(5),
- B(Star), R(5),
+ /* 0 E> */ B(SuspendGenerator), R(0), R(0), U8(4), U8(0),
+ B(ResumeGenerator), R(0), R(0), U8(4),
+ B(Star), R(4),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 45 S> */ B(Return),
/* 31 S> */ B(LdaNamedProperty), R(1), U8(4), U8(0),
- B(Star), R(5),
+ B(Star), R(4),
/* 42 E> */ B(LdaNamedProperty), R(1), U8(5), U8(2),
- B(Star), R(8),
- /* 31 E> */ B(CallProperty2), R(5), R(1), R(1), R(8), U8(4),
+ B(Star), R(7),
+ /* 31 E> */ B(CallProperty2), R(4), R(1), R(1), R(7), U8(4),
B(Star), R(2),
/* 45 S> */ B(Return),
]
constant pool: [
- Smi [55],
- FIXED_ARRAY_TYPE,
+ Smi [48],
+ SCOPE_INFO_TYPE,
Smi [10],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["f"],
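Two of the foo++ module snippets above emit nearly identical bytecode and differ only
in the write-back. The export forms are not visible in the truncated snippet headers,
so the following source shapes are inferred from the emitted bytecode:

    // Inferred shapes: mutable vs. const module binding under foo++.
    export let a = 42;    // a++ ends in Inc, then StaModuleVariable
    export const b = 42;  // b++ ends in Inc, then Runtime::kThrowConstAssignError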
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
new file mode 100644
index 0000000000..c6c446556b
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -0,0 +1,257 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+private fields: yes
+
+---
+snippet: "
+ {
+ class A {
+ #a;
+ constructor() {
+ this.#a = 1;
+ }
+ }
+
+ class B {
+ #a = 1;
+ }
+ new A;
+ new B;
+ }
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 131
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(Star), R(8),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(1),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(5), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+ B(Star), R(6),
+ B(Mov), R(7), R(1),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(Star), R(7),
+ B(StaNamedProperty), R(5), U8(4), U8(2),
+ B(PopContext), R(4),
+ B(Mov), R(1), R(2),
+ B(Ldar), R(closure),
+ /* 38 E> */ B(CreateBlockContext), U8(5),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(Star), R(8),
+ B(CreateClosure), U8(7), U8(4), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(6),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(5), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+ B(Star), R(6),
+ B(Mov), R(7), R(0),
+ B(CreateClosure), U8(8), U8(5), U8(2),
+ B(Star), R(7),
+ B(StaNamedProperty), R(5), U8(4), U8(6),
+ B(PopContext), R(4),
+ B(Mov), R(0), R(3),
+ /* 136 S> */ B(Ldar), R(1),
+ /* 136 E> */ B(Construct), R(1), R(0), U8(0), U8(8),
+ /* 145 S> */ B(Ldar), R(0),
+ /* 145 E> */ B(Construct), R(0), R(0), U8(0), U8(10),
+ B(LdaUndefined),
+ /* 154 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SYMBOL_TYPE,
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ {
+ class A extends class {} {
+ #a;
+ constructor() {
+ super();
+ this.#a = 1;
+ }
+ }
+
+ class B extends class {} {
+ #a = 1;
+ #b = this.#a;
+ foo() { return this.#a; }
+ bar(v) { this.#b = v; }
+ constructor() {
+ super();
+ this.foo();
+ this.bar(3);
+ }
+ }
+
+ class C extends B {
+ #a = 2;
+ constructor() {
+ (() => super())();
+ }
+ }
+
+ new A;
+ new B;
+ new C;
+ };
+"
+frame size: 15
+parameter count: 1
+bytecode array length: 263
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(Star), R(14),
+ B(CreateClosure), U8(3), U8(0), U8(2),
+ B(Star), R(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
+ B(Star), R(12),
+ B(CreateClosure), U8(4), U8(1), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(1),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(7), R(9),
+ B(Mov), R(13), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(3),
+ B(Star), R(8),
+ B(Mov), R(9), R(2),
+ B(CreateClosure), U8(5), U8(2), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(6), U8(3),
+ B(PopContext), R(6),
+ B(Mov), R(2), R(3),
+ B(Ldar), R(closure),
+ /* 38 E> */ B(CreateBlockContext), U8(7),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaTheHole),
+ B(Star), R(14),
+ B(CreateClosure), U8(10), U8(5), U8(2),
+ B(Star), R(11),
+ B(LdaConstant), U8(9),
+ B(Star), R(12),
+ B(Mov), R(11), R(13),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(12), U8(3),
+ B(Star), R(12),
+ B(CreateClosure), U8(11), U8(6), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(8),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(StaCurrentContextSlot), U8(5),
+ B(CreateClosure), U8(12), U8(7), U8(2),
+ B(Star), R(11),
+ B(CreateClosure), U8(13), U8(8), U8(2),
+ B(Star), R(12),
+ B(Mov), R(7), R(9),
+ B(Mov), R(13), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(5),
+ B(Star), R(8),
+ B(Mov), R(9), R(1),
+ B(CreateClosure), U8(14), U8(9), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(6), U8(10),
+ B(PopContext), R(6),
+ B(Mov), R(1), R(4),
+ B(Ldar), R(closure),
+ /* 140 E> */ B(CreateBlockContext), U8(15),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 356 E> */ B(CreateClosure), U8(17), U8(12), U8(2),
+ B(Star), R(7),
+ B(LdaConstant), U8(16),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kCreatePrivateFieldSymbol), R(0), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(7), R(9),
+ B(Mov), R(1), R(10),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(8), U8(3),
+ B(Star), R(8),
+ B(Mov), R(9), R(0),
+ B(CreateClosure), U8(18), U8(13), U8(2),
+ B(Star), R(9),
+ B(StaNamedProperty), R(7), U8(6), U8(14),
+ B(PopContext), R(6),
+ B(Mov), R(0), R(5),
+ /* 430 S> */ B(Ldar), R(2),
+ /* 430 E> */ B(Construct), R(2), R(0), U8(0), U8(16),
+ /* 439 S> */ B(Ldar), R(1),
+ /* 439 E> */ B(Construct), R(1), R(0), U8(0), U8(18),
+ /* 448 S> */ B(Ldar), R(0),
+ /* 448 E> */ B(Construct), R(0), R(0), U8(0), U8(20),
+ B(LdaUndefined),
+ /* 458 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SYMBOL_TYPE,
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
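In the new PrivateClassFields golden, each #a is backed by a symbol obtained from
Runtime::kCreatePrivateFieldSymbol and kept in a context slot of the class scope. A
rough desugaring of the first snippet's class B, treating the private symbol like an
ordinary Symbol (an approximation; real private names are not reflectable):

    // Approximate desugaring of `class B { #a = 1 }` per the bytecode above.
    const kA = Symbol('#a');           // stands in for kCreatePrivateFieldSymbol
    class B {
      constructor() { this[kA] = 1; }  // the field initializer stores on `this`
    }
    new B;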
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
index b8b77107a7..c6dafaec37 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -81,13 +81,13 @@ bytecodes: [
/* 138 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -226,7 +226,7 @@ bytecodes: [
/* 356 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -234,14 +234,14 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index e0567143b5..d870cd3145 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -132,8 +132,8 @@ bytecodes: [
/* 61 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["1"],
]
@@ -207,7 +207,7 @@ bytecodes: [
/* 80 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
@@ -231,7 +231,7 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(3),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(74),
+ B(LdaSmi), I8(76),
B(Star), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
@@ -270,34 +270,24 @@ snippet: "
}
f();
"
-frame size: 6
+frame size: 5
parameter count: 1
-bytecode array length: 89
+bytecode array length: 67
bytecodes: [
- B(Ldar), R(2),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(2), U8(1),
- B(PushContext), R(4),
- B(RestoreGeneratorState), R(2),
- B(Star), R(3),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(SwitchOnGeneratorState), R(2), U8(0), U8(1),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(2),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(4), U8(0),
- /* 62 S> */ B(Return),
- B(ResumeGenerator), R(2), R(3), R(0), U8(4),
- B(Star), R(4),
+ /* 11 E> */ B(SuspendGenerator), R(2), R(0), U8(3), U8(0),
+ B(ResumeGenerator), R(2), R(0), U8(3),
+ B(Star), R(3),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(2), U8(1),
B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 11 E> */ B(Throw),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 62 S> */ B(Return),
/* 31 S> */ B(LdaZero),
B(Star), R(1),
@@ -314,7 +304,7 @@ bytecodes: [
/* 62 S> */ B(Return),
]
constant pool: [
- Smi [29],
+ Smi [22],
Smi [10],
Smi [7],
]
@@ -328,74 +318,56 @@ snippet: "
}
f();
"
-frame size: 5
+frame size: 4
parameter count: 1
-bytecode array length: 137
+bytecode array length: 99
bytecodes: [
- B(Ldar), R(1),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(3),
- B(RestoreGeneratorState), R(1),
- B(Star), R(2),
- B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
- B(Mov), R(closure), R(3),
- B(Mov), R(this), R(4),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(SwitchOnGeneratorState), R(1), U8(0), U8(2),
+ B(Mov), R(closure), R(2),
+ B(Mov), R(this), R(3),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(2), U8(2),
B(Star), R(1),
/* 11 E> */ B(StackCheck),
- /* 11 E> */ B(SuspendGenerator), R(1), R(0), U8(3), U8(0),
- /* 56 S> */ B(Return),
- B(ResumeGenerator), R(1), R(2), R(0), U8(3),
- B(Star), R(3),
+ /* 11 E> */ B(SuspendGenerator), R(1), R(0), U8(2), U8(0),
+ B(ResumeGenerator), R(1), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 11 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 56 S> */ B(Return),
/* 31 S> */ B(LdaZero),
B(Star), R(0),
- B(Ldar), R(2),
- B(SwitchOnSmiNoFeedback), U8(4), U8(1), I8(1),
- B(LdaSmi), I8(-2),
- /* 31 E> */ B(TestEqualStrictNoFeedback), R(2),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 36 S> */ B(LdaSmi), I8(10),
/* 36 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(49),
+ B(JumpIfFalse), U8(47),
/* 18 E> */ B(StackCheck),
/* 47 S> */ B(LdaFalse),
- B(Star), R(4),
- B(Mov), R(0), R(3),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(3), U8(2),
- B(SuspendGenerator), R(1), R(0), U8(3), U8(1),
- /* 56 S> */ B(Return),
- B(ResumeGenerator), R(1), R(2), R(0), U8(3),
B(Star), R(3),
+ B(Mov), R(0), R(2),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(2), U8(2),
+ /* 47 E> */ B(SuspendGenerator), R(1), R(0), U8(2), U8(1),
+ B(ResumeGenerator), R(1), R(0), U8(2),
+ B(Star), R(2),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
- B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
- B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
+ B(Ldar), R(2),
/* 47 E> */ B(Throw),
- B(Ldar), R(3),
+ B(Ldar), R(2),
/* 56 S> */ B(Return),
/* 44 S> */ B(Ldar), R(0),
B(Inc), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(65), I8(0),
+ B(JumpLoop), U8(49), I8(0),
B(LdaUndefined),
/* 56 S> */ B(Return),
]
constant pool: [
- Smi [29],
- Smi [53],
+ Smi [22],
+ Smi [68],
Smi [10],
Smi [7],
- Smi [36],
Smi [10],
Smi [7],
]
@@ -432,7 +404,7 @@ bytecodes: [
B(LdaUndefined),
B(Star), R(9),
B(Mov), R(3), R(8),
- /* 49 E> */ B(CallJSRuntime), U8(%promise_resolve), R(8), U8(2),
+ /* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(8), U8(2),
B(LdaZero),
B(Star), R(4),
B(Mov), R(3), R(5),
@@ -451,7 +423,7 @@ bytecodes: [
B(LdaFalse),
B(Star), R(11),
B(Mov), R(3), R(9),
- B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(3),
+ B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(9), U8(3),
B(PopContext), R(8),
B(LdaZero),
B(Star), R(4),
@@ -482,7 +454,7 @@ bytecodes: [
]
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
]
@@ -498,121 +470,104 @@ snippet: "
}
f();
"
-frame size: 12
+frame size: 11
parameter count: 1
-bytecode array length: 224
+bytecode array length: 188
bytecodes: [
- B(Ldar), R(1),
- B(JumpIfUndefined), U8(18),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(1), U8(1),
- B(PushContext), R(4),
- B(RestoreGeneratorState), R(1),
- B(Star), R(3),
- B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
- B(Abort), U8(15),
- B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(Mov), R(closure), R(4),
- B(Mov), R(this), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
+ B(SwitchOnGeneratorState), R(1), U8(0), U8(1),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(Star), R(1),
/* 16 E> */ B(StackCheck),
B(CallJSRuntime), U8(%async_function_promise_create), R(0), U8(0),
B(Star), R(2),
+ B(Mov), R(context), R(5),
B(Mov), R(context), R(6),
- B(Mov), R(context), R(7),
/* 36 S> */ B(LdaZero),
B(Star), R(0),
- B(Ldar), R(3),
- B(SwitchOnSmiNoFeedback), U8(1), U8(1), I8(0),
- B(LdaSmi), I8(-2),
- /* 36 E> */ B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(4),
- B(Abort), U8(15),
/* 41 S> */ B(LdaSmi), I8(10),
/* 41 E> */ B(TestLessThan), R(0), U8(0),
- B(JumpIfFalse), U8(52),
+ B(JumpIfFalse), U8(50),
/* 23 E> */ B(StackCheck),
- /* 52 S> */ B(Mov), R(1), R(8),
- B(Mov), R(0), R(9),
- B(Mov), R(2), R(10),
- B(CallJSRuntime), U8(%async_function_await_uncaught), R(8), U8(3),
- B(SuspendGenerator), R(1), R(0), U8(8), U8(0),
- /* 61 S> */ B(Return),
- B(ResumeGenerator), R(1), R(3), R(0), U8(8),
- B(Star), R(8),
+ /* 52 S> */ B(Mov), R(1), R(7),
+ B(Mov), R(0), R(8),
+ B(Mov), R(2), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncFunctionAwaitUncaught), R(7), U8(3),
+ /* 52 E> */ B(SuspendGenerator), R(1), R(0), U8(7), U8(0),
+ B(ResumeGenerator), R(1), R(0), U8(7),
+ B(Star), R(7),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(1), U8(1),
- B(Star), R(9),
+ B(Star), R(8),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
+ B(TestEqualStrictNoFeedback), R(8),
B(JumpIfTrue), U8(5),
- B(Ldar), R(8),
+ B(Ldar), R(7),
B(ReThrow),
/* 49 S> */ B(Ldar), R(0),
B(Inc), U8(1),
B(Star), R(0),
- B(JumpLoop), U8(68), I8(0),
+ B(JumpLoop), U8(52), I8(0),
B(LdaUndefined),
- B(Star), R(9),
- B(Mov), R(2), R(8),
- /* 49 E> */ B(CallJSRuntime), U8(%promise_resolve), R(8), U8(2),
+ B(Star), R(8),
+ B(Mov), R(2), R(7),
+ /* 49 E> */ B(InvokeIntrinsic), U8(Runtime::k_ResolvePromise), R(7), U8(2),
B(LdaZero),
- B(Star), R(4),
- B(Mov), R(2), R(5),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
B(Jump), U8(58),
B(Jump), U8(42),
- B(Star), R(8),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(8), U8(2), U8(3),
B(Star), R(7),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(7), U8(1), U8(2),
+ B(Star), R(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(7),
- B(PushContext), R(8),
+ B(Ldar), R(6),
+ B(PushContext), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(10),
+ B(Star), R(9),
B(LdaFalse),
- B(Star), R(11),
- B(Mov), R(2), R(9),
- B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(3),
- B(PopContext), R(8),
+ B(Star), R(10),
+ B(Mov), R(2), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_RejectPromise), R(8), U8(3),
+ B(PopContext), R(7),
B(LdaZero),
- B(Star), R(4),
- B(Mov), R(2), R(5),
+ B(Star), R(3),
+ B(Mov), R(2), R(4),
B(Jump), U8(16),
B(LdaSmi), I8(-1),
- B(Star), R(5),
B(Star), R(4),
+ B(Star), R(3),
B(Jump), U8(8),
- B(Star), R(5),
- B(LdaSmi), I8(1),
B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(6),
+ B(Star), R(5),
B(CallJSRuntime), U8(%async_function_promise_release), R(2), U8(1),
- B(Ldar), R(6),
+ B(Ldar), R(5),
B(SetPendingMessage),
- B(Ldar), R(4),
- B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 61 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(4),
B(ReThrow),
B(LdaUndefined),
/* 61 S> */ B(Return),
]
constant pool: [
- Smi [38],
- Smi [39],
+ Smi [58],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
Smi [6],
Smi [9],
]
handlers: [
- [46, 183, 191],
- [49, 141, 143],
+ [26, 147, 155],
+ [29, 105, 107],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
index 80a2e4fd49..342af2fe11 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -112,7 +112,7 @@ bytecodes: [
/* 215 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
@@ -121,7 +121,7 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -308,7 +308,7 @@ bytecodes: [
/* 483 S> */ B(Return),
]
constant pool: [
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -319,7 +319,7 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SYMBOL_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
@@ -327,7 +327,7 @@ constant pool: [
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
index cad702e9a3..52c1a91980 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
@@ -33,7 +33,7 @@ bytecodes: [
]
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["e"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
]
handlers: [
[4, 7, 9],
@@ -83,9 +83,9 @@ bytecodes: [
]
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["e1"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["e2"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
]
handlers: [
[4, 8, 10],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
index 21102174a5..3aa7006e38 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
@@ -100,7 +100,7 @@ bytecodes: [
]
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["e"],
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
]
handlers: [
[8, 39, 47],
@@ -171,8 +171,8 @@ bytecodes: [
]
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["e"],
- FIXED_ARRAY_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
+ SCOPE_INFO_TYPE,
]
handlers: [
[4, 62, 70],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
index 443235a3eb..c3688b5d5f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
@@ -25,7 +25,7 @@ bytecodes: [
]
constant pool: [
FIXED_ARRAY_TYPE,
- FIXED_ARRAY_TYPE,
+ SCOPE_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
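Across the TryCatch, TryFinally, and WithStatement goldens, the constant-pool entry for
each catch/with scope changed from FIXED_ARRAY_TYPE to SCOPE_INFO_TYPE; reading the
goldens alone, the scope's ScopeInfo now appears to be a dedicated instance type rather
than a plain fixed array. The affected constants belong to snippets of this shape
(sloppy-mode script, since `with` is illegal in strict code):

    // Each catch/with scope contributes one SCOPE_INFO_TYPE pool entry above.
    try { throw 1; } catch (e) { e; }  // "e" string + catch-scope ScopeInfo
    with ({ x: 0 }) { x; }             // "x" string + with-scope ScopeInfo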
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 4064ea1d8b..4dad7d48ce 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -45,6 +45,7 @@ class ProgramOptions final {
do_expressions_(false),
async_iteration_(false),
public_fields_(false),
+ private_fields_(false),
static_fields_(false),
verbose_(false) {}
@@ -66,6 +67,7 @@ class ProgramOptions final {
bool do_expressions() const { return do_expressions_; }
bool async_iteration() const { return async_iteration_; }
bool public_fields() const { return public_fields_; }
+ bool private_fields() const { return private_fields_; }
bool static_fields() const { return static_fields_; }
bool verbose() const { return verbose_; }
bool suppress_runtime_errors() const { return rebaseline_ && !verbose_; }
@@ -85,6 +87,7 @@ class ProgramOptions final {
bool do_expressions_;
bool async_iteration_;
bool public_fields_;
+ bool private_fields_;
bool static_fields_;
bool verbose_;
std::vector<std::string> input_filenames_;
@@ -177,6 +180,8 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.async_iteration_ = true;
} else if (strcmp(argv[i], "--public-fields") == 0) {
options.public_fields_ = true;
+ } else if (strcmp(argv[i], "--private-fields") == 0) {
+ options.private_fields_ = true;
} else if (strcmp(argv[i], "--static-fields") == 0) {
options.static_fields_ = true;
} else if (strcmp(argv[i], "--verbose") == 0) {
@@ -285,6 +290,8 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
async_iteration_ = ParseBoolean(line.c_str() + 17);
} else if (line.compare(0, 15, "public fields: ") == 0) {
public_fields_ = ParseBoolean(line.c_str() + 15);
+ } else if (line.compare(0, 16, "private fields: ") == 0) {
+ private_fields_ = ParseBoolean(line.c_str() + 16);
} else if (line.compare(0, 15, "static fields: ") == 0) {
static_fields_ = ParseBoolean(line.c_str() + 15);
} else if (line == "---") {
@@ -311,6 +318,7 @@ void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
if (do_expressions_) stream << "\ndo expressions: yes";
if (async_iteration_) stream << "\nasync iteration: yes";
if (public_fields_) stream << "\npublic fields: yes";
+ if (private_fields_) stream << "\nprivate fields: yes";
if (static_fields_) stream << "\nstatic fields: yes";
stream << "\n\n";
@@ -416,6 +424,7 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
if (options.do_expressions()) i::FLAG_harmony_do_expressions = true;
if (options.public_fields()) i::FLAG_harmony_public_fields = true;
+ if (options.private_fields()) i::FLAG_harmony_private_fields = true;
if (options.static_fields()) i::FLAG_harmony_static_fields = true;
stream << "#\n# Autogenerated by generate-bytecode-expectations.\n#\n\n";
@@ -426,6 +435,7 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
i::FLAG_harmony_do_expressions = false;
i::FLAG_harmony_public_fields = false;
+ i::FLAG_harmony_private_fields = false;
i::FLAG_harmony_static_fields = false;
}
@@ -474,6 +484,7 @@ void PrintUsage(const char* exec_path) {
" --top-level Process top level code, not the top-level function.\n"
" --do-expressions Enable harmony_do_expressions flag.\n"
" --public-fields Enable harmony_public_fields flag.\n"
+ " --private-fields Enable harmony_private_fields flag.\n"
" --static-fields Enable harmony_static_fields flag.\n"
" --output=file.name\n"
" Specify the output file. If not specified, output goes to "
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index 93e1d930ac..f856716660 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -122,10 +122,10 @@ class InterpreterTester {
function->shared()->set_function_data(*bytecode_.ToHandleChecked());
}
if (!feedback_metadata_.is_null()) {
- function->set_feedback_vector_cell(isolate_->heap()->undefined_cell());
+ function->set_feedback_cell(isolate_->heap()->many_closures_cell());
function->shared()->set_feedback_metadata(
*feedback_metadata_.ToHandleChecked());
- JSFunction::EnsureLiterals(function);
+ JSFunction::EnsureFeedbackVector(function);
}
return function;
}
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 1db0f70ad7..f8e2ee95cd 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2233,7 +2233,7 @@ TEST(ClassAndSuperClass) {
LoadGolden("ClassAndSuperClass.golden")));
}
-TEST(ClassFields) {
+TEST(PublicClassFields) {
bool old_flag = i::FLAG_harmony_public_fields;
i::FLAG_harmony_public_fields = true;
InitializedIgnitionHandleScope scope;
@@ -2283,10 +2283,70 @@ TEST(ClassFields) {
"}\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
- LoadGolden("ClassFields.golden")));
+ LoadGolden("PublicClassFields.golden")));
i::FLAG_harmony_public_fields = old_flag;
}
+TEST(PrivateClassFields) {
+ bool old_flag = i::FLAG_harmony_private_fields;
+ i::FLAG_harmony_private_fields = true;
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+
+ const char* snippets[] = {
+ "{\n"
+ " class A {\n"
+ " #a;\n"
+ " constructor() {\n"
+ " this.#a = 1;\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " class B {\n"
+ " #a = 1;\n"
+ " }\n"
+ " new A;\n"
+ " new B;\n"
+ "}\n",
+
+ "{\n"
+ " class A extends class {} {\n"
+ " #a;\n"
+ " constructor() {\n"
+ " super();\n"
+ " this.#a = 1;\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " class B extends class {} {\n"
+ " #a = 1;\n"
+ " #b = this.#a;\n"
+ " foo() { return this.#a; }\n"
+ " bar(v) { this.#b = v; }\n"
+ " constructor() {\n"
+ " super();\n"
+ " this.foo();\n"
+ " this.bar(3);\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " class C extends B {\n"
+ " #a = 2;\n"
+ " constructor() {\n"
+ " (() => super())();\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " new A;\n"
+ " new B;\n"
+ " new C;\n"
+ "};\n"};
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PrivateClassFields.golden")));
+ i::FLAG_harmony_private_fields = old_flag;
+}
+
TEST(StaticClassFields) {
bool old_flag = i::FLAG_harmony_public_fields;
bool old_static_flag = i::FLAG_harmony_static_fields;
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index b8ebef3b28..7eb76ecb21 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -219,27 +219,6 @@ TEST(IntrinsicAsStubCall) {
has_property_helper.NewObject("({ x: 20 })")));
}
-TEST(ClassOf) {
- HandleAndZoneScope handles;
- Isolate* isolate = handles.main_isolate();
- Factory* factory = isolate->factory();
- InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
- Runtime::kInlineClassOf);
- CHECK_EQ(*helper.Invoke(helper.NewObject("123")), *factory->null_value());
- CHECK_EQ(*helper.Invoke(helper.NewObject("'true'")), *factory->null_value());
- CHECK_EQ(*helper.Invoke(helper.NewObject("'foo'")), *factory->null_value());
- CHECK(helper.Invoke(helper.NewObject("({a:1})"))
- ->SameValue(*helper.NewObject("'Object'")));
- CHECK(helper.Invoke(helper.NewObject("(function foo() {})"))
- ->SameValue(*helper.NewObject("'Function'")));
- CHECK(helper.Invoke(helper.NewObject("new Date()"))
- ->SameValue(*helper.NewObject("'Date'")));
- CHECK(helper.Invoke(helper.NewObject("new Set"))
- ->SameValue(*helper.NewObject("'Set'")));
- CHECK(helper.Invoke(helper.NewObject("/x/"))
- ->SameValue(*helper.NewObject("'RegExp'")));
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index 48aa4826c9..852fc5f68c 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -89,6 +89,11 @@ TEST(PreParserScopeAnalysis) {
"get_method();",
true, true, false},
+ // Corner case: function expression with name "arguments".
+ {"var test = function arguments(%s) { %s function skippable() { } };\n"
+ "test;\n",
+ false, false, false}
+
// FIXME(marja): Generators and async functions
};
@@ -677,6 +682,12 @@ TEST(PreParserScopeAnalysis) {
i::FLAG_harmony_public_fields = false;
i::FLAG_harmony_static_fields = false;
}},
+ {"class X { #x = 1 }; new X;",
+ [] { i::FLAG_harmony_private_fields = true; },
+ [] { i::FLAG_harmony_private_fields = false; }},
+ {"function t() { return class { #x = 1 }; } new t();",
+ [] { i::FLAG_harmony_private_fields = true; },
+ [] { i::FLAG_harmony_private_fields = false; }},
};
for (unsigned outer_ix = 0; outer_ix < arraysize(outers); ++outer_ix) {
@@ -745,7 +756,7 @@ TEST(PreParserScopeAnalysis) {
scope_with_skipped_functions));
// Do scope allocation (based on the preparsed scope data).
- i::DeclarationScope::Analyze(&using_scope_data);
+ CHECK(i::DeclarationScope::Analyze(&using_scope_data));
// Parse the lazy function again eagerly to produce baseline data.
i::ParseInfo not_using_scope_data(shared);
@@ -760,7 +771,7 @@ TEST(PreParserScopeAnalysis) {
scope_without_skipped_functions));
// Do normal scope allocation.
- i::DeclarationScope::Analyze(&not_using_scope_data);
+ CHECK(i::DeclarationScope::Analyze(&not_using_scope_data));
// Verify that scope allocation gave the same results when parsing w/ the
// scope data (and skipping functions), and when parsing without.
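
The two CHECK() wrappers above reflect an API change: i::DeclarationScope::Analyze() now returns a bool reporting whether scope allocation succeeded, so callers assert the result instead of invoking it purely for effect. Schematically (info stands in for either ParseInfo in the test):

    // Before: i::DeclarationScope::Analyze(&info);   // returned void
    CHECK(i::DeclarationScope::Analyze(&info));       // now assert success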
diff --git a/deps/v8/test/cctest/print-extension.h b/deps/v8/test/cctest/print-extension.h
index 74d74ef81b..922d116efd 100644
--- a/deps/v8/test/cctest/print-extension.h
+++ b/deps/v8/test/cctest/print-extension.h
@@ -28,7 +28,7 @@
#ifndef V8_TEST_CCTEST_PRINT_EXTENSION_H_
#define V8_TEST_CCTEST_PRINT_EXTENSION_H_
-#include "src/v8.h"
+#include "include/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 24c10a0f6e..6398c405e4 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -850,3 +850,38 @@ TEST(Regress609134) {
"var a = 42;"
"for (var i = 0; i<3; i++) { a.foo; }");
}
+
+TEST(ObjectSetLazyDataProperty) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // Despite getting the property multiple times, the getter should only be
+ // called once and data property reads should continue to produce the same
+ // value.
+ static int getter_call_count;
+ getter_call_count = 0;
+ auto result = obj->SetLazyDataProperty(
+ env.local(), v8_str("foo"),
+ [](Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ getter_call_count++;
+ info.GetReturnValue().Set(getter_call_count);
+ });
+ CHECK(result.FromJust());
+ CHECK_EQ(0, getter_call_count);
+ for (int i = 0; i < 2; i++) {
+ ExpectInt32("obj.foo", 1);
+ CHECK_EQ(1, getter_call_count);
+ }
+
+ // Setting should overwrite the data property.
+ result = obj->SetLazyDataProperty(
+ env.local(), v8_str("bar"),
+ [](Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(false);
+ });
+ CHECK(result.FromJust());
+ ExpectInt32("obj.bar = -1; obj.bar;", -1);
+}
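
The new test exercises v8::Object::SetLazyDataProperty, which installs a getter that runs once on first access and is then replaced by an ordinary data property. A condensed usage sketch (the property name and value are illustrative):

    obj->SetLazyDataProperty(
           context, v8_str("answer"),
           [](v8::Local<v8::Name>,
              const v8::PropertyCallbackInfo<v8::Value>& info) {
             info.GetReturnValue().Set(42);  // computed on first read only
           })
        .FromJust();
    // Later reads hit a plain data property; assignment simply overwrites
    // it, as the "bar" case above demonstrates.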
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index b3937382da..b13208950f 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -46,6 +46,7 @@
#include "src/debug/debug.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
+#include "src/global-handles.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/local-allocator.h"
#include "src/lookup.h"
@@ -10379,7 +10380,8 @@ THREADED_TEST(ObjectGetOwnPropertyNames) {
->GetOwnPropertyNames(context.local(),
static_cast<v8::PropertyFilter>(
v8::PropertyFilter::ALL_PROPERTIES |
- v8::PropertyFilter::SKIP_SYMBOLS))
+ v8::PropertyFilter::SKIP_SYMBOLS),
+ v8::KeyConversionMode::kKeepNumbers)
.ToLocal(&properties));
CHECK_EQ(5u, properties->Length());
v8::Local<v8::Value> property;
@@ -10395,14 +10397,34 @@ THREADED_TEST(ObjectGetOwnPropertyNames) {
CHECK_EQ(property.As<v8::Int32>()->Value(), i);
}
- CHECK(value->GetOwnPropertyNames(context.local(), v8::ONLY_ENUMERABLE)
+ CHECK(value
+ ->GetOwnPropertyNames(context.local(),
+ v8::PropertyFilter::ONLY_ENUMERABLE,
+ v8::KeyConversionMode::kKeepNumbers)
.ToLocal(&properties));
+ v8::Local<v8::Array> number_properties;
+ CHECK(value
+ ->GetOwnPropertyNames(context.local(),
+ v8::PropertyFilter::ONLY_ENUMERABLE,
+ v8::KeyConversionMode::kConvertToString)
+ .ToLocal(&number_properties));
CHECK_EQ(4u, properties->Length());
for (int i = 0; i < 4; ++i) {
- v8::Local<v8::Value> property;
- CHECK(properties->Get(context.local(), i).ToLocal(&property) &&
- property->IsInt32());
- CHECK_EQ(property.As<v8::Int32>()->Value(), i);
+ v8::Local<v8::Value> property_index;
+ v8::Local<v8::Value> property_name;
+
+ CHECK(number_properties->Get(context.local(), i).ToLocal(&property_name));
+ CHECK(property_name->IsString());
+
+ CHECK(properties->Get(context.local(), i).ToLocal(&property_index));
+ CHECK(property_index->IsInt32());
+
+ CHECK_EQ(property_index.As<v8::Int32>()->Value(), i);
+ CHECK_EQ(property_name->ToNumber(context.local())
+ .ToLocalChecked()
+ .As<v8::Int32>()
+ ->Value(),
+ i);
}
value = value->GetPrototype().As<v8::Object>();
@@ -11134,264 +11156,6 @@ THREADED_TEST(Constructor) {
}
-static void ConstructorCallback(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- ApiTestFuzzer::Fuzz();
- Local<Object> This;
-
- v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- if (args.IsConstructCall()) {
- Local<Object> Holder = args.Holder();
- This = Object::New(args.GetIsolate());
- Local<Value> proto = Holder->GetPrototype();
- if (proto->IsObject()) {
- This->SetPrototype(context, proto).FromJust();
- }
- } else {
- This = args.This();
- }
-
- This->Set(context, v8_str("a"), args[0]).FromJust();
- args.GetReturnValue().Set(This);
-}
-
-
-static void FakeConstructorCallback(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- ApiTestFuzzer::Fuzz();
- args.GetReturnValue().Set(args[0]);
-}
-
-
-THREADED_TEST(ConstructorForObject) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- {
- Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
- instance_template->SetCallAsFunctionHandler(ConstructorCallback);
- Local<Object> instance =
- instance_template->NewInstance(context.local()).ToLocalChecked();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("obj"), instance)
- .FromJust());
- v8::TryCatch try_catch(isolate);
- Local<Value> value;
- CHECK(!try_catch.HasCaught());
-
- // Call the Object's constructor with a 32-bit signed integer.
- value = CompileRun("(function() { var o = new obj(28); return o.a; })()");
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsInt32());
- CHECK_EQ(28, value->Int32Value(context.local()).FromJust());
-
- Local<Value> args1[] = {v8_num(28)};
- Local<Value> value_obj1 =
- instance->CallAsConstructor(context.local(), 1, args1).ToLocalChecked();
- CHECK(value_obj1->IsObject());
- Local<Object> object1 = Local<Object>::Cast(value_obj1);
- value = object1->Get(context.local(), v8_str("a")).ToLocalChecked();
- CHECK(value->IsInt32());
- CHECK(!try_catch.HasCaught());
- CHECK_EQ(28, value->Int32Value(context.local()).FromJust());
-
- // Call the Object's constructor with a String.
- value =
- CompileRun("(function() { var o = new obj('tipli'); return o.a; })()");
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsString());
- String::Utf8Value string_value1(
- isolate, value->ToString(context.local()).ToLocalChecked());
- CHECK_EQ(0, strcmp("tipli", *string_value1));
-
- Local<Value> args2[] = {v8_str("tipli")};
- Local<Value> value_obj2 =
- instance->CallAsConstructor(context.local(), 1, args2).ToLocalChecked();
- CHECK(value_obj2->IsObject());
- Local<Object> object2 = Local<Object>::Cast(value_obj2);
- value = object2->Get(context.local(), v8_str("a")).ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsString());
- String::Utf8Value string_value2(
- isolate, value->ToString(context.local()).ToLocalChecked());
- CHECK_EQ(0, strcmp("tipli", *string_value2));
-
- // Call the Object's constructor with a Boolean.
- value = CompileRun("(function() { var o = new obj(true); return o.a; })()");
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsBoolean());
- CHECK(value->BooleanValue(context.local()).FromJust());
-
- Local<Value> args3[] = {v8::True(isolate)};
- Local<Value> value_obj3 =
- instance->CallAsConstructor(context.local(), 1, args3).ToLocalChecked();
- CHECK(value_obj3->IsObject());
- Local<Object> object3 = Local<Object>::Cast(value_obj3);
- value = object3->Get(context.local(), v8_str("a")).ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsBoolean());
- CHECK(value->BooleanValue(context.local()).FromJust());
-
- // Call the Object's constructor with undefined.
- Local<Value> args4[] = {v8::Undefined(isolate)};
- Local<Value> value_obj4 =
- instance->CallAsConstructor(context.local(), 1, args4).ToLocalChecked();
- CHECK(value_obj4->IsObject());
- Local<Object> object4 = Local<Object>::Cast(value_obj4);
- value = object4->Get(context.local(), v8_str("a")).ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsUndefined());
-
- // Call the Object's constructor with null.
- Local<Value> args5[] = {v8::Null(isolate)};
- Local<Value> value_obj5 =
- instance->CallAsConstructor(context.local(), 1, args5).ToLocalChecked();
- CHECK(value_obj5->IsObject());
- Local<Object> object5 = Local<Object>::Cast(value_obj5);
- value = object5->Get(context.local(), v8_str("a")).ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsNull());
- }
-
- // Check exception handling when there is no constructor set for the Object.
- {
- Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
- Local<Object> instance =
- instance_template->NewInstance(context.local()).ToLocalChecked();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("obj2"), instance)
- .FromJust());
- v8::TryCatch try_catch(isolate);
- Local<Value> value;
- CHECK(!try_catch.HasCaught());
-
- value = CompileRun("new obj2(28)");
- CHECK(try_catch.HasCaught());
- String::Utf8Value exception_value1(isolate, try_catch.Exception());
- CHECK_EQ(0,
- strcmp("TypeError: obj2 is not a constructor", *exception_value1));
- try_catch.Reset();
-
- Local<Value> args[] = {v8_num(29)};
- CHECK(instance->CallAsConstructor(context.local(), 1, args).IsEmpty());
- CHECK(try_catch.HasCaught());
- String::Utf8Value exception_value2(isolate, try_catch.Exception());
- CHECK_EQ(
- 0, strcmp("TypeError: object is not a constructor", *exception_value2));
- try_catch.Reset();
- }
-
- // Check the case when constructor throws exception.
- {
- Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
- instance_template->SetCallAsFunctionHandler(ThrowValue);
- Local<Object> instance =
- instance_template->NewInstance(context.local()).ToLocalChecked();
- CHECK(context->Global()
- ->Set(context.local(), v8_str("obj3"), instance)
- .FromJust());
- v8::TryCatch try_catch(isolate);
- Local<Value> value;
- CHECK(!try_catch.HasCaught());
-
- value = CompileRun("new obj3(22)");
- CHECK(try_catch.HasCaught());
- String::Utf8Value exception_value1(isolate, try_catch.Exception());
- CHECK_EQ(0, strcmp("22", *exception_value1));
- try_catch.Reset();
-
- Local<Value> args[] = {v8_num(23)};
- CHECK(instance->CallAsConstructor(context.local(), 1, args).IsEmpty());
- CHECK(try_catch.HasCaught());
- String::Utf8Value exception_value2(isolate, try_catch.Exception());
- CHECK_EQ(0, strcmp("23", *exception_value2));
- try_catch.Reset();
- }
-
- // Check whether constructor returns with an object or non-object.
- {
- Local<FunctionTemplate> function_template =
- FunctionTemplate::New(isolate, FakeConstructorCallback);
- Local<Function> function =
- function_template->GetFunction(context.local()).ToLocalChecked();
- Local<Object> instance1 = function;
- CHECK(instance1->IsObject());
- CHECK(instance1->IsFunction());
- CHECK(context->Global()
- ->Set(context.local(), v8_str("obj4"), instance1)
- .FromJust());
- v8::TryCatch try_catch(isolate);
- CHECK(!try_catch.HasCaught());
-
- {
- Local<Value> value = CompileRun("new obj4(28)");
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsObject());
-
- Local<Value> args[] = {v8_num(28)};
- value = instance1->CallAsConstructor(context.local(), 1, args)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsObject());
- }
-
- Local<Value> proxy = CompileRun("proxy = new Proxy({},{})");
- CHECK(!try_catch.HasCaught());
- CHECK(proxy->IsProxy());
-
- {
- Local<Value> value = CompileRun("new obj4(proxy)");
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsProxy());
- CHECK(value->SameValue(proxy));
-
- Local<Value> args[] = {proxy};
- value = instance1->CallAsConstructor(context.local(), 1, args)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(value->SameValue(proxy));
- }
-
- Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
- instance_template->SetCallAsFunctionHandler(FakeConstructorCallback);
- Local<Object> instance2 =
- instance_template->NewInstance(context.local()).ToLocalChecked();
- CHECK(instance2->IsObject());
- CHECK(instance2->IsFunction());
- CHECK(context->Global()
- ->Set(context.local(), v8_str("obj5"), instance2)
- .FromJust());
- CHECK(!try_catch.HasCaught());
-
- {
- Local<Value> value = CompileRun("new obj5(28)");
- CHECK(!try_catch.HasCaught());
- CHECK(!value->IsObject());
-
- Local<Value> args[] = {v8_num(28)};
- value = instance2->CallAsConstructor(context.local(), 1, args)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(!value->IsObject());
- }
-
- {
- Local<Value> value = CompileRun("new obj5(proxy)");
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsProxy());
- CHECK(value->SameValue(proxy));
-
- Local<Value> args[] = {proxy};
- value = instance2->CallAsConstructor(context.local(), 1, args)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(value->SameValue(proxy));
- }
- }
-}
-
-
THREADED_TEST(FunctionDescriptorException) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -11687,11 +11451,15 @@ THREADED_TEST(CallAsFunction) {
CHECK(!try_catch.HasCaught());
CHECK_EQ(17, value->Int32Value(context.local()).FromJust());
- // Check that the call-as-function handler can be called through
+ // Check that the call-as-function handler cannot be called through
// new.
value = CompileRun("new obj(43)");
- CHECK(!try_catch.HasCaught());
- CHECK_EQ(-43, value->Int32Value(context.local()).FromJust());
+ CHECK(value.IsEmpty());
+ CHECK(try_catch.HasCaught());
+ String::Utf8Value exception_value(isolate, try_catch.Exception());
+ CHECK_EQ(0,
+ strcmp("TypeError: obj is not a constructor", *exception_value));
+ try_catch.Reset();
// Check that the call-as-function handler can be called through
// the API.
@@ -11727,7 +11495,6 @@ THREADED_TEST(CallAsFunction) {
try_catch.Reset();
// Call an object without call-as-function handler through the API
- value = CompileRun("obj2(28)");
v8::Local<Value> args[] = {v8_num(28)};
CHECK(
instance->CallAsFunction(context.local(), instance, 1, args).IsEmpty());
@@ -14269,6 +14036,7 @@ void SetFunctionEntryHookTest::OnJitEvent(const v8::JitCodeEvent* event) {
info);
}
}
+ break;
default:
break;
}
@@ -14939,7 +14707,10 @@ void CheckIsSymbolAt(v8::Isolate* isolate, v8::Local<v8::Array> properties,
CHECK(value->IsSymbol());
v8::String::Utf8Value symbol_name(isolate,
Local<Symbol>::Cast(value)->Name());
- CHECK_EQ(0, strcmp(name, *symbol_name));
+ if (strcmp(name, *symbol_name) != 0) {
+ FATAL("properties[%u] was Symbol('%s') instead of Symbol('%s').", index,
+ name, *symbol_name);
+ }
}
void CheckStringArray(v8::Isolate* isolate, v8::Local<v8::Array> properties,
@@ -14953,7 +14724,9 @@ void CheckStringArray(v8::Isolate* isolate, v8::Local<v8::Array> properties,
DCHECK(value->IsSymbol());
} else {
v8::String::Utf8Value elm(isolate, value);
- CHECK_EQ(0, strcmp(names[i], *elm));
+ if (strcmp(names[i], *elm) != 0) {
+ FATAL("properties[%u] was '%s' instead of '%s'.", i, *elm, names[i]);
+ }
}
}
}
@@ -14995,9 +14768,13 @@ THREADED_TEST(PropertyEnumeration) {
"var proto = {x: 1, y: 2, z: 3};"
"var x = { __proto__: proto, w: 0, z: 1 };"
"result[3] = x;"
+ "result[4] = {21350:1};"
+ "x = Object.create(null);"
+ "x.a = 1; x[12345678] = 1;"
+ "result[5] = x;"
"result;");
v8::Local<v8::Array> elms = obj.As<v8::Array>();
- CHECK_EQ(4u, elms->Length());
+ CHECK_EQ(6u, elms->Length());
int elmc0 = 0;
const char** elmv0 = nullptr;
CheckProperties(
@@ -15040,6 +14817,20 @@ THREADED_TEST(PropertyEnumeration) {
isolate,
elms->Get(context.local(), v8::Integer::New(isolate, 3)).ToLocalChecked(),
elmc4, elmv4);
+ // Dictionary elements.
+ int elmc5 = 1;
+ const char* elmv5[] = {"21350"};
+ CheckProperties(
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 4)).ToLocalChecked(),
+ elmc5, elmv5);
+ // Dictionary properties.
+ int elmc6 = 2;
+ const char* elmv6[] = {"12345678", "a"};
+ CheckProperties(
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 5)).ToLocalChecked(),
+ elmc6, elmv6);
}
@@ -15210,38 +15001,6 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
}
-// Tests that ScriptData can be serialized and deserialized.
-TEST(PreCompileSerialization) {
- // Producing cached parser data while parsing eagerly is not supported.
- if (!i::FLAG_lazy) return;
-
- v8::V8::Initialize();
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- HandleScope handle_scope(isolate);
-
- const char* script = "function foo(a) { return a+1; }";
- v8::ScriptCompiler::Source source(v8_str(script));
- v8::ScriptCompiler::Compile(env.local(), &source,
- v8::ScriptCompiler::kProduceParserCache)
- .ToLocalChecked();
- // Serialize.
- const v8::ScriptCompiler::CachedData* cd = source.GetCachedData();
- i::byte* serialized_data = i::NewArray<i::byte>(cd->length);
- i::MemCopy(serialized_data, cd->data, cd->length);
-
- // Deserialize.
- i::ScriptData* deserialized = new i::ScriptData(serialized_data, cd->length);
-
- // Verify that the original is the same as the deserialized.
- CHECK_EQ(cd->length, deserialized->length());
- CHECK_EQ(0, memcmp(cd->data, deserialized->data(), cd->length));
-
- delete deserialized;
- i::DeleteArray(serialized_data);
-}
-
-
// This tests that we do not allow dictionary load/call inline caches
// to use functions that have not yet been compiled. The potential
// problem of loading a function that has not yet been compiled can
@@ -18321,6 +18080,33 @@ TEST(PromiseHook) {
// 6) resolve hook (p1)
CHECK_EQ(6, promise_hook_data->promise_hook_count);
+ promise_hook_data->Reset();
+ source =
+ "class X extends Promise {\n"
+ " static get [Symbol.species]() {\n"
+ " return Y;\n"
+ " }\n"
+ "}\n"
+ "class Y {\n"
+ " constructor(executor) {\n"
+ " return new Proxy(new Promise(executor), {});\n"
+ " }\n"
+ "}\n"
+ "var x = X.resolve().then(() => {});\n";
+
+ CompileRun(source);
+
+ promise_hook_data->Reset();
+ source =
+ "var resolve, value = '';\n"
+ "var p = new Promise(r => resolve = r);\n";
+
+ CompileRun(source);
+ CHECK_EQ(v8::Promise::kPending, GetPromise("p")->State());
+ CompileRun("resolve(Promise.resolve(value));\n");
+ CHECK_EQ(v8::Promise::kFulfilled, GetPromise("p")->State());
+ CHECK_EQ(9, promise_hook_data->promise_hook_count);
+
delete promise_hook_data;
isolate->SetPromiseHook(nullptr);
}
@@ -22006,6 +21792,42 @@ TEST(RunMicrotasksWithoutEnteringContext) {
isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
}
+static void Regress808911_MicrotaskCallback(void* data) {
+ // So here we expect "current context" to be context1 and
+ // "entered or microtask context" to be context2.
+ v8::Isolate* isolate = static_cast<v8::Isolate*>(data);
+ CHECK(isolate->GetCurrentContext() != isolate->GetEnteredContext());
+ CHECK(isolate->GetCurrentContext() !=
+ isolate->GetEnteredOrMicrotaskContext());
+}
+
+static void Regress808911_CurrentContextWrapper(
+ const v8::FunctionCallbackInfo<Value>& info) {
+ // So here we expect "current context" to be context1 and
+ // "entered or microtask context" to be context2.
+ v8::Isolate* isolate = info.GetIsolate();
+ CHECK(isolate->GetCurrentContext() != isolate->GetEnteredContext());
+ CHECK(isolate->GetCurrentContext() !=
+ isolate->GetEnteredOrMicrotaskContext());
+ isolate->EnqueueMicrotask(Regress808911_MicrotaskCallback, isolate);
+ isolate->RunMicrotasks();
+}
+
+THREADED_TEST(Regress808911) {
+ v8::Isolate* isolate = CcTest::isolate();
+ HandleScope handle_scope(isolate);
+ Local<Context> context1 = Context::New(isolate);
+ Local<Function> function;
+ {
+ Context::Scope context_scope(context1);
+ function = Function::New(context1, Regress808911_CurrentContextWrapper)
+ .ToLocalChecked();
+ }
+ Local<Context> context2 = Context::New(isolate);
+ Context::Scope context_scope(context2);
+ function->CallAsFunction(context2, v8::Undefined(isolate), 0, nullptr)
+ .ToLocalChecked();
+}
TEST(ScopedMicrotasks) {
LocalContext env;
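
The regression test above distinguishes two isolate-level accessors: GetCurrentContext() follows the context of the function being run (context1, where it was created), while GetEnteredOrMicrotaskContext() reports the context that was explicitly entered (context2). Both callbacks reduce to the same assertion:

    // Invoked with context2 entered, for a function created in context1:
    CHECK(isolate->GetCurrentContext() !=
          isolate->GetEnteredOrMicrotaskContext());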
@@ -24420,6 +24242,60 @@ TEST(PromiseStateAndValue) {
CHECK(v8_str("rejected")->SameValue(promise->Result()));
}
+TEST(ResolvedPromiseReFulfill) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::String> value1 = v8::String::NewFromUtf8(isolate, "foo");
+ v8::Local<v8::String> value2 = v8::String::NewFromUtf8(isolate, "bar");
+
+ v8::Local<v8::Promise::Resolver> resolver =
+ v8::Promise::Resolver::New(context.local()).ToLocalChecked();
+ v8::Local<v8::Promise> promise = resolver->GetPromise();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kPending);
+
+ resolver->Resolve(context.local(), value1).ToChecked();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kFulfilled);
+ CHECK_EQ(promise->Result(), value1);
+
+ // This should be a no-op.
+ resolver->Resolve(context.local(), value2).ToChecked();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kFulfilled);
+ CHECK_EQ(promise->Result(), value1);
+
+ // This should be a no-op.
+ resolver->Reject(context.local(), value2).ToChecked();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kFulfilled);
+ CHECK_EQ(promise->Result(), value1);
+}
+
+TEST(RejectedPromiseReFulfill) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::String> value1 = v8::String::NewFromUtf8(isolate, "foo");
+ v8::Local<v8::String> value2 = v8::String::NewFromUtf8(isolate, "bar");
+
+ v8::Local<v8::Promise::Resolver> resolver =
+ v8::Promise::Resolver::New(context.local()).ToLocalChecked();
+ v8::Local<v8::Promise> promise = resolver->GetPromise();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kPending);
+
+ resolver->Reject(context.local(), value1).ToChecked();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kRejected);
+ CHECK_EQ(promise->Result(), value1);
+
+ // This should be a no-op.
+ resolver->Reject(context.local(), value2).ToChecked();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kRejected);
+ CHECK_EQ(promise->Result(), value1);
+
+ // This should be a no-op.
+ resolver->Resolve(context.local(), value2).ToChecked();
+ CHECK_EQ(promise->State(), v8::Promise::PromiseState::kRejected);
+ CHECK_EQ(promise->Result(), value1);
+}
+
TEST(DisallowJavascriptExecutionScope) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
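
Both tests pin down the settle-once contract of v8::Promise::Resolver: once the promise is fulfilled or rejected, later Resolve/Reject calls still succeed as API calls but leave the state and result untouched. In miniature:

    resolver->Resolve(context, value1).ToChecked();  // -> kFulfilled
    resolver->Resolve(context, value2).ToChecked();  // no-op: already settled
    resolver->Reject(context, value2).ToChecked();   // likewise a no-op
    CHECK_EQ(promise->Result(), value1);             // first value wins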
@@ -25157,33 +25033,6 @@ TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
}
-TEST(StreamingProducesParserCache) {
- const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
- nullptr};
-
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
-
- v8::ScriptCompiler::StreamedSource source(
- new TestSourceStream(chunks),
- v8::ScriptCompiler::StreamedSource::ONE_BYTE);
- v8::ScriptCompiler::ScriptStreamingTask* task =
- v8::ScriptCompiler::StartStreamingScript(
- isolate, &source, v8::ScriptCompiler::kProduceParserCache);
-
- // TestSourceStream::GetMoreData won't block, so it's OK to just run the
- // task here in the main thread.
- task->Run();
- delete task;
-
- const v8::ScriptCompiler::CachedData* cached_data = source.GetCachedData();
- CHECK_NOT_NULL(cached_data);
- CHECK_NOT_NULL(cached_data->data);
- CHECK(!cached_data->rejected);
- CHECK_GT(cached_data->length, 0);
-}
-
TEST(StreamingWithDebuggingEnabledLate) {
// The streaming parser can only parse lazily, i.e. inner functions are not
@@ -25357,13 +25206,11 @@ TEST(CodeCache) {
v8::ScriptOrigin script_origin(v8_str(origin));
v8::ScriptCompiler::Source source(source_string, script_origin);
v8::ScriptCompiler::CompileOptions option =
- v8::ScriptCompiler::kProduceCodeCache;
- v8::ScriptCompiler::Compile(context, &source, option).ToLocalChecked();
- int length = source.GetCachedData()->length;
- uint8_t* cache_data = new uint8_t[length];
- memcpy(cache_data, source.GetCachedData()->data, length);
- cache = new v8::ScriptCompiler::CachedData(
- cache_data, length, v8::ScriptCompiler::CachedData::BufferOwned);
+ v8::ScriptCompiler::kNoCompileOptions;
+ v8::Local<v8::Script> script =
+ v8::ScriptCompiler::Compile(context, &source, option).ToLocalChecked();
+ cache = v8::ScriptCompiler::CreateCodeCache(script->GetUnboundScript(),
+ source_string);
}
isolate1->Dispose();
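
This hunk migrates the test off the removed kProduceCodeCache option: rather than requesting a cache during compilation and hand-copying its bytes, callers now compile normally and ask for a cache afterwards. The new pattern, as used above:

    v8::Local<v8::Script> script =
        v8::ScriptCompiler::Compile(context, &source).ToLocalChecked();
    v8::ScriptCompiler::CachedData* cache =
        v8::ScriptCompiler::CreateCodeCache(script->GetUnboundScript(),
                                            source_string);
    // The caller owns the returned CachedData; no manual memcpy needed.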
@@ -25414,15 +25261,6 @@ void TestInvalidCacheData(v8::ScriptCompiler::CompileOptions option) {
script->Run(context).ToLocalChecked()->Int32Value(context).FromJust());
}
-TEST(InvalidParserCacheData) {
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
- LocalContext context;
- if (i::FLAG_lazy) {
- // Cached parser data is not consumed while parsing eagerly.
- TestInvalidCacheData(v8::ScriptCompiler::kConsumeParserCache);
- }
-}
TEST(InvalidCodeCacheData) {
v8::V8::Initialize();
@@ -25432,67 +25270,6 @@ TEST(InvalidCodeCacheData) {
}
-TEST(ParserCacheRejectedGracefully) {
- // Producing cached parser data while parsing eagerly is not supported.
- if (!i::FLAG_lazy) return;
-
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
- LocalContext context;
- // Produce valid cached data.
- v8::ScriptOrigin origin(v8_str("origin"));
- v8::Local<v8::String> source_str = v8_str("function foo() {}");
- v8::ScriptCompiler::Source source(source_str, origin);
- v8::Local<v8::Script> script =
- v8::ScriptCompiler::Compile(context.local(), &source,
- v8::ScriptCompiler::kProduceParserCache)
- .ToLocalChecked();
- USE(script);
- const v8::ScriptCompiler::CachedData* original_cached_data =
- source.GetCachedData();
- CHECK_NOT_NULL(original_cached_data);
- CHECK_NOT_NULL(original_cached_data->data);
- CHECK(!original_cached_data->rejected);
- CHECK_GT(original_cached_data->length, 0);
- // Recompiling the same script with it won't reject the data.
- {
- v8::ScriptCompiler::Source source_with_cached_data(
- source_str, origin,
- new v8::ScriptCompiler::CachedData(original_cached_data->data,
- original_cached_data->length));
- v8::Local<v8::Script> script =
- v8::ScriptCompiler::Compile(context.local(), &source_with_cached_data,
- v8::ScriptCompiler::kConsumeParserCache)
- .ToLocalChecked();
- USE(script);
- const v8::ScriptCompiler::CachedData* new_cached_data =
- source_with_cached_data.GetCachedData();
- CHECK_NOT_NULL(new_cached_data);
- CHECK(!new_cached_data->rejected);
- }
- // Compile an incompatible script with the cached data. The new script doesn't
- // have the same starting position for the function as the old one, so the old
- // cached data will be incompatible with it and will be rejected.
- {
- v8::Local<v8::String> incompatible_source_str =
- v8_str(" function foo() {}");
- v8::ScriptCompiler::Source source_with_cached_data(
- incompatible_source_str, origin,
- new v8::ScriptCompiler::CachedData(original_cached_data->data,
- original_cached_data->length));
- v8::Local<v8::Script> script =
- v8::ScriptCompiler::Compile(context.local(), &source_with_cached_data,
- v8::ScriptCompiler::kConsumeParserCache)
- .ToLocalChecked();
- USE(script);
- const v8::ScriptCompiler::CachedData* new_cached_data =
- source_with_cached_data.GetCachedData();
- CHECK_NOT_NULL(new_cached_data);
- CHECK(new_cached_data->rejected);
- }
-}
-
-
TEST(StringConcatOverflow) {
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
@@ -25814,6 +25591,123 @@ TEST(ExperimentalExtras) {
CHECK_EQ(7, result->Int32Value(env.local()).FromJust());
}
+TEST(ExtrasCreatePromise) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ LocalContext env;
+ v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
+
+ auto func = binding->Get(env.local(), v8_str("testCreatePromise"))
+ .ToLocalChecked()
+ .As<v8::Function>();
+ CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
+
+ auto promise = CompileRun(
+ "func();\n"
+ "func();\n"
+ "%OptimizeFunctionOnNextCall(func);\n"
+ "func()\n")
+ .As<v8::Promise>();
+ CHECK_EQ(v8::Promise::kPending, promise->State());
+}
+
+TEST(ExtrasCreatePromiseWithParent) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ LocalContext env;
+ v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
+
+ auto func = binding->Get(env.local(), v8_str("testCreatePromiseWithParent"))
+ .ToLocalChecked()
+ .As<v8::Function>();
+ CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
+
+ auto promise = CompileRun(
+ "var parent = new Promise((a, b) => {});\n"
+ "func(parent);\n"
+ "func(parent);\n"
+ "%OptimizeFunctionOnNextCall(func);\n"
+ "func(parent)\n")
+ .As<v8::Promise>();
+ CHECK_EQ(v8::Promise::kPending, promise->State());
+}
+
+TEST(ExtrasRejectPromise) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ LocalContext env;
+ v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
+
+ auto func = binding->Get(env.local(), v8_str("testRejectPromise"))
+ .ToLocalChecked()
+ .As<v8::Function>();
+ CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
+
+ auto rejected_promise = CompileRun(
+ "function newPromise() {\n"
+ " return new Promise((a, b) => {});\n"
+ "}\n"
+ "func(newPromise(), 1);\n"
+ "func(newPromise(), 1);\n"
+ "%OptimizeFunctionOnNextCall(func);\n"
+ "var promise = newPromise();\n"
+ "func(promise, 1);\n"
+ "promise;\n")
+ .As<v8::Promise>();
+ CHECK_EQ(v8::Promise::kRejected, rejected_promise->State());
+ CHECK_EQ(1, rejected_promise->Result()->Int32Value(env.local()).FromJust());
+}
+
+TEST(ExtrasResolvePromise) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ LocalContext env;
+ v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
+
+ auto func = binding->Get(env.local(), v8_str("testResolvePromise"))
+ .ToLocalChecked()
+ .As<v8::Function>();
+ CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
+
+ auto pending_promise = CompileRun(
+ "function newPromise() {\n"
+ " return new Promise((a, b) => {});\n"
+ "}\n"
+ "func(newPromise(), newPromise());\n"
+ "func(newPromise(), newPromise());\n"
+ "%OptimizeFunctionOnNextCall(func);\n"
+ "var promise = newPromise();\n"
+ "func(promise, newPromise());\n"
+ "promise;\n")
+ .As<v8::Promise>();
+ CHECK_EQ(v8::Promise::kPending, pending_promise->State());
+
+ auto fulfilled_promise = CompileRun(
+ "function newPromise() {\n"
+ " return new Promise((a, b) => {});\n"
+ "}\n"
+ "func(newPromise(), 1);\n"
+ "func(newPromise(), 1);\n"
+ "%OptimizeFunctionOnNextCall(func);\n"
+ "var promise = newPromise();\n"
+ "func(promise, 1);\n"
+ "promise;\n")
+ .As<v8::Promise>();
+ CHECK_EQ(v8::Promise::kFulfilled, fulfilled_promise->State());
+ CHECK_EQ(1, fulfilled_promise->Result()->Int32Value(env.local()).FromJust());
+}
TEST(ExtrasUtilsObject) {
LocalContext context;
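
All four extras tests above share the same warm-up shape: run the builtin twice to gather type feedback, then force optimization and run it again, verifying the promise APIs still behave under TurboFan. %OptimizeFunctionOnNextCall is a test-only intrinsic, hence the i::FLAG_allow_natives_syntax = true at the top of each test:

    CompileRun(
        "func();\n"                          // warm up, gather feedback
        "func();\n"
        "%OptimizeFunctionOnNextCall(func);\n"
        "func();\n");                        // now runs optimized code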
@@ -27225,3 +27119,21 @@ TEST(PersistentValueMap) {
.ToLocalChecked();
map.Set("key", value);
}
+
+TEST(WasmStreamingAbort) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::WasmModuleObjectBuilderStreaming streaming(isolate);
+ streaming.Abort(v8::Object::New(isolate));
+ CHECK_EQ(streaming.GetPromise()->State(), v8::Promise::kRejected);
+}
+
+TEST(WasmStreamingAbortNoReject) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::WasmModuleObjectBuilderStreaming streaming(isolate);
+ streaming.Abort({});
+ CHECK_EQ(streaming.GetPromise()->State(), v8::Promise::kPending);
+}
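
The two new tests document both Abort() modes of WasmModuleObjectBuilderStreaming: aborting with a value rejects the compilation promise with that value, while aborting with an empty MaybeLocal ({}) tears the builder down without settling the promise. Condensed:

    v8::WasmModuleObjectBuilderStreaming streaming(isolate);
    streaming.Abort(v8::Object::New(isolate));  // promise -> kRejected
    // ...whereas streaming.Abort({}) leaves GetPromise() in kPending.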
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 10a111c8df..a5afc1e2f3 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -3923,6 +3923,101 @@ TEST(use_scratch_register_scope) {
CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
}
+TEST(use_scratch_vfp_register_scope) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, nullptr, 0);
+
+ VfpRegList orig_scratches = *assm.GetScratchVfpRegisterList();
+
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ CHECK_EQ(orig_scratches, d14.ToVfpRegList() | d15.ToVfpRegList());
+ } else {
+ CHECK_EQ(orig_scratches, d14.ToVfpRegList());
+ }
+
+ // Test each configuration of scratch registers we can have at the same time.
+
+ {
+ UseScratchRegisterScope temps(&assm);
+
+ SwVfpRegister s1_scratch = temps.AcquireS();
+ CHECK_EQ(s1_scratch, s28);
+
+ SwVfpRegister s2_scratch = temps.AcquireS();
+ CHECK_EQ(s2_scratch, s29);
+
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ SwVfpRegister s3_scratch = temps.AcquireS();
+ CHECK_EQ(s3_scratch, s30);
+
+ SwVfpRegister s4_scratch = temps.AcquireS();
+ CHECK_EQ(s4_scratch, s31);
+ }
+ }
+
+ CHECK_EQ(*assm.GetScratchVfpRegisterList(), orig_scratches);
+
+ {
+ UseScratchRegisterScope temps(&assm);
+
+ SwVfpRegister s1_scratch = temps.AcquireS();
+ CHECK_EQ(s1_scratch, s28);
+
+ SwVfpRegister s2_scratch = temps.AcquireS();
+ CHECK_EQ(s2_scratch, s29);
+
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ DwVfpRegister d_scratch = temps.AcquireD();
+ CHECK_EQ(d_scratch, d15);
+ }
+ }
+
+ CHECK_EQ(*assm.GetScratchVfpRegisterList(), orig_scratches);
+
+ {
+ UseScratchRegisterScope temps(&assm);
+
+ DwVfpRegister d_scratch = temps.AcquireD();
+ CHECK_EQ(d_scratch, d14);
+
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ SwVfpRegister s1_scratch = temps.AcquireS();
+ CHECK_EQ(s1_scratch, s30);
+
+ SwVfpRegister s2_scratch = temps.AcquireS();
+ CHECK_EQ(s2_scratch, s31);
+ }
+ }
+
+ CHECK_EQ(*assm.GetScratchVfpRegisterList(), orig_scratches);
+
+ {
+ UseScratchRegisterScope temps(&assm);
+
+ DwVfpRegister d1_scratch = temps.AcquireD();
+ CHECK_EQ(d1_scratch, d14);
+
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ DwVfpRegister d2_scratch = temps.AcquireD();
+ CHECK_EQ(d2_scratch, d15);
+ }
+ }
+
+ CHECK_EQ(*assm.GetScratchVfpRegisterList(), orig_scratches);
+
+ if (CpuFeatures::IsSupported(NEON)) {
+ UseScratchRegisterScope temps(&assm);
+
+ QwNeonRegister q_scratch = temps.AcquireQ();
+ CHECK_EQ(q_scratch, q7);
+ }
+
+ CHECK_EQ(*assm.GetScratchVfpRegisterList(), orig_scratches);
+}
+
TEST(split_add_immediate) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
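
The expectations in use_scratch_vfp_register_scope follow from ARM's register aliasing: each D register overlays two S registers (d14 = {s28, s29}, d15 = {s30, s31}) and q7 overlays d14 and d15, so acquiring a wider scratch consumes the narrower lanes. Assuming VFP32DREGS, as in the test:

    UseScratchRegisterScope temps(&assm);
    DwVfpRegister d = temps.AcquireD();  // takes d14, i.e. s28 and s29
    SwVfpRegister s = temps.AcquireS();  // next free lane is s30 (in d15)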
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 4fc80201d2..21d5a6999d 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -196,13 +196,13 @@ static void InitializeVM() {
RESET(); \
START_AFTER_RESET();
-#define RUN() \
- MakeAssemblerBufferExecutable(buf, allocated); \
- Assembler::FlushICache(isolate, buf, masm.SizeOfGeneratedCode()); \
- { \
- void (*test_function)(void); \
- memcpy(&test_function, &buf, sizeof(buf)); \
- test_function(); \
+#define RUN() \
+ MakeAssemblerBufferExecutable(buf, allocated); \
+ Assembler::FlushICache(buf, masm.SizeOfGeneratedCode()); \
+ { \
+ void (*test_function)(void); \
+ memcpy(&test_function, &buf, sizeof(buf)); \
+ test_function(); \
}
#define END() \
@@ -251,37 +251,37 @@ TEST(stack_ops) {
SETUP();
START();
- // save csp.
- __ Mov(x29, csp);
+ // save sp.
+ __ Mov(x29, sp);
- // Set the csp to a known value.
+ // Set the sp to a known value.
__ Mov(x16, 0x1000);
- __ Mov(csp, x16);
- __ Mov(x0, csp);
+ __ Mov(sp, x16);
+ __ Mov(x0, sp);
- // Add immediate to the csp, and move the result to a normal register.
- __ Add(csp, csp, Operand(0x50));
- __ Mov(x1, csp);
+ // Add immediate to the sp, and move the result to a normal register.
+ __ Add(sp, sp, Operand(0x50));
+ __ Mov(x1, sp);
- // Add extended to the csp, and move the result to a normal register.
+ // Add extended to the sp, and move the result to a normal register.
__ Mov(x17, 0xFFF);
- __ Add(csp, csp, Operand(x17, SXTB));
- __ Mov(x2, csp);
+ __ Add(sp, sp, Operand(x17, SXTB));
+ __ Mov(x2, sp);
- // Create an csp using a logical instruction, and move to normal register.
- __ Orr(csp, xzr, Operand(0x1FFF));
- __ Mov(x3, csp);
+ // Create an sp using a logical instruction, and move to normal register.
+ __ Orr(sp, xzr, Operand(0x1FFF));
+ __ Mov(x3, sp);
- // Write wcsp using a logical instruction.
- __ Orr(wcsp, wzr, Operand(0xFFFFFFF8L));
- __ Mov(x4, csp);
+ // Write wsp using a logical instruction.
+ __ Orr(wsp, wzr, Operand(0xFFFFFFF8L));
+ __ Mov(x4, sp);
- // Write csp, and read back wcsp.
- __ Orr(csp, xzr, Operand(0xFFFFFFF8L));
- __ Mov(w5, wcsp);
+ // Write sp, and read back wsp.
+ __ Orr(sp, xzr, Operand(0xFFFFFFF8L));
+ __ Mov(w5, wsp);
- // restore csp.
- __ Mov(csp, x29);
+ // restore sp.
+ __ Mov(sp, x29);
END();
RUN();
@@ -839,15 +839,15 @@ TEST(bic) {
__ Bic(x10, x0, Operand(0x1F));
__ Bic(x11, x0, Operand(0x100));
- // Test bic into csp when the constant cannot be encoded in the immediate
+ // Test bic into sp when the constant cannot be encoded in the immediate
// field.
- // Use x20 to preserve csp. We check for the result via x21 because the
- // test infrastructure requires that csp be restored to its original value.
- __ Mov(x20, csp);
+ // Use x20 to preserve sp. We check for the result via x21 because the
+ // test infrastructure requires that sp be restored to its original value.
+ __ Mov(x20, sp);
__ Mov(x0, 0xFFFFFF);
- __ Bic(csp, x0, Operand(0xABCDEF));
- __ Mov(x21, csp);
- __ Mov(csp, x20);
+ __ Bic(sp, x0, Operand(0xABCDEF));
+ __ Mov(x21, sp);
+ __ Mov(sp, x20);
END();
RUN();
@@ -6686,13 +6686,23 @@ TEST(ldur_stur) {
TEARDOWN();
}
+namespace {
+
+void LoadLiteral(MacroAssembler* masm, Register reg, uint64_t imm) {
+ // Since we do not allow non-relocatable entries in the literal pool, we need
+ // to fake a relocation mode that is not NONE here.
+ masm->Ldr(reg, Immediate(imm, RelocInfo::RUNTIME_ENTRY));
+}
+
+} // namespace
+
TEST(ldr_pcrel_large_offset) {
INIT_V8();
SETUP_SIZE(1 * MB);
START();
- __ Ldr(x1, Immediate(0x1234567890ABCDEFUL));
+ LoadLiteral(&masm, x1, 0x1234567890ABCDEFUL);
{
v8::internal::PatchingAssembler::BlockPoolsScope scope(&masm);
@@ -6702,7 +6712,7 @@ TEST(ldr_pcrel_large_offset) {
}
}
- __ Ldr(x2, Immediate(0x1234567890ABCDEFUL));
+ LoadLiteral(&masm, x2, 0x1234567890ABCDEFUL);
END();
@@ -6719,14 +6729,13 @@ TEST(ldr_literal) {
SETUP();
START();
- __ Ldr(x2, Immediate(0x1234567890ABCDEFUL));
- __ Ldr(d13, 1.234);
+ LoadLiteral(&masm, x2, 0x1234567890ABCDEFUL);
+
END();
RUN();
CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
- CHECK_EQUAL_FP64(1.234, d13);
TEARDOWN();
}
@@ -6758,8 +6767,8 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
__ CheckConstPool(true, true);
CHECK_CONSTANT_POOL_SIZE(0);
- __ Ldr(x0, Immediate(0x1234567890ABCDEFUL));
- __ Ldr(d0, 1.234);
+ LoadLiteral(&masm, x0, 0x1234567890ABCDEFUL);
+ LoadLiteral(&masm, x1, 0xABCDEF1234567890UL);
CHECK_CONSTANT_POOL_SIZE(16);
code_size += 2 * kInstructionSize;
@@ -6799,8 +6808,8 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
CHECK_CONSTANT_POOL_SIZE(0);
// These loads should be after the pool (and will require a new one).
- __ Ldr(x4, Immediate(0x34567890ABCDEF12UL));
- __ Ldr(d4, 123.4);
+ LoadLiteral(&masm, x4, 0x34567890ABCDEF12UL);
+ LoadLiteral(&masm, x5, 0xABCDEF0123456789UL);
CHECK_CONSTANT_POOL_SIZE(16);
END();
@@ -6808,9 +6817,9 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
// Check that the literals loaded correctly.
CHECK_EQUAL_64(0x1234567890ABCDEFUL, x0);
- CHECK_EQUAL_FP64(1.234, d0);
+ CHECK_EQUAL_64(0xABCDEF1234567890UL, x1);
CHECK_EQUAL_64(0x34567890ABCDEF12UL, x4);
- CHECK_EQUAL_FP64(123.4, d4);
+ CHECK_EQUAL_64(0xABCDEF0123456789UL, x5);
TEARDOWN();
}
@@ -7158,12 +7167,12 @@ TEST(preshift_immediates) {
// pre-shifted encodable immediate followed by a post-shift applied to
// the arithmetic or logical operation.
- // Save csp.
- __ Mov(x29, csp);
+ // Save sp.
+ __ Mov(x29, sp);
// Set the registers to known values.
__ Mov(x0, 0x1000);
- __ Mov(csp, 0x1000);
+ __ Mov(sp, 0x1000);
// Arithmetic ops.
__ Add(x1, x0, 0x1F7DE);
@@ -7181,21 +7190,21 @@ TEST(preshift_immediates) {
__ Eor(x11, x0, 0x18001);
// Ops using the stack pointer.
- __ Add(csp, csp, 0x1F7F0);
- __ Mov(x12, csp);
- __ Mov(csp, 0x1000);
+ __ Add(sp, sp, 0x1F7F0);
+ __ Mov(x12, sp);
+ __ Mov(sp, 0x1000);
- __ Adds(x13, csp, 0x1F7F0);
+ __ Adds(x13, sp, 0x1F7F0);
- __ Orr(csp, x0, 0x1F7F0);
- __ Mov(x14, csp);
- __ Mov(csp, 0x1000);
+ __ Orr(sp, x0, 0x1F7F0);
+ __ Mov(x14, sp);
+ __ Mov(sp, 0x1000);
- __ Add(csp, csp, 0x10100);
- __ Mov(x15, csp);
+ __ Add(sp, sp, 0x10100);
+ __ Mov(x15, sp);
- // Restore csp.
- __ Mov(csp, x29);
+ // Restore sp.
+ __ Mov(sp, x29);
END();
RUN();
@@ -11840,8 +11849,7 @@ TEST(system_msr) {
TEARDOWN();
}
-
-TEST(system_nop) {
+TEST(system) {
INIT_V8();
SETUP();
RegisterDump before;
@@ -11849,6 +11857,7 @@ TEST(system_nop) {
START();
before.Dump(&masm);
__ Nop();
+ __ Csdb();
END();
RUN();
@@ -11867,7 +11876,7 @@ TEST(zero_dest) {
START();
// Preserve the system stack pointer, in case we clobber it.
- __ Mov(x30, csp);
+ __ Mov(x30, sp);
// Initialize the other registers used in this test.
uint64_t literal_base = 0x0100001000100101UL;
__ Mov(x0, 0);
@@ -11907,12 +11916,12 @@ TEST(zero_dest) {
__ sub(xzr, x7, xzr);
__ sub(xzr, xzr, x7);
- // Swap the saved system stack pointer with the real one. If csp was written
+ // Swap the saved system stack pointer with the real one. If sp was written
// during the test, it will show up in x30. This is done because the test
- // framework assumes that csp will be valid at the end of the test.
+ // framework assumes that sp will be valid at the end of the test.
__ Mov(x29, x30);
- __ Mov(x30, csp);
- __ Mov(csp, x29);
+ __ Mov(x30, sp);
+ __ Mov(sp, x29);
// We used x29 as a scratch register, so reset it to make sure it doesn't
// trigger a test failure.
__ Add(x29, x28, x1);
@@ -11934,7 +11943,7 @@ TEST(zero_dest_setflags) {
START();
// Preserve the system stack pointer, in case we clobber it.
- __ Mov(x30, csp);
+ __ Mov(x30, sp);
// Initialize the other registers used in this test.
uint64_t literal_base = 0x0100001000100101UL;
__ Mov(x0, 0);
@@ -11972,12 +11981,12 @@ TEST(zero_dest_setflags) {
__ subs(xzr, x3, xzr);
__ subs(xzr, xzr, x3);
- // Swap the saved system stack pointer with the real one. If csp was written
+ // Swap the saved system stack pointer with the real one. If sp was written
// during the test, it will show up in x30. This is done because the test
- // framework assumes that csp will be valid at the end of the test.
+ // framework assumes that sp will be valid at the end of the test.
__ Mov(x29, x30);
- __ Mov(x30, csp);
- __ Mov(csp, x29);
+ __ Mov(x30, sp);
+ __ Mov(sp, x29);
// We used x29 as a scratch register, so reset it to make sure it doesn't
// trigger a test failure.
__ Add(x29, x28, x1);
@@ -12008,15 +12017,15 @@ TEST(register_bit) {
CHECK(xzr.bit() == (1UL << kZeroRegCode));
// Internal ABI definitions.
- CHECK(csp.bit() == (1UL << kSPRegInternalCode));
- CHECK(csp.bit() != xzr.bit());
+ CHECK(sp.bit() == (1UL << kSPRegInternalCode));
+ CHECK(sp.bit() != xzr.bit());
// xn.bit() == wn.bit() at all times, for the same n.
CHECK(x0.bit() == w0.bit());
CHECK(x1.bit() == w1.bit());
CHECK(x10.bit() == w10.bit());
CHECK(xzr.bit() == wzr.bit());
- CHECK(csp.bit() == wcsp.bit());
+ CHECK(sp.bit() == wsp.bit());
}
@@ -12478,7 +12487,6 @@ static void PushPopFPSimpleHelper(int reg_count, int reg_size,
uint64_t literal_base = 0x0100001000100101UL;
{
- CHECK(__ StackPointer().Is(csp));
int i;
// Initialize the registers, using X registers to load the literal.
@@ -12637,8 +12645,6 @@ static void PushPopMixedMethodsHelper(int reg_size) {
START();
{
- CHECK(__ StackPointer().Is(csp));
-
__ Mov(x[3], literal_base * 3);
__ Mov(x[2], literal_base * 2);
__ Mov(x[1], literal_base * 1);
@@ -12681,15 +12687,11 @@ TEST(push_pop_mixed_methods_64) {
PushPopMixedMethodsHelper(kXRegSizeInBits);
}
-
-TEST(push_pop_csp) {
+TEST(push_pop) {
INIT_V8();
SETUP();
START();
-
- CHECK(csp.Is(__ StackPointer()));
-
__ Mov(x3, 0x3333333333333333UL);
__ Mov(x2, 0x2222222222222222UL);
__ Mov(x1, 0x1111111111111111UL);
@@ -13863,8 +13865,8 @@ TEST(isvalid) {
CHECK(xzr.IsValid());
CHECK(wzr.IsValid());
- CHECK(csp.IsValid());
- CHECK(wcsp.IsValid());
+ CHECK(sp.IsValid());
+ CHECK(wsp.IsValid());
CHECK(d0.IsValid());
CHECK(s0.IsValid());
@@ -13875,14 +13877,14 @@ TEST(isvalid) {
CHECK(w0.IsRegister());
CHECK(xzr.IsRegister());
CHECK(wzr.IsRegister());
- CHECK(csp.IsRegister());
- CHECK(wcsp.IsRegister());
+ CHECK(sp.IsRegister());
+ CHECK(wsp.IsRegister());
CHECK(!x0.IsVRegister());
CHECK(!w0.IsVRegister());
CHECK(!xzr.IsVRegister());
CHECK(!wzr.IsVRegister());
- CHECK(!csp.IsVRegister());
- CHECK(!wcsp.IsVRegister());
+ CHECK(!sp.IsVRegister());
+ CHECK(!wsp.IsVRegister());
CHECK(d0.IsVRegister());
CHECK(s0.IsVRegister());
@@ -13898,8 +13900,8 @@ TEST(isvalid) {
CHECK(static_cast<CPURegister>(xzr).IsValid());
CHECK(static_cast<CPURegister>(wzr).IsValid());
- CHECK(static_cast<CPURegister>(csp).IsValid());
- CHECK(static_cast<CPURegister>(wcsp).IsValid());
+ CHECK(static_cast<CPURegister>(sp).IsValid());
+ CHECK(static_cast<CPURegister>(wsp).IsValid());
CHECK(static_cast<CPURegister>(d0).IsValid());
CHECK(static_cast<CPURegister>(s0).IsValid());
@@ -13910,14 +13912,14 @@ TEST(isvalid) {
CHECK(static_cast<CPURegister>(w0).IsRegister());
CHECK(static_cast<CPURegister>(xzr).IsRegister());
CHECK(static_cast<CPURegister>(wzr).IsRegister());
- CHECK(static_cast<CPURegister>(csp).IsRegister());
- CHECK(static_cast<CPURegister>(wcsp).IsRegister());
+ CHECK(static_cast<CPURegister>(sp).IsRegister());
+ CHECK(static_cast<CPURegister>(wsp).IsRegister());
CHECK(!static_cast<CPURegister>(x0).IsVRegister());
CHECK(!static_cast<CPURegister>(w0).IsVRegister());
CHECK(!static_cast<CPURegister>(xzr).IsVRegister());
CHECK(!static_cast<CPURegister>(wzr).IsVRegister());
- CHECK(!static_cast<CPURegister>(csp).IsVRegister());
- CHECK(!static_cast<CPURegister>(wcsp).IsVRegister());
+ CHECK(!static_cast<CPURegister>(sp).IsVRegister());
+ CHECK(!static_cast<CPURegister>(wsp).IsVRegister());
CHECK(static_cast<CPURegister>(d0).IsVRegister());
CHECK(static_cast<CPURegister>(s0).IsVRegister());
@@ -13995,11 +13997,11 @@ TEST(cpureglist_utils_x) {
CHECK(!test.IncludesAliasOf(x4));
CHECK(!test.IncludesAliasOf(x30));
CHECK(!test.IncludesAliasOf(xzr));
- CHECK(!test.IncludesAliasOf(csp));
+ CHECK(!test.IncludesAliasOf(sp));
CHECK(!test.IncludesAliasOf(w4));
CHECK(!test.IncludesAliasOf(w30));
CHECK(!test.IncludesAliasOf(wzr));
- CHECK(!test.IncludesAliasOf(wcsp));
+ CHECK(!test.IncludesAliasOf(wsp));
CHECK(!test.IncludesAliasOf(d0));
CHECK(!test.IncludesAliasOf(d1));
@@ -14059,13 +14061,13 @@ TEST(cpureglist_utils_w) {
CHECK(!test.IncludesAliasOf(x14));
CHECK(!test.IncludesAliasOf(x30));
CHECK(!test.IncludesAliasOf(xzr));
- CHECK(!test.IncludesAliasOf(csp));
+ CHECK(!test.IncludesAliasOf(sp));
CHECK(!test.IncludesAliasOf(w0));
CHECK(!test.IncludesAliasOf(w9));
CHECK(!test.IncludesAliasOf(w14));
CHECK(!test.IncludesAliasOf(w30));
CHECK(!test.IncludesAliasOf(wzr));
- CHECK(!test.IncludesAliasOf(wcsp));
+ CHECK(!test.IncludesAliasOf(wsp));
CHECK(!test.IncludesAliasOf(d10));
CHECK(!test.IncludesAliasOf(d11));
@@ -14140,8 +14142,8 @@ TEST(cpureglist_utils_d) {
CHECK(!test.IncludesAliasOf(xzr));
CHECK(!test.IncludesAliasOf(wzr));
- CHECK(!test.IncludesAliasOf(csp));
- CHECK(!test.IncludesAliasOf(wcsp));
+ CHECK(!test.IncludesAliasOf(sp));
+ CHECK(!test.IncludesAliasOf(wsp));
CHECK(!test.IsEmpty());
@@ -14238,7 +14240,7 @@ TEST(printf) {
// Initialize x29 to the value of the stack pointer. We will use x29 as a
// temporary stack pointer later, and initializing it in this way allows the
// RegisterDump check to pass.
- __ Mov(x29, __ StackPointer());
+ __ Mov(x29, sp);
// Test simple integer arguments.
__ Mov(x0, 1234);
@@ -14288,10 +14290,8 @@ TEST(printf) {
__ Printf("%g\n", d10);
__ Printf("%%%%%s%%%c%%\n", x2, w13);
- // Print the stack pointer (csp).
- CHECK(csp.Is(__ StackPointer()));
- __ Printf("StackPointer(csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
- __ StackPointer(), __ StackPointer().W());
+ // Print the stack pointer.
+ __ Printf("StackPointer(sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n", sp, wsp);
// Test with three arguments.
__ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
@@ -15089,7 +15089,7 @@ TEST(call_no_relocation) {
{
Assembler::BlockConstPoolScope scope(&masm);
call_start = buf + __ pc_offset();
- __ Call(buf + function.pos(), RelocInfo::NONE64);
+ __ Call(buf + function.pos(), RelocInfo::NONE);
return_address = buf + __ pc_offset();
}
__ Pop(xzr, lr);
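
Most of the arm64 churn in this file is mechanical: these tests now address the architectural sp/wsp registers directly instead of V8's former csp/wcsp aliases, and no longer route through the configurable __ StackPointer() helper. A representative before/after pair from the hunks above:

    // Before: __ Mov(x29, csp);   __ Orr(wcsp, wzr, Operand(0xFFFFFFF8L));
    // After:  __ Mov(x29, sp);    __ Orr(wsp, wzr, Operand(0xFFFFFFF8L));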
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 5b79ff1fc1..b75820e8c4 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -220,7 +220,7 @@ TEST(AssemblerIa325) {
v8::internal::byte buffer[256];
Assembler assm(isolate, buffer, sizeof buffer);
- __ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE32));
+ __ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE));
__ ret(0);
CodeDesc desc;
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index d6bbe34e74..f120c9c418 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -87,7 +87,7 @@ TEST(1) {
#endif
__ lr(r3, r2);
- __ lhi(r2, Operand(0, kRelocInfo_NONEPTR));
+ __ lhi(r2, Operand(0, RelocInfo::NONE));
__ b(&C);
__ bind(&L);
@@ -95,7 +95,7 @@ TEST(1) {
__ ahi(r3, Operand(-1 & 0xFFFF));
__ bind(&C);
- __ cfi(r3, Operand(0, kRelocInfo_NONEPTR));
+ __ cfi(r3, Operand(0, RelocInfo::NONE));
__ bne(&L);
__ b(r14);
@@ -137,7 +137,7 @@ TEST(2) {
__ ahi(r3, Operand(-1 & 0xFFFF));
__ bind(&C);
- __ cfi(r3, Operand(0, kRelocInfo_NONEPTR));
+ __ cfi(r3, Operand(0, RelocInfo::NONE));
__ bne(&L);
__ b(r14);
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 7889a61a77..49e6bfd86d 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -15,6 +15,7 @@
#include "src/debug/debug.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/promise-inl.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -212,7 +213,7 @@ TEST(LoadHeapNumberValue) {
CodeAssemblerTester asm_tester(isolate);
CodeStubAssembler m(asm_tester.state());
Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(1234);
- m.Return(m.SmiFromWord32(m.Signed(
+ m.Return(m.SmiFromInt32(m.Signed(
m.ChangeFloat64ToUint32(m.LoadHeapNumberValue(m.HeapConstant(number))))));
FunctionTester ft(asm_tester.GenerateCode());
MaybeHandle<Object> result = ft.Call();
@@ -224,7 +225,7 @@ TEST(LoadInstanceType) {
CodeAssemblerTester asm_tester(isolate);
CodeStubAssembler m(asm_tester.state());
Handle<HeapObject> undefined = isolate->factory()->undefined_value();
- m.Return(m.SmiFromWord32(m.LoadInstanceType(m.HeapConstant(undefined))));
+ m.Return(m.SmiFromInt32(m.LoadInstanceType(m.HeapConstant(undefined))));
FunctionTester ft(asm_tester.GenerateCode());
MaybeHandle<Object> result = ft.Call();
CHECK_EQ(InstanceType::ODDBALL_TYPE,
@@ -252,8 +253,8 @@ TEST(JSFunction) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- m.Return(m.SmiFromWord32(m.Int32Add(m.SmiToWord32(m.Parameter(1)),
- m.SmiToWord32(m.Parameter(2)))));
+ m.Return(m.SmiFromInt32(
+ m.Int32Add(m.SmiToInt32(m.Parameter(1)), m.SmiToInt32(m.Parameter(2)))));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -268,8 +269,8 @@ TEST(ComputeIntegerHash) {
const int kNumParams = 2;
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- m.Return(m.SmiFromWord32(m.ComputeIntegerHash(
- m.SmiUntag(m.Parameter(0)), m.SmiToWord32(m.Parameter(1)))));
+ m.Return(m.SmiFromInt32(m.ComputeIntegerHash(m.SmiUntag(m.Parameter(0)),
+ m.SmiToInt32(m.Parameter(1)))));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -1894,7 +1895,7 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
Return(length);
BIND(&bailout);
- Return(SmiTag(IntPtrAdd(arg_index, IntPtrConstant(2))));
+ Return(SmiTag(IntPtrAdd(arg_index.value(), IntPtrConstant(2))));
FunctionTester ft(csa_tester->GenerateCode(), kNumParams);
@@ -2069,74 +2070,6 @@ TEST(AllocateAndSetJSPromise) {
CHECK(!js_promise->has_handler());
}
-TEST(AllocatePromiseReactionJobInfo) {
- Isolate* isolate(CcTest::InitIsolateOnce());
-
- const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeStubAssembler m(asm_tester.state());
- PromiseBuiltinsAssembler p(asm_tester.state());
-
- Node* const context = m.Parameter(kNumParams + 2);
- Node* const tasks =
- m.AllocateFixedArray(PACKED_ELEMENTS, m.IntPtrConstant(1));
- m.StoreFixedArrayElement(tasks, 0, m.UndefinedConstant());
- Node* const deferred_promise =
- m.AllocateFixedArray(PACKED_ELEMENTS, m.IntPtrConstant(1));
- m.StoreFixedArrayElement(deferred_promise, 0, m.UndefinedConstant());
- Node* const info = m.AllocatePromiseReactionJobInfo(
- m.SmiConstant(1), tasks, deferred_promise, m.UndefinedConstant(),
- m.UndefinedConstant(), context);
- m.Return(info);
-
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<Object> result =
- ft.Call(isolate->factory()->undefined_value()).ToHandleChecked();
- CHECK(result->IsPromiseReactionJobInfo());
- Handle<PromiseReactionJobInfo> promise_info =
- Handle<PromiseReactionJobInfo>::cast(result);
- CHECK_EQ(Smi::FromInt(1), promise_info->value());
- CHECK(promise_info->tasks()->IsFixedArray());
- CHECK(promise_info->deferred_promise()->IsFixedArray());
- CHECK(promise_info->deferred_on_resolve()->IsUndefined(isolate));
- CHECK(promise_info->deferred_on_reject()->IsUndefined(isolate));
- CHECK(promise_info->context()->IsContext());
-}
-
-TEST(AllocatePromiseResolveThenableJobInfo) {
- Isolate* isolate(CcTest::InitIsolateOnce());
-
- const int kNumParams = 1;
- CodeAssemblerTester asm_tester(isolate, kNumParams);
- PromiseBuiltinsAssembler p(asm_tester.state());
-
- Node* const context = p.Parameter(kNumParams + 2);
- Node* const native_context = p.LoadNativeContext(context);
- Node* const thenable = p.AllocateAndInitJSPromise(context);
- Node* const then =
- p.GetProperty(context, thenable, isolate->factory()->then_string());
- Node* resolve = nullptr;
- Node* reject = nullptr;
- std::tie(resolve, reject) = p.CreatePromiseResolvingFunctions(
- thenable, p.FalseConstant(), native_context);
-
- Node* const info = p.AllocatePromiseResolveThenableJobInfo(
- thenable, then, resolve, reject, context);
- p.Return(info);
-
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<Object> result =
- ft.Call(isolate->factory()->undefined_value()).ToHandleChecked();
- CHECK(result->IsPromiseResolveThenableJobInfo());
- Handle<PromiseResolveThenableJobInfo> promise_info =
- Handle<PromiseResolveThenableJobInfo>::cast(result);
- CHECK(promise_info->thenable()->IsJSPromise());
- CHECK(promise_info->then()->IsJSFunction());
- CHECK(promise_info->resolve()->IsJSFunction());
- CHECK(promise_info->reject()->IsJSFunction());
- CHECK(promise_info->context()->IsContext());
-}
-
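// [Editor's note] The two tests removed above exercised
// AllocatePromiseReactionJobInfo and AllocatePromiseResolveThenableJobInfo;
// their deletion presumably tracks the promise implementation rework in
// this V8 roll, after which those *JobInfo structs and their CSA
// allocators no longer exist.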
TEST(IsSymbol) {
Isolate* isolate(CcTest::InitIsolateOnce());
@@ -2326,8 +2259,9 @@ TEST(AllocateFunctionWithMapAndContext) {
m.AllocateAndInitJSPromise(context, m.UndefinedConstant());
Node* promise_context = m.CreatePromiseResolvingFunctionsContext(
promise, m.BooleanConstant(false), native_context);
- Node* resolve_info =
- m.LoadContextElement(native_context, Context::PROMISE_RESOLVE_SHARED_FUN);
+ Node* resolve_info = m.LoadContextElement(
+ native_context,
+ Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);
Node* const map = m.LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
Node* const resolve =
@@ -2341,10 +2275,12 @@ TEST(AllocateFunctionWithMapAndContext) {
Handle<JSFunction> fun = Handle<JSFunction>::cast(result_obj);
CHECK_EQ(isolate->heap()->empty_property_array(), fun->property_array());
CHECK_EQ(isolate->heap()->empty_fixed_array(), fun->elements());
- CHECK_EQ(isolate->heap()->undefined_cell(), fun->feedback_vector_cell());
+ CHECK_EQ(isolate->heap()->many_closures_cell(), fun->feedback_cell());
CHECK(!fun->has_prototype_slot());
- CHECK_EQ(*isolate->promise_resolve_shared_fun(), fun->shared());
- CHECK_EQ(isolate->promise_resolve_shared_fun()->code(), fun->code());
+ CHECK_EQ(*isolate->promise_capability_default_resolve_shared_fun(),
+ fun->shared());
+ CHECK_EQ(isolate->promise_capability_default_resolve_shared_fun()->code(),
+ fun->code());
}
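// [Editor's note] Two renames surface in this hunk: the closure's feedback
// slot accessor changed from feedback_vector_cell() to feedback_cell()
// (with many_closures_cell() as the shared default), and the resolve
// function moved to the longer promise_capability_default_* name. Assuming
// the isolate accessors mirror the native-context slots, the lookup reads:
//   Node* resolve_info = m.LoadContextElement(
//       native_context,
//       Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX);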
TEST(CreatePromiseGetCapabilitiesExecutorContext) {
@@ -2357,7 +2293,7 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
Node* const context = m.Parameter(kNumParams + 2);
Node* const native_context = m.LoadNativeContext(context);
- Node* const map = m.LoadRoot(Heap::kTuple3MapRootIndex);
+ Node* const map = m.LoadRoot(Heap::kPromiseCapabilityMapRootIndex);
Node* const capability = m.AllocateStruct(map);
m.StoreObjectFieldNoWriteBarrier(
capability, PromiseCapability::kPromiseOffset, m.UndefinedConstant());
@@ -2396,8 +2332,10 @@ TEST(NewPromiseCapability) {
Node* const promise_constructor =
m.LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const debug_event = m.TrueConstant();
Node* const capability =
- m.NewPromiseCapability(context, promise_constructor);
+ m.CallBuiltin(Builtins::kNewPromiseCapability, context,
+ promise_constructor, debug_event);
m.Return(capability);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
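// [Editor's note] NewPromiseCapability is now reached as a builtin call
// with an explicit debug_event flag rather than as a CSA helper. Sketch,
// assuming the (context, constructor, debug_event) order shown above:
//   Node* capability =
//       m.CallBuiltin(Builtins::kNewPromiseCapability, context,
//                     promise_constructor, m.TrueConstant() /* debug_event */);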
@@ -2411,10 +2349,10 @@ TEST(NewPromiseCapability) {
CHECK(result->promise()->IsJSPromise());
CHECK(result->resolve()->IsJSFunction());
CHECK(result->reject()->IsJSFunction());
- CHECK_EQ(isolate->native_context()->promise_resolve_shared_fun(),
- JSFunction::cast(result->resolve())->shared());
- CHECK_EQ(isolate->native_context()->promise_reject_shared_fun(),
+ CHECK_EQ(*isolate->promise_capability_default_reject_shared_fun(),
JSFunction::cast(result->reject())->shared());
+ CHECK_EQ(*isolate->promise_capability_default_resolve_shared_fun(),
+ JSFunction::cast(result->resolve())->shared());
Handle<JSFunction> callbacks[] = {
handle(JSFunction::cast(result->resolve())),
@@ -2440,7 +2378,9 @@ TEST(NewPromiseCapability) {
Node* const context = m.Parameter(kNumParams + 2);
Node* const constructor = m.Parameter(1);
- Node* const capability = m.NewPromiseCapability(context, constructor);
+ Node* const debug_event = m.TrueConstant();
+ Node* const capability = m.CallBuiltin(Builtins::kNewPromiseCapability,
+ context, constructor, debug_event);
m.Return(capability);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2672,7 +2612,7 @@ TEST(LoadJSArrayElementsMap) {
CodeStubAssembler m(asm_tester.state());
Node* context = m.Parameter(kNumParams + 2);
Node* native_context = m.LoadNativeContext(context);
- Node* kind = m.SmiToWord32(m.Parameter(0));
+ Node* kind = m.SmiToInt32(m.Parameter(0));
m.Return(m.LoadJSArrayElementsMap(kind, native_context));
}
@@ -2731,7 +2671,7 @@ TEST(GotoIfNotWhiteSpaceOrLineTerminator) {
{ // Returns true if whitespace, false otherwise.
Label if_not_whitespace(&m);
- m.GotoIfNotWhiteSpaceOrLineTerminator(m.SmiToWord32(m.Parameter(0)),
+ m.GotoIfNotWhiteSpaceOrLineTerminator(m.SmiToInt32(m.Parameter(0)),
&if_not_whitespace);
m.Return(m.TrueConstant());
@@ -2787,7 +2727,7 @@ TEST(IsNumberArrayIndex) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
{
CodeStubAssembler m(asm_tester.state());
- m.Return(m.SmiFromWord32(m.IsNumberArrayIndex(m.Parameter(0))));
+ m.Return(m.SmiFromInt32(m.IsNumberArrayIndex(m.Parameter(0))));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index d042ea617b..b80208668b 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -116,7 +116,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
MakeAssemblerBufferExecutable(buffer, allocated);
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
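// [Editor's note] Assembler::FlushICache evidently no longer takes an
// Isolate*; the identical two-argument change repeats in the arm64, mips
// and mips64 trampolines below:
//   Assembler::FlushICache(buffer, allocated);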
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
index 1086bea7ce..46f6b5b3c5 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -129,7 +129,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
MakeAssemblerBufferExecutable(buffer, allocated);
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
index b1df94feed..4cbe023730 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -129,7 +129,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
MakeAssemblerBufferExecutable(buffer, allocated);
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index c09dac24ea..e7c393acb0 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -126,7 +126,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
MakeAssemblerBufferExecutable(buffer, allocated);
- Assembler::FlushICache(isolate, buffer, allocated);
+ Assembler::FlushICache(buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index f9195c57fa..9257c0aa6b 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -66,11 +66,9 @@ static Handle<JSFunction> Compile(const char* source) {
CStrVector(source)).ToHandleChecked();
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
- source_code, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
- MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
+ source_code, Compiler::ScriptDetails(), v8::ScriptOriginOptions(),
nullptr, nullptr, v8::ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE,
- MaybeHandle<FixedArray>())
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
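// [Editor's note] GetSharedFunctionInfoForScript now takes a
// Compiler::ScriptDetails aggregate in place of the separate name, offset,
// context and host-options arguments. A caller compiling a named script
// would presumably populate it first (field names here are assumptions,
// not confirmed by this diff):
//   Compiler::ScriptDetails details(script_name);
//   details.line_offset = 0;
//   details.column_offset = 0;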
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index dd93a7eaf8..66bef1584a 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -168,140 +168,30 @@ static bool HasBreakInfo(v8::Local<v8::Function> fun) {
// Set a break point in a function with a position relative to function start,
// and return the associated break point object.
-static int SetBreakPoint(v8::Local<v8::Function> fun, int position) {
+static i::Handle<i::BreakPoint> SetBreakPoint(v8::Local<v8::Function> fun,
+ int position,
+ const char* condition = nullptr) {
i::Handle<i::JSFunction> function =
i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*fun));
position += function->shared()->start_position();
- static int break_point = 0;
- v8::internal::Isolate* isolate = function->GetIsolate();
- v8::internal::Debug* debug = isolate->debug();
- debug->SetBreakPoint(
- function,
- Handle<Object>(v8::internal::Smi::FromInt(++break_point), isolate),
- &position);
+ static int break_point_index = 0;
+ i::Isolate* isolate = function->GetIsolate();
+ i::Handle<i::String> condition_string =
+ condition ? isolate->factory()->NewStringFromAsciiChecked(condition)
+ : isolate->factory()->empty_string();
+ i::Debug* debug = isolate->debug();
+ i::Handle<i::BreakPoint> break_point =
+ isolate->factory()->NewBreakPoint(++break_point_index, condition_string);
+
+ debug->SetBreakPoint(function, break_point, &position);
return break_point;
}
-// Set a break point in a function using the Debug object and return the
-// associated break point number.
-static int SetBreakPointFromJS(v8::Isolate* isolate,
- const char* function_name,
- int line, int position) {
- EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
- SNPrintF(buffer,
- "debug.Debug.setBreakPoint(%s,%d,%d)",
- function_name, line, position);
- buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Local<v8::Value> value = CompileRunChecked(isolate, buffer.start());
- return value->Int32Value(isolate->GetCurrentContext()).FromJust();
-}
-
-
-// Set a break point in a script identified by id using the global Debug object.
-static int SetScriptBreakPointByIdFromJS(v8::Isolate* isolate, int script_id,
- int line, int column) {
- EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
- if (column >= 0) {
- // Column specified; set the script break point at the precise location.
- SNPrintF(buffer,
- "debug.Debug.setScriptBreakPointById(%d,%d,%d)",
- script_id, line, column);
- } else {
- // Column not specified; set the script break point on the line.
- SNPrintF(buffer,
- "debug.Debug.setScriptBreakPointById(%d,%d)",
- script_id, line);
- }
- buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- {
- v8::TryCatch try_catch(isolate);
- v8::Local<v8::Value> value = CompileRunChecked(isolate, buffer.start());
- CHECK(!try_catch.HasCaught());
- return value->Int32Value(isolate->GetCurrentContext()).FromJust();
- }
-}
-
-
-// Set a break point in a script identified by name using the global Debug
-// object.
-static int SetScriptBreakPointByNameFromJS(v8::Isolate* isolate,
- const char* script_name, int line,
- int column) {
- EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
- if (column >= 0) {
- // Column specified; set the script break point at the precise location.
- SNPrintF(buffer,
- "debug.Debug.setScriptBreakPointByName(\"%s\",%d,%d)",
- script_name, line, column);
- } else {
- // Column not specified; set the script break point on the line.
- SNPrintF(buffer,
- "debug.Debug.setScriptBreakPointByName(\"%s\",%d)",
- script_name, line);
- }
- buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- {
- v8::TryCatch try_catch(isolate);
- v8::Local<v8::Value> value = CompileRunChecked(isolate, buffer.start());
- CHECK(!try_catch.HasCaught());
- return value->Int32Value(isolate->GetCurrentContext()).FromJust();
- }
-}
-
-
-// Clear a break point.
-static void ClearBreakPoint(int break_point) {
+static void ClearBreakPoint(i::Handle<i::BreakPoint> break_point) {
v8::internal::Isolate* isolate = CcTest::i_isolate();
v8::internal::Debug* debug = isolate->debug();
- debug->ClearBreakPoint(
- Handle<Object>(v8::internal::Smi::FromInt(break_point), isolate));
-}
-
-
-// Clear a break point using the global Debug object.
-static void ClearBreakPointFromJS(v8::Isolate* isolate,
- int break_point_number) {
- EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
- SNPrintF(buffer,
- "debug.Debug.clearBreakPoint(%d)",
- break_point_number);
- buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- CompileRunChecked(isolate, buffer.start());
-}
-
-
-static void EnableScriptBreakPointFromJS(v8::Isolate* isolate,
- int break_point_number) {
- EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
- SNPrintF(buffer,
- "debug.Debug.enableScriptBreakPoint(%d)",
- break_point_number);
- buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- CompileRunChecked(isolate, buffer.start());
-}
-
-
-static void DisableScriptBreakPointFromJS(v8::Isolate* isolate,
- int break_point_number) {
- EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
- SNPrintF(buffer,
- "debug.Debug.disableScriptBreakPoint(%d)",
- break_point_number);
- buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- CompileRunChecked(isolate, buffer.start());
-}
-
-
-static void ChangeScriptBreakPointConditionFromJS(v8::Isolate* isolate,
- int break_point_number,
- const char* condition) {
- EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
- SNPrintF(buffer,
- "debug.Debug.changeScriptBreakPointCondition(%d, \"%s\")",
- break_point_number, condition);
- buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- CompileRunChecked(isolate, buffer.start());
+ debug->ClearBreakPoint(break_point);
}
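// [Editor's note] Usage sketch for the rewritten handle-based helpers,
// mirroring the BreakPointCondition test added below; an empty condition
// string appears to mean "unconditional":
//   i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0, "a == true");
//   // ... run code that may hit the break point ...
//   ClearBreakPoint(bp);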
@@ -585,8 +475,6 @@ static void DebugEventBreakPointHitCount(
int exception_hit_count = 0;
int uncaught_exception_hit_count = 0;
int last_js_stack_height = -1;
-v8::Local<v8::Function> debug_event_listener_callback;
-int debug_event_listener_callback_result;
static void DebugEventCounterClear() {
break_point_hit_count = 0;
@@ -633,16 +521,6 @@ static void DebugEventCounter(
frame_count->Call(context, exec_state, kArgc, argv).ToLocalChecked();
last_js_stack_height = result->Int32Value(context).FromJust();
}
-
- // Run callback from DebugEventListener and check the result.
- if (!debug_event_listener_callback.IsEmpty()) {
- v8::Local<v8::Value> result =
- debug_event_listener_callback->Call(context, event_data, 0, nullptr)
- .ToLocalChecked();
- CHECK(!result.IsEmpty());
- CHECK_EQ(debug_event_listener_callback_result,
- result->Int32Value(context).FromJust());
- }
}
@@ -701,7 +579,7 @@ static void DebugEventEvaluate(
// This debug event listener removes a breakpoint in a function
-int debug_event_remove_break_point = 0;
+i::Handle<i::BreakPoint> debug_event_remove_break_point;
static void DebugEventRemoveBreakPoint(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
@@ -896,12 +774,12 @@ TEST(DebugInfo) {
CHECK(!HasBreakInfo(bar));
EnableDebugger(env->GetIsolate());
// One function (foo) is debugged.
- int bp1 = SetBreakPoint(foo, 0);
+ i::Handle<i::BreakPoint> bp1 = SetBreakPoint(foo, 0);
CHECK_EQ(1, v8::internal::GetDebuggedFunctions()->length());
CHECK(HasBreakInfo(foo));
CHECK(!HasBreakInfo(bar));
// Two functions are debugged.
- int bp2 = SetBreakPoint(bar, 0);
+ i::Handle<i::BreakPoint> bp2 = SetBreakPoint(bar, 0);
CHECK_EQ(2, v8::internal::GetDebuggedFunctions()->length());
CHECK(HasBreakInfo(foo));
CHECK(HasBreakInfo(bar));
@@ -934,7 +812,7 @@ TEST(BreakPointICStore) {
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
- int bp = SetBreakPoint(foo, 0);
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0);
foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
@@ -949,6 +827,37 @@ TEST(BreakPointICStore) {
CheckDebuggerUnloaded();
}
+// Test that a conditional break point is only hit when its condition holds.
+TEST(BreakPointCondition) {
+ break_point_hit_count = 0;
+ DebugLocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
+ CompileRun("var a = false");
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo() { return 1 }", "foo");
+ // Run without breakpoints.
+ CompileRun("foo()");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0, "a == true");
+ CompileRun("foo()");
+ CHECK_EQ(0, break_point_hit_count);
+
+ CompileRun("a = true");
+ CompileRun("foo()");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("foo()");
+ CHECK_EQ(1, break_point_hit_count);
+
+ SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
// Test that a break point can be set at an IC load location.
TEST(BreakPointICLoad) {
@@ -966,7 +875,7 @@ TEST(BreakPointICLoad) {
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint.
- int bp = SetBreakPoint(foo, 0);
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0);
foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
@@ -997,7 +906,7 @@ TEST(BreakPointICCall) {
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
- int bp = SetBreakPoint(foo, 0);
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0);
foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
@@ -1032,7 +941,7 @@ TEST(BreakPointICCallWithGC) {
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint.
- int bp = SetBreakPoint(foo, 0);
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0);
CHECK_EQ(1, foo->Call(context, env->Global(), 0, nullptr)
.ToLocalChecked()
->Int32Value(context)
@@ -1073,7 +982,7 @@ TEST(BreakPointConstructCallWithGC) {
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint.
- int bp = SetBreakPoint(foo, 0);
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0);
CHECK_EQ(1, foo->Call(context, env->Global(), 0, nullptr)
.ToLocalChecked()
->Int32Value(context)
@@ -1120,7 +1029,7 @@ TEST(BreakPointReturn) {
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
- int bp = SetBreakPoint(foo, 0);
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0);
foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ(0, last_source_line);
@@ -1139,6 +1048,237 @@ TEST(BreakPointReturn) {
CheckDebuggerUnloaded();
}
+// Test that break points can be set on (possibly inlined or bound) builtins.
+TEST(BreakPointBuiltin) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_block_concurrent_recompilation = true;
+ i::FLAG_experimental_inline_promise_constructor = true;
+ DebugLocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
+
+ // === Test simple builtin ===
+ break_point_hit_count = 0;
+ v8::Local<v8::Function> builtin =
+ CompileRun("String.prototype.repeat").As<v8::Function>();
+ CompileRun("'a'.repeat(10)");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(builtin, 0);
+ CompileRun("'b'.repeat(10)");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("'b'.repeat(10)");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // === Test bound function from a builtin ===
+ break_point_hit_count = 0;
+ builtin = CompileRun(
+ "var boundrepeat = String.prototype.repeat.bind('a');"
+ "String.prototype.repeat")
+ .As<v8::Function>();
+ CompileRun("boundrepeat(10)");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(builtin, 0);
+ CompileRun("boundrepeat(10)");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("boundrepeat(10)");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // === Test constructor builtin (for ones with normal construct stubs) ===
+ break_point_hit_count = 0;
+ builtin = CompileRun("Promise").As<v8::Function>();
+ CompileRun("new Promise(()=>{})");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(builtin, 0);
+ CompileRun("new Promise(()=>{})");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("new Promise(()=>{})");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // === Test inlined builtin ===
+ break_point_hit_count = 0;
+ builtin = CompileRun("Math.sin").As<v8::Function>();
+ CompileRun("function test(x) { return 1 + Math.sin(x) }");
+ CompileRun(
+ "test(0.5); test(0.6);"
+ "%OptimizeFunctionOnNextCall(test); test(0.7);");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(builtin, 0);
+ CompileRun("Math.sin(0.1);");
+ CHECK_EQ(1, break_point_hit_count);
+ CompileRun("test(0.2);");
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Re-optimize.
+ CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun("test(0.3);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("test(0.3);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ // === Test inlined bound builtin ===
+ break_point_hit_count = 0;
+ builtin = CompileRun("String.prototype.repeat").As<v8::Function>();
+ CompileRun("function test(x) { return 'a' + boundrepeat(x) }");
+ CompileRun(
+ "test(4); test(5);"
+ "%OptimizeFunctionOnNextCall(test); test(6);");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(builtin, 0);
+ CompileRun("'a'.repeat(2);");
+ CHECK_EQ(1, break_point_hit_count);
+ CompileRun("test(7);");
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Re-optimize.
+ CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun("test(8);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("test(9);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ // === Test inlined constructor builtin (regular construct builtin) ===
+ break_point_hit_count = 0;
+ builtin = CompileRun("Promise").As<v8::Function>();
+ CompileRun("function test(x) { return new Promise(()=>x); }");
+ CompileRun(
+ "test(4); test(5);"
+ "%OptimizeFunctionOnNextCall(test); test(6);");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(builtin, 0);
+ CompileRun("new Promise();");
+ CHECK_EQ(1, break_point_hit_count);
+ CompileRun("test(7);");
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Re-optimize.
+ CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun("test(8);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("test(9);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ // === Test concurrent optimization ===
+ break_point_hit_count = 0;
+ builtin = CompileRun("Math.sin").As<v8::Function>();
+ CompileRun("function test(x) { return 1 + Math.sin(x) }");
+ // Trigger concurrent compile job. It is suspended until unblocked.
+ CompileRun(
+ "test(0.5); test(0.6);"
+ "%OptimizeFunctionOnNextCall(test, 'concurrent'); test(0.7);");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(builtin, 0);
+ // Have the concurrent compile job finish now.
+ CompileRun(
+ "%UnblockConcurrentRecompilation();"
+ "%GetOptimizationStatus(test, 'sync');");
+ CompileRun("test(0.2);");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("test(0.3);");
+ CHECK_EQ(1, break_point_hit_count);
+
+ // === Test builtin represented as operator ===
+ break_point_hit_count = 0;
+ builtin = CompileRun("String.prototype.indexOf").As<v8::Function>();
+ CompileRun("function test(x) { return 1 + 'foo'.indexOf(x) }");
+ CompileRun(
+ "test('a'); test('b');"
+ "%OptimizeFunctionOnNextCall(test); test('c');");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ bp = SetBreakPoint(builtin, 0);
+ CompileRun("'bar'.indexOf('x');");
+ CHECK_EQ(1, break_point_hit_count);
+ CompileRun("test('d');");
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Re-optimize.
+ CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun("test('e');");
+ CHECK_EQ(3, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("test('f');");
+ CHECK_EQ(3, break_point_hit_count);
+
+ SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
+TEST(BreakPointInlining) {
+ i::FLAG_allow_natives_syntax = true;
+ break_point_hit_count = 0;
+ DebugLocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
+
+ break_point_hit_count = 0;
+ v8::Local<v8::Function> inlinee =
+ CompileRun("function f(x) { return x*2; } f").As<v8::Function>();
+ CompileRun("function test(x) { return 1 + f(x) }");
+ CompileRun(
+ "test(0.5); test(0.6);"
+ "%OptimizeFunctionOnNextCall(test); test(0.7);");
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(inlinee, 0);
+ CompileRun("f(0.1);");
+ CHECK_EQ(1, break_point_hit_count);
+ CompileRun("test(0.2);");
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Re-optimize.
+ CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun("test(0.3);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ CompileRun("test(0.3);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
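// [Editor's note] The %OptimizeFunctionOnNextCall and
// %UnblockConcurrentRecompilation intrinsics used above require natives
// syntax, enabled via i::FLAG_allow_natives_syntax. A rough d8 equivalent
// of the inlining scenario (sketch, not part of the commit):
//   d8 --allow-natives-syntax -e "
//       function f(x) { return x * 2; }
//       function test(x) { return 1 + f(x); }
//       test(1); test(2); %OptimizeFunctionOnNextCall(test); test(3);"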
static void CallWithBreakPoints(v8::Local<v8::Context> context,
v8::Local<v8::Object> recv,
@@ -1273,716 +1413,6 @@ TEST(BreakPointSurviveGC) {
}
-// Test that break points can be set using the global Debug object.
-TEST(BreakPointThroughJavaScript) {
- break_point_hit_count = 0;
- DebugLocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = env.context();
- env.ExposeDebug();
-
- SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
- CompileRunChecked(isolate, "function bar(){}");
- CompileFunction(isolate, "function foo(){bar();bar();}", "foo");
- // 012345678901234567890
- // 1 2
- // Break points are set at positions 3 and 9.
- v8::Local<v8::String> source = v8_str(env->GetIsolate(), "foo()");
- v8::Local<v8::Script> foo =
- v8::Script::Compile(context, source).ToLocalChecked();
-
- CHECK_EQ(0, break_point_hit_count);
-
- // Run with one breakpoint
- int bp1 = SetBreakPointFromJS(env->GetIsolate(), "foo", 0, 3);
- foo->Run(context).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
- foo->Run(context).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
-
- // Run with two breakpoints
- int bp2 = SetBreakPointFromJS(env->GetIsolate(), "foo", 0, 9);
- foo->Run(context).ToLocalChecked();
- CHECK_EQ(4, break_point_hit_count);
- foo->Run(context).ToLocalChecked();
- CHECK_EQ(6, break_point_hit_count);
-
- // Run with one breakpoint
- ClearBreakPointFromJS(env->GetIsolate(), bp2);
- foo->Run(context).ToLocalChecked();
- CHECK_EQ(7, break_point_hit_count);
- foo->Run(context).ToLocalChecked();
- CHECK_EQ(8, break_point_hit_count);
-
- // Run without breakpoints.
- ClearBreakPointFromJS(env->GetIsolate(), bp1);
- foo->Run(context).ToLocalChecked();
- CHECK_EQ(8, break_point_hit_count);
-
- SetDebugEventListener(isolate, nullptr);
- CheckDebuggerUnloaded();
-
- // Make sure that the break point numbers are consecutive.
- CHECK_EQ(1, bp1);
- CHECK_EQ(2, bp2);
-}
-
-
-// Test that break points on scripts identified by name can be set using the
-// global Debug object.
-TEST(ScriptBreakPointByNameThroughJavaScript) {
- break_point_hit_count = 0;
- DebugLocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = env.context();
- env.ExposeDebug();
-
- SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
-
- v8::Local<v8::String> script = v8_str(isolate,
- "function f() {\n"
- " function h() {\n"
- " a = 0; // line 2\n"
- " }\n"
- " b = 1; // line 4\n"
- " return h();\n"
- "}\n"
- "\n"
- "function g() {\n"
- " function h() {\n"
- " a = 0;\n"
- " }\n"
- " b = 2; // line 12\n"
- " h();\n"
- " b = 3; // line 14\n"
- " f(); // line 15\n"
- "}");
-
- // Compile the script and get the two functions.
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(isolate, "test"));
- v8::Script::Compile(context, script, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
- v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(context, v8_str(isolate, "f")).ToLocalChecked());
- v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(context, v8_str(isolate, "g")).ToLocalChecked());
-
- // Call f and g without break points.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- // Call f and g with break point on line 12.
- int sbp1 = SetScriptBreakPointByNameFromJS(isolate, "test", 12, 0);
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
-
- // Remove the break point again.
- break_point_hit_count = 0;
- ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- // Call f and g with break point on line 2.
- int sbp2 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 2, 0);
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
-
- // Call f and g with break point on line 2, 4, 12, 14 and 15.
- int sbp3 = SetScriptBreakPointByNameFromJS(isolate, "test", 4, 0);
- int sbp4 = SetScriptBreakPointByNameFromJS(isolate, "test", 12, 0);
- int sbp5 = SetScriptBreakPointByNameFromJS(isolate, "test", 14, 0);
- int sbp6 = SetScriptBreakPointByNameFromJS(isolate, "test", 15, 0);
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(7, break_point_hit_count);
-
- // Remove all the break points again.
- break_point_hit_count = 0;
- ClearBreakPointFromJS(isolate, sbp2);
- ClearBreakPointFromJS(isolate, sbp3);
- ClearBreakPointFromJS(isolate, sbp4);
- ClearBreakPointFromJS(isolate, sbp5);
- ClearBreakPointFromJS(isolate, sbp6);
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- SetDebugEventListener(isolate, nullptr);
- CheckDebuggerUnloaded();
-
- // Make sure that the break point numbers are consecutive.
- CHECK_EQ(1, sbp1);
- CHECK_EQ(2, sbp2);
- CHECK_EQ(3, sbp3);
- CHECK_EQ(4, sbp4);
- CHECK_EQ(5, sbp5);
- CHECK_EQ(6, sbp6);
-}
-
-
-TEST(ScriptBreakPointByIdThroughJavaScript) {
- break_point_hit_count = 0;
- DebugLocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = env.context();
- env.ExposeDebug();
-
- SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
-
- v8::Local<v8::String> source = v8_str(isolate,
- "function f() {\n"
- " function h() {\n"
- " a = 0; // line 2\n"
- " }\n"
- " b = 1; // line 4\n"
- " return h();\n"
- "}\n"
- "\n"
- "function g() {\n"
- " function h() {\n"
- " a = 0;\n"
- " }\n"
- " b = 2; // line 12\n"
- " h();\n"
- " b = 3; // line 14\n"
- " f(); // line 15\n"
- "}");
-
- // Compile the script and get the two functions.
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(isolate, "test"));
- v8::Local<v8::Script> script =
- v8::Script::Compile(context, source, &origin).ToLocalChecked();
- script->Run(context).ToLocalChecked();
- v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(context, v8_str(isolate, "f")).ToLocalChecked());
- v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(context, v8_str(isolate, "g")).ToLocalChecked());
-
- // Get the script id, knowing that internally it is a 32-bit integer.
- int script_id = script->GetUnboundScript()->GetId();
-
- // Call f and g without break points.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- // Call f and g with break point on line 12.
- int sbp1 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 12, 0);
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
-
- // Remove the break point again.
- break_point_hit_count = 0;
- ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- // Call f and g with break point on line 2.
- int sbp2 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 2, 0);
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
-
- // Call f and g with break point on line 2, 4, 12, 14 and 15.
- int sbp3 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 4, 0);
- int sbp4 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 12, 0);
- int sbp5 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 14, 0);
- int sbp6 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 15, 0);
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(7, break_point_hit_count);
-
- // Remove all the break points again.
- break_point_hit_count = 0;
- ClearBreakPointFromJS(env->GetIsolate(), sbp2);
- ClearBreakPointFromJS(env->GetIsolate(), sbp3);
- ClearBreakPointFromJS(env->GetIsolate(), sbp4);
- ClearBreakPointFromJS(env->GetIsolate(), sbp5);
- ClearBreakPointFromJS(env->GetIsolate(), sbp6);
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- SetDebugEventListener(isolate, nullptr);
- CheckDebuggerUnloaded();
-
- // Make sure that the break point numbers are consecutive.
- CHECK_EQ(1, sbp1);
- CHECK_EQ(2, sbp2);
- CHECK_EQ(3, sbp3);
- CHECK_EQ(4, sbp4);
- CHECK_EQ(5, sbp5);
- CHECK_EQ(6, sbp6);
-}
-
-
-// Test conditional script break points.
-TEST(EnableDisableScriptBreakPoint) {
- break_point_hit_count = 0;
- DebugLocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = env.context();
- env.ExposeDebug();
-
- SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
-
- v8::Local<v8::String> script = v8_str(isolate,
- "function f() {\n"
- " a = 0; // line 1\n"
- "};");
-
- // Compile the script and get function f.
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(isolate, "test"));
- v8::Script::Compile(context, script, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
- v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(context, v8_str(isolate, "f")).ToLocalChecked());
-
- // Set script break point on line 1 (in function f).
- int sbp = SetScriptBreakPointByNameFromJS(isolate, "test", 1, 0);
-
- // Call f while enabling and disabling the script break point.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
-
- DisableScriptBreakPointFromJS(isolate, sbp);
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
-
- EnableScriptBreakPointFromJS(isolate, sbp);
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
-
- DisableScriptBreakPointFromJS(isolate, sbp);
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
-
- SetDebugEventListener(isolate, nullptr);
- CheckDebuggerUnloaded();
-}
-
-
-// Test conditional script break points.
-TEST(ConditionalScriptBreakPoint) {
- break_point_hit_count = 0;
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
-
- SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
-
- v8::Local<v8::String> script = v8_str(env->GetIsolate(),
- "count = 0;\n"
- "function f() {\n"
- " g(count++); // line 2\n"
- "};\n"
- "function g(x) {\n"
- " var a=x; // line 5\n"
- "};");
-
- // Compile the script and get function f.
- v8::Local<v8::Context> context = env.context();
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(env->GetIsolate(), "test"));
- v8::Script::Compile(context, script, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
- v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "f"))
- .ToLocalChecked());
-
- // Set script break point on line 5 (in function g).
- int sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 5, 0);
-
- // Call f with different conditions on the script break point.
- break_point_hit_count = 0;
- ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "false");
- f->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "true");
- break_point_hit_count = 0;
- f->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
-
- ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "x % 2 == 0");
- break_point_hit_count = 0;
- for (int i = 0; i < 10; i++) {
- f->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
- }
- CHECK_EQ(5, break_point_hit_count);
-
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
-
-
-// Test when several scripts have the same script data
-TEST(ScriptBreakPointMultiple) {
- break_point_hit_count = 0;
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
-
- SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
-
- v8::Local<v8::Context> context = env.context();
- v8::Local<v8::Function> f;
- v8::Local<v8::String> script_f = v8_str(env->GetIsolate(),
- "function f() {\n"
- " a = 0; // line 1\n"
- "}");
-
- v8::Local<v8::Function> g;
- v8::Local<v8::String> script_g = v8_str(env->GetIsolate(),
- "function g() {\n"
- " b = 0; // line 1\n"
- "}");
-
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(env->GetIsolate(), "test"));
-
- // Compile the scripts with same script data and get the functions.
- v8::Script::Compile(context, script_f, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
- f = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "f"))
- .ToLocalChecked());
- v8::Script::Compile(context, script_g, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
- g = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "g"))
- .ToLocalChecked());
-
- int sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
-
- // Call f and g and check that the script break point is active.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
-
- // Clear the script break point.
- ClearBreakPointFromJS(env->GetIsolate(), sbp);
-
- // Call f and g and check that the script break point is no longer active.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- // Set script break point with the scripts loaded.
- sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
-
- // Call f and g and check that the script break point is active.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
-
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
-
-
-// Test the script origin which has both name and line offset.
-TEST(ScriptBreakPointLineOffset) {
- break_point_hit_count = 0;
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
-
- SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
-
- v8::Local<v8::Context> context = env.context();
- v8::Local<v8::Function> f;
- v8::Local<v8::String> script =
- v8_str(env->GetIsolate(),
- "function f() {\n"
- " a = 0; // line 8 as this script has line offset 7\n"
- " b = 0; // line 9 as this script has line offset 7\n"
- "}");
-
- // Create script origin both name and line offset.
- v8::ScriptOrigin origin(v8_str(env->GetIsolate(), "test.html"),
- v8::Integer::New(env->GetIsolate(), 7));
-
- // Compile the script and get the function.
- v8::Script::Compile(context, script, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
-
- int sbp1 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 8, 0);
- int sbp2 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 9, 0);
-
- f = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "f"))
- .ToLocalChecked());
-
- // Call f and check that the script break point is active.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
-
- // Clear the script break points.
- ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- ClearBreakPointFromJS(env->GetIsolate(), sbp2);
-
- // Call f and check that no script break points are active.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- // Set a script break point with the script loaded.
- sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 9, 0);
-
- // Call f and check that the script break point is active.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
-
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
-
-
-// Test script break points set on lines.
-TEST(ScriptBreakPointLine) {
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
-
- // Create a function for checking the function when hitting a break point.
- frame_function_name = CompileFunction(&env,
- frame_function_name_source,
- "frame_function_name");
-
- SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
-
- v8::Local<v8::Context> context = env.context();
- v8::Local<v8::Function> f;
- v8::Local<v8::Function> g;
- v8::Local<v8::String> script =
- v8_str(env->GetIsolate(),
- "a = 0 // line 0\n"
- "function f() {\n"
- " a = 1; // line 2\n"
- "}\n"
- " a = 2; // line 4\n"
- " /* xx */ function g() { // line 5\n"
- " function h() { // line 6\n"
- " a = 3; // line 7\n"
- " }\n"
- " h(); // line 9\n"
- " a = 4; // line 10\n"
- " }\n"
- " a=5; // line 12");
-
- // Compile the script and get the function.
- break_point_hit_count = 0;
- v8::ScriptOrigin origin(v8_str(env->GetIsolate(), "test.html"),
- v8::Integer::New(env->GetIsolate(), 0));
- v8::Local<v8::Script> compiled =
- v8::Script::Compile(context, script, &origin).ToLocalChecked();
-
- int sbp1 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 0, -1);
- int sbp2 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 1, -1);
- int sbp3 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 5, -1);
-
- compiled->Run(context).ToLocalChecked();
-
- f = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "f"))
- .ToLocalChecked());
- g = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "g"))
- .ToLocalChecked());
-
- // Check that a break point was hit when the script was run.
- CHECK_EQ(1, break_point_hit_count);
- CHECK_EQ(0, StrLength(last_function_hit));
-
- // Call f and check that the script break point is hit.
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(2, break_point_hit_count);
- CHECK_EQ(0, strcmp("f", last_function_hit));
-
- // Call g and check that the script break point is hit.
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(3, break_point_hit_count);
- CHECK_EQ(0, strcmp("g", last_function_hit));
-
- // Clear the script break point on g and set one on h.
- ClearBreakPointFromJS(env->GetIsolate(), sbp3);
- int sbp4 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 6, -1);
-
- // Call g and check that the script break point in h is hit.
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(4, break_point_hit_count);
- CHECK_EQ(0, strcmp("h", last_function_hit));
-
- // Clear break points in f and h. Set a new one in the script between
- // functions f and g and test that there are no break points in f and g any
- // more.
- ClearBreakPointFromJS(env->GetIsolate(), sbp2);
- ClearBreakPointFromJS(env->GetIsolate(), sbp4);
- int sbp5 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 4, -1);
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- g->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- // Set a break point in the code after the last function declaration.
- int sbp6 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 12, -1);
-
- // Reloading the script should not hit any break points.
- break_point_hit_count = 0;
- v8::Script::Compile(context, script, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- ClearBreakPointFromJS(env->GetIsolate(), sbp5);
- ClearBreakPointFromJS(env->GetIsolate(), sbp6);
-
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
-
-
-// Test top level script break points set on lines.
-TEST(ScriptBreakPointLineTopLevel) {
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
-
- SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
-
- v8::Local<v8::Context> context = env.context();
- v8::Local<v8::String> script =
- v8_str(env->GetIsolate(),
- "function f() {\n"
- " a = 1; // line 1\n"
- "}\n"
- "a = 2; // line 3\n");
- v8::Local<v8::Function> f;
- {
- v8::HandleScope scope(env->GetIsolate());
- CompileRunWithOrigin(script, "test.html");
- }
- f = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "f"))
- .ToLocalChecked());
-
- CcTest::CollectAllGarbage();
-
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 3, -1);
-
- // Call f and check that there were no break points.
- break_point_hit_count = 0;
- f->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
-
- // Recompile and run script and check that break point was not reapplied.
- break_point_hit_count = 0;
- CompileRunWithOrigin(script, "test.html");
- CHECK_EQ(0, break_point_hit_count);
-
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
-
-
-// Test that it is possible to add and remove break points in a top level
-// function which has no references but has not been collected yet.
-TEST(ScriptBreakPointTopLevelCrash) {
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
-
- SetDebugEventListener(env->GetIsolate(), DebugEventBreakPointHitCount);
-
- CompileRunWithOrigin(
- "function f() {\n"
- " return 0;\n"
- "}\n",
- "test.html");
- int sbp1 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 1, -1);
- break_point_hit_count = 0;
-
- CompileRun("f();");
-
- CHECK_EQ(1, break_point_hit_count);
-
- int sbp2 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 3, -1);
- ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- ClearBreakPointFromJS(env->GetIsolate(), sbp2);
-
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
-
// Test that it is possible to remove the last break point for a function
// inside the break handling of that break point.
@@ -2072,7 +1502,7 @@ TEST(DebuggerStatementBreakpoint) {
foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- int bp = SetBreakPoint(foo, 0);
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, 0);
// Setting a break point does not duplicate hits.
foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
@@ -2134,7 +1564,7 @@ TEST(DebugEvaluate) {
v8_str(env->GetIsolate(), "Hello, world!")};
// Call foo with breakpoint set before a=x and undefined as parameter.
- int bp = SetBreakPoint(foo, foo_break_position_1);
+ i::Handle<i::BreakPoint> bp = SetBreakPoint(foo, foo_break_position_1);
checks = checks_uu;
foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
@@ -2189,7 +1619,6 @@ TEST(DebugEvaluate) {
" y=0; /* To ensure break location.*/"
" a=x;"
" };"
- " debug.Debug.clearAllBreakPoints();"
" barbar();"
" y=0;a=x;"
"}",
@@ -2248,79 +1677,13 @@ TEST(ConditionalBreakpointWithCodeGenerationDisallowed) {
"foo");
// Set conditional breakpoint with condition 'true'.
- CompileRun("debug.Debug.setBreakPoint(foo, 2, 0, 'true')");
-
- debugEventCount = 0;
- env->AllowCodeGenerationFromStrings(false);
- foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
- CHECK_EQ(1, debugEventCount);
-
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
+ SetBreakPoint(foo, 4, "true");
-
-bool checkedDebugEvals = true;
-v8::Local<v8::Function> checkGlobalEvalFunction;
-v8::Local<v8::Function> checkFrameEvalFunction;
-static void CheckDebugEval(const v8::Debug::EventDetails& eventDetails) {
- if (eventDetails.GetEvent() == v8::Break) {
- ++debugEventCount;
- v8::HandleScope handleScope(CcTest::isolate());
-
- v8::Local<v8::Value> args[] = {eventDetails.GetExecutionState()};
- CHECK(
- checkGlobalEvalFunction->Call(eventDetails.GetEventContext(),
- eventDetails.GetEventContext()->Global(),
- 1, args)
- .ToLocalChecked()
- ->IsTrue());
- CHECK(checkFrameEvalFunction->Call(eventDetails.GetEventContext(),
- eventDetails.GetEventContext()->Global(),
- 1, args)
- .ToLocalChecked()
- ->IsTrue());
- }
-}
-
-
-// Test that the evaluation of expressions when a break point is hit generates
-// the correct results in case code generation from strings is disallowed in the
- // debuggee context.
-TEST(DebugEvaluateWithCodeGenerationDisallowed) {
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
-
- SetDebugEventListener(env->GetIsolate(), CheckDebugEval);
-
- v8::Local<v8::Context> context = env.context();
- v8::Local<v8::Function> foo = CompileFunction(&env,
- "var global = 'Global';\n"
- "function foo(x) {\n"
- " var local = 'Local';\n"
- " debugger;\n"
- " return local + x;\n"
- "}",
- "foo");
- checkGlobalEvalFunction = CompileFunction(&env,
- "function checkGlobalEval(exec_state) {\n"
- " return exec_state.evaluateGlobal('global').value() === 'Global';\n"
- "}",
- "checkGlobalEval");
-
- checkFrameEvalFunction = CompileFunction(&env,
- "function checkFrameEval(exec_state) {\n"
- " return exec_state.frame(0).evaluate('local').value() === 'Local';\n"
- "}",
- "checkFrameEval");
debugEventCount = 0;
env->AllowCodeGenerationFromStrings(false);
foo->Call(context, env->Global(), 0, nullptr).ToLocalChecked();
CHECK_EQ(1, debugEventCount);
- checkGlobalEvalFunction.Clear();
- checkFrameEvalFunction.Clear();
SetDebugEventListener(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded();
}
@@ -3470,6 +2833,7 @@ TEST(DebugStepFunctionCallApply) {
TEST(PauseInScript) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
env.ExposeDebug();
// Register a debug event listener which counts.
@@ -3487,7 +2851,12 @@ TEST(PauseInScript) {
.ToLocalChecked();
// Set breakpoint in the script.
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), script_name, 0, -1);
+ i::Handle<i::BreakPoint> break_point =
+ isolate->factory()->NewBreakPoint(0, isolate->factory()->empty_string());
+ int position = 0;
+ i::Handle<i::Script> i_script(
+ i::Script::cast(v8::Utils::OpenHandle(*script)->shared()->script()));
+ isolate->debug()->SetBreakPointForScript(i_script, break_point, &position);
break_point_hit_count = 0;
v8::Local<v8::Value> r = script->Run(context).ToLocalChecked();
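// [Editor's note] With the JS Debug mirror gone, script-wide break points
// are set straight through the internal debugger, as above: build an
// i::BreakPoint via the factory, then call
//   isolate->debug()->SetBreakPointForScript(i_script, break_point, &position);
// The position is passed by pointer, presumably so the debugger can snap
// it to the nearest breakable location.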
@@ -4666,6 +4035,7 @@ TEST(NoHiddenProperties) {
TEST(SetDebugEventListenerOnUninitializedVM) {
+ v8::HandleScope scope(CcTest::isolate());
EnableDebugger(CcTest::isolate());
}
@@ -5497,48 +4867,6 @@ TEST(ExceptionEventWhenEventListenerIsReset) {
}
-// Tests that the after-compile event is sent when there are provisional
-// breakpoints out of the scripts lines range.
-TEST(ProvisionalBreakpointOnLineOutOfRange) {
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
- const char* script = "function f() {};";
- const char* resource_name = "test_resource";
-
- SetDebugEventListener(env->GetIsolate(), AfterCompileEventListener);
- v8::Local<v8::Context> context = env.context();
-
- // Set a couple of provisional break points on lines outside the script's
- // line range.
- int sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), resource_name,
- 3, -1 /* no column */);
- int sbp2 =
- SetScriptBreakPointByNameFromJS(env->GetIsolate(), resource_name, 5, 5);
-
- after_compile_event_count = 0;
-
- v8::ScriptOrigin origin(v8_str(env->GetIsolate(), resource_name),
- v8::Integer::New(env->GetIsolate(), 10),
- v8::Integer::New(env->GetIsolate(), 1));
- // Compile a script whose first line number is greater than the breakpoints'
- // lines.
- v8::Script::Compile(context, v8_str(env->GetIsolate(), script), &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
-
- // If the script is compiled successfully there is exactly one after compile
- // event. In case of an exception in debugger code, the after-compile event
- // is not sent.
- CHECK_EQ(1, after_compile_event_count);
-
- ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- ClearBreakPointFromJS(env->GetIsolate(), sbp2);
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
-
static void BreakEventListener(const v8::Debug::EventDetails& details) {
if (details.GetEvent() == v8::Break) break_point_hit_count++;
}
@@ -6190,41 +5518,6 @@ TEST(LiveEditDisabled) {
}
-TEST(PrecompiledFunction) {
- // Regression test for crbug.com/346207. If we have preparse data, parsing the
- // function in the presence of the debugger (and breakpoints) should still
- // succeed. The bug was that preparsing was done lazily and parsing was done
- // eagerly, so the symbol streams didn't match.
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
- SetDebugEventListener(env->GetIsolate(), DebugBreakInlineListener);
-
- v8::Local<v8::Function> break_here =
- CompileFunction(&env, "function break_here(){}", "break_here");
- SetBreakPoint(break_here, 0);
-
- const char* source =
- "var a = b = c = 1; \n"
- "function this_is_lazy() { \n"
- // This symbol won't appear in the preparse data.
- " var a; \n"
- "} \n"
- "function bar() { \n"
- " return \"bar\"; \n"
- "}; \n"
- "a = b = c = 2; \n"
- "bar(); \n";
- v8::Local<v8::Value> result = ParserCacheCompileRun(source);
- CHECK(result->IsString());
- v8::String::Utf8Value utf8(env->GetIsolate(), result);
- CHECK_EQ(0, strcmp("bar", *utf8));
-
- SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded();
-}
-
-
static void DebugBreakStackTraceListener(
const v8::Debug::EventDetails& event_details) {
v8::StackTrace::CurrentStackTrace(CcTest::isolate(), 10);
@@ -6719,17 +6012,13 @@ TEST(BuiltinsExceptionPrediction) {
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate);
+ i::Snapshot::EnsureAllBuiltinsAreDeserialized(iisolate);
+
i::Builtins* builtins = iisolate->builtins();
bool fail = false;
for (int i = 0; i < i::Builtins::builtin_count; i++) {
Code* builtin = builtins->builtin(i);
-
if (builtin->kind() != Code::BUILTIN) continue;
- if (builtin->builtin_index() == i::Builtins::kDeserializeLazy &&
- i::Builtins::IsLazy(i)) {
- builtin = i::Snapshot::DeserializeBuiltin(iisolate, i);
- }
-
auto prediction = builtin->GetBuiltinCatchPrediction();
USE(prediction);
}
@@ -6767,8 +6056,9 @@ TEST(DebugGetPossibleBreakpointsReturnLocations) {
TEST(DebugEvaluateNoSideEffect) {
LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ EnableDebugger(env->GetIsolate());
i::Isolate* isolate = CcTest::i_isolate();
- i::HandleScope scope(isolate);
std::vector<i::Handle<i::JSFunction>> all_functions;
{
i::HeapIterator iterator(isolate->heap());
@@ -6789,4 +6079,5 @@ TEST(DebugEvaluateNoSideEffect) {
}
if (failed) isolate->clear_pending_exception();
}
+ DisableDebugger(env->GetIsolate());
}
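
For reference, the heap walk that fills all_functions in the test above looks roughly like this (the loop body sits outside the visible hunk, so this is a sketch, not the verbatim test):

    std::vector<i::Handle<i::JSFunction>> all_functions;
    i::HeapIterator iterator(isolate->heap());
    while (i::HeapObject* obj = iterator.next()) {
      if (!obj->IsJSFunction()) continue;  // keep only live JSFunctions
      all_functions.push_back(i::handle(i::JSFunction::cast(obj), isolate));
    }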
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 300309244e..52912b7591 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -1568,6 +1568,9 @@ TEST(Barrier) {
COMPARE(mcr(p15, 0, r0, cr7, cr10, 4, ne), "1e070f9a mcrne (CP15DSB)");
COMPARE(mcr(p15, 0, r0, cr7, cr5, 4, mi), "4e070f95 mcrmi (CP15ISB)");
+ // Conditional speculation barrier.
+ COMPARE(csdb(), "e320f014 csdb");
+
VERIFY_RUN();
}
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 13deeb534b..3813408a2b 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -121,8 +121,8 @@ TEST_(bootstrap) {
// Instructions generated by C compiler, disassembled by objdump, and
// reformatted to suit our disassembly style.
- COMPARE(dci(0xa9ba7bfd), "stp fp, lr, [csp, #-96]!");
- COMPARE(dci(0x910003fd), "mov fp, csp");
+ COMPARE(dci(0xa9ba7bfd), "stp fp, lr, [sp, #-96]!");
+ COMPARE(dci(0x910003fd), "mov fp, sp");
COMPARE(dci(0x9100e3a0), "add x0, fp, #0x38 (56)");
COMPARE(dci(0xb900001f), "str wzr, [x0]");
COMPARE(dci(0x528000e1), "movz w1, #0x7");
@@ -140,7 +140,7 @@ TEST_(bootstrap) {
COMPARE(dci(0x2a0103e0), "mov w0, w1");
COMPARE(dci(0x93407c00), "sxtw x0, w0");
COMPARE(dci(0x2a000020), "orr w0, w1, w0");
- COMPARE(dci(0xa8c67bfd), "ldp fp, lr, [csp], #96");
+ COMPARE(dci(0xa8c67bfd), "ldp fp, lr, [sp], #96");
CLEANUP();
}
@@ -160,12 +160,12 @@ TEST_(mov_mvn) {
COMPARE(Mov(w14, Operand(w15, SXTH, 2)), "sbfiz w14, w15, #2, #16");
COMPARE(Mov(x16, Operand(x20, SXTW, 3)), "sbfiz x16, x20, #3, #32");
- COMPARE(Mov(x0, csp), "mov x0, csp");
- COMPARE(Mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(Mov(x0, sp), "mov x0, sp");
+ COMPARE(Mov(w0, wsp), "mov w0, wsp");
COMPARE(Mov(x0, xzr), "mov x0, xzr");
COMPARE(Mov(w0, wzr), "mov w0, wzr");
- COMPARE(mov(x0, csp), "mov x0, csp");
- COMPARE(mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(mov(x0, sp), "mov x0, sp");
+ COMPARE(mov(w0, wsp), "mov w0, wsp");
COMPARE(mov(x0, xzr), "mov x0, xzr");
COMPARE(mov(w0, wzr), "mov w0, wzr");
@@ -290,14 +290,14 @@ TEST_(add_immediate) {
"adds x16, x17, #0xaa000 (696320)");
COMPARE(cmn(w18, Operand(0xff)), "cmn w18, #0xff (255)");
COMPARE(cmn(x19, Operand(0xff000)), "cmn x19, #0xff000 (1044480)");
- COMPARE(add(w0, wcsp, Operand(0)), "mov w0, wcsp");
- COMPARE(add(csp, x0, Operand(0)), "mov csp, x0");
+ COMPARE(add(w0, wsp, Operand(0)), "mov w0, wsp");
+ COMPARE(add(sp, x0, Operand(0)), "mov sp, x0");
- COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
- COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
- COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
- COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
- COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+ COMPARE(add(w1, wsp, Operand(8)), "add w1, wsp, #0x8 (8)");
+ COMPARE(add(x2, sp, Operand(16)), "add x2, sp, #0x10 (16)");
+ COMPARE(add(wsp, wsp, Operand(42)), "add wsp, wsp, #0x2a (42)");
+ COMPARE(cmn(sp, Operand(24)), "cmn sp, #0x18 (24)");
+ COMPARE(adds(wzr, wsp, Operand(9)), "cmn wsp, #0x9 (9)");
CLEANUP();
}
@@ -321,11 +321,11 @@ TEST_(sub_immediate) {
COMPARE(cmp(w18, Operand(0xff)), "cmp w18, #0xff (255)");
COMPARE(cmp(x19, Operand(0xff000)), "cmp x19, #0xff000 (1044480)");
- COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
- COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
- COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
- COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
- COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+ COMPARE(add(w1, wsp, Operand(8)), "add w1, wsp, #0x8 (8)");
+ COMPARE(add(x2, sp, Operand(16)), "add x2, sp, #0x10 (16)");
+ COMPARE(add(wsp, wsp, Operand(42)), "add wsp, wsp, #0x2a (42)");
+ COMPARE(cmn(sp, Operand(24)), "cmn sp, #0x18 (24)");
+ COMPARE(adds(wzr, wsp, Operand(9)), "cmn wsp, #0x9 (9)");
CLEANUP();
}
@@ -345,12 +345,12 @@ TEST_(add_shifted) {
COMPARE(cmn(w24, Operand(w25)), "cmn w24, w25");
COMPARE(cmn(x26, Operand(cp, LSL, 63)), "cmn x26, cp, lsl #63");
- COMPARE(add(x0, csp, Operand(x1)), "add x0, csp, x1");
- COMPARE(add(w2, wcsp, Operand(w3)), "add w2, wcsp, w3");
- COMPARE(add(x4, csp, Operand(x5, LSL, 1)), "add x4, csp, x5, lsl #1");
+ COMPARE(add(x0, sp, Operand(x1)), "add x0, sp, x1");
+ COMPARE(add(w2, wsp, Operand(w3)), "add w2, wsp, w3");
+ COMPARE(add(x4, sp, Operand(x5, LSL, 1)), "add x4, sp, x5, lsl #1");
COMPARE(add(x4, xzr, Operand(x5, LSL, 1)), "add x4, xzr, x5, lsl #1");
- COMPARE(add(w6, wcsp, Operand(w7, LSL, 3)), "add w6, wcsp, w7, lsl #3");
- COMPARE(adds(xzr, csp, Operand(x8, LSL, 4)), "cmn csp, x8, lsl #4");
+ COMPARE(add(w6, wsp, Operand(w7, LSL, 3)), "add w6, wsp, w7, lsl #3");
+ COMPARE(adds(xzr, sp, Operand(x8, LSL, 4)), "cmn sp, x8, lsl #4");
COMPARE(adds(xzr, xzr, Operand(x8, LSL, 5)), "cmn xzr, x8, lsl #5");
CLEANUP();
@@ -375,12 +375,12 @@ TEST_(sub_shifted) {
COMPARE(negs(w1, Operand(w2)), "negs w1, w2");
COMPARE(negs(x3, Operand(x4, ASR, 61)), "negs x3, x4, asr #61");
- COMPARE(sub(x0, csp, Operand(x1)), "sub x0, csp, x1");
- COMPARE(sub(w2, wcsp, Operand(w3)), "sub w2, wcsp, w3");
- COMPARE(sub(x4, csp, Operand(x5, LSL, 1)), "sub x4, csp, x5, lsl #1");
+ COMPARE(sub(x0, sp, Operand(x1)), "sub x0, sp, x1");
+ COMPARE(sub(w2, wsp, Operand(w3)), "sub w2, wsp, w3");
+ COMPARE(sub(x4, sp, Operand(x5, LSL, 1)), "sub x4, sp, x5, lsl #1");
COMPARE(sub(x4, xzr, Operand(x5, LSL, 1)), "neg x4, x5, lsl #1");
- COMPARE(sub(w6, wcsp, Operand(w7, LSL, 3)), "sub w6, wcsp, w7, lsl #3");
- COMPARE(subs(xzr, csp, Operand(x8, LSL, 4)), "cmp csp, x8, lsl #4");
+ COMPARE(sub(w6, wsp, Operand(w7, LSL, 3)), "sub w6, wsp, w7, lsl #3");
+ COMPARE(subs(xzr, sp, Operand(x8, LSL, 4)), "cmp sp, x8, lsl #4");
COMPARE(subs(xzr, xzr, Operand(x8, LSL, 5)), "cmp xzr, x8, lsl #5");
CLEANUP();
@@ -403,11 +403,11 @@ TEST_(add_extended) {
COMPARE(cmn(w0, Operand(w1, UXTB, 2)), "cmn w0, w1, uxtb #2");
COMPARE(cmn(x2, Operand(x3, SXTH, 4)), "cmn x2, w3, sxth #4");
- COMPARE(add(w0, wcsp, Operand(w1, UXTB)), "add w0, wcsp, w1, uxtb");
- COMPARE(add(x2, csp, Operand(x3, UXTH, 1)), "add x2, csp, w3, uxth #1");
- COMPARE(add(wcsp, wcsp, Operand(w4, UXTW, 2)), "add wcsp, wcsp, w4, lsl #2");
- COMPARE(cmn(csp, Operand(xzr, UXTX, 3)), "cmn csp, xzr, lsl #3");
- COMPARE(cmn(csp, Operand(xzr, LSL, 4)), "cmn csp, xzr, lsl #4");
+ COMPARE(add(w0, wsp, Operand(w1, UXTB)), "add w0, wsp, w1, uxtb");
+ COMPARE(add(x2, sp, Operand(x3, UXTH, 1)), "add x2, sp, w3, uxth #1");
+ COMPARE(add(wsp, wsp, Operand(w4, UXTW, 2)), "add wsp, wsp, w4, lsl #2");
+ COMPARE(cmn(sp, Operand(xzr, UXTX, 3)), "cmn sp, xzr, lsl #3");
+ COMPARE(cmn(sp, Operand(xzr, LSL, 4)), "cmn sp, xzr, lsl #4");
CLEANUP();
}
@@ -429,11 +429,11 @@ TEST_(sub_extended) {
COMPARE(cmp(w0, Operand(w1, SXTB, 1)), "cmp w0, w1, sxtb #1");
COMPARE(cmp(x2, Operand(x3, UXTH, 3)), "cmp x2, w3, uxth #3");
- COMPARE(sub(w0, wcsp, Operand(w1, UXTB)), "sub w0, wcsp, w1, uxtb");
- COMPARE(sub(x2, csp, Operand(x3, UXTH, 1)), "sub x2, csp, w3, uxth #1");
- COMPARE(sub(wcsp, wcsp, Operand(w4, UXTW, 2)), "sub wcsp, wcsp, w4, lsl #2");
- COMPARE(cmp(csp, Operand(xzr, UXTX, 3)), "cmp csp, xzr, lsl #3");
- COMPARE(cmp(csp, Operand(xzr, LSL, 4)), "cmp csp, xzr, lsl #4");
+ COMPARE(sub(w0, wsp, Operand(w1, UXTB)), "sub w0, wsp, w1, uxtb");
+ COMPARE(sub(x2, sp, Operand(x3, UXTH, 1)), "sub x2, sp, w3, uxth #1");
+ COMPARE(sub(wsp, wsp, Operand(w4, UXTW, 2)), "sub wsp, wsp, w4, lsl #2");
+ COMPARE(cmp(sp, Operand(xzr, UXTX, 3)), "cmp sp, xzr, lsl #3");
+ COMPARE(cmp(sp, Operand(xzr, LSL, 4)), "cmp sp, xzr, lsl #4");
CLEANUP();
}
@@ -684,10 +684,10 @@ TEST_(logical_immediate) {
"ands fp, x0, #0x100000000");
// Test stack pointer.
- COMPARE(and_(wcsp, wzr, Operand(7)), "and wcsp, wzr, #0x7");
+ COMPARE(and_(wsp, wzr, Operand(7)), "and wsp, wzr, #0x7");
COMPARE(ands(xzr, xzr, Operand(7)), "tst xzr, #0x7");
- COMPARE(orr(csp, xzr, Operand(15)), "orr csp, xzr, #0xf");
- COMPARE(eor(wcsp, w0, Operand(31)), "eor wcsp, w0, #0x1f");
+ COMPARE(orr(sp, xzr, Operand(15)), "orr sp, xzr, #0xf");
+ COMPARE(eor(wsp, w0, Operand(31)), "eor wsp, w0, #0x1f");
// Test move aliases.
COMPARE(orr(w0, wzr, Operand(0x00000780)), "orr w0, wzr, #0x780");
@@ -1089,11 +1089,11 @@ TEST(load_store_v_offset) {
COMPARE(str(q31, MemOperand(x1, 16)), "str q31, [x1, #16]");
COMPARE(str(q0, MemOperand(x3, 65520)), "str q0, [x3, #65520]");
- COMPARE(ldr(s24, MemOperand(csp)), "ldr s24, [csp]");
- COMPARE(ldr(d25, MemOperand(csp, 8)), "ldr d25, [csp, #8]");
- COMPARE(ldr(b26, MemOperand(csp, 1)), "ldr b26, [csp, #1]");
- COMPARE(ldr(h27, MemOperand(csp, 2)), "ldr h27, [csp, #2]");
- COMPARE(ldr(q28, MemOperand(csp, 16)), "ldr q28, [csp, #16]");
+ COMPARE(ldr(s24, MemOperand(sp)), "ldr s24, [sp]");
+ COMPARE(ldr(d25, MemOperand(sp, 8)), "ldr d25, [sp, #8]");
+ COMPARE(ldr(b26, MemOperand(sp, 1)), "ldr b26, [sp, #1]");
+ COMPARE(ldr(h27, MemOperand(sp, 2)), "ldr h27, [sp, #2]");
+ COMPARE(ldr(q28, MemOperand(sp, 16)), "ldr q28, [sp, #16]");
CLEANUP();
}
@@ -1135,11 +1135,11 @@ TEST(load_store_v_pre) {
COMPARE(str(q31, MemOperand(x3, 255, PreIndex)), "str q31, [x3, #255]!");
COMPARE(str(q0, MemOperand(x5, -256, PreIndex)), "str q0, [x5, #-256]!");
- COMPARE(str(b24, MemOperand(csp, 1, PreIndex)), "str b24, [csp, #1]!");
- COMPARE(str(h25, MemOperand(csp, -2, PreIndex)), "str h25, [csp, #-2]!");
- COMPARE(str(s26, MemOperand(csp, 4, PreIndex)), "str s26, [csp, #4]!");
- COMPARE(str(d27, MemOperand(csp, -8, PreIndex)), "str d27, [csp, #-8]!");
- COMPARE(str(q28, MemOperand(csp, 16, PreIndex)), "str q28, [csp, #16]!");
+ COMPARE(str(b24, MemOperand(sp, 1, PreIndex)), "str b24, [sp, #1]!");
+ COMPARE(str(h25, MemOperand(sp, -2, PreIndex)), "str h25, [sp, #-2]!");
+ COMPARE(str(s26, MemOperand(sp, 4, PreIndex)), "str s26, [sp, #4]!");
+ COMPARE(str(d27, MemOperand(sp, -8, PreIndex)), "str d27, [sp, #-8]!");
+ COMPARE(str(q28, MemOperand(sp, 16, PreIndex)), "str q28, [sp, #16]!");
CLEANUP();
}
@@ -1181,11 +1181,11 @@ TEST(load_store_v_post) {
COMPARE(str(q31, MemOperand(x3, 255, PostIndex)), "str q31, [x3], #255");
COMPARE(str(q0, MemOperand(x5, -256, PostIndex)), "str q0, [x5], #-256");
- COMPARE(ldr(b24, MemOperand(csp, -1, PreIndex)), "ldr b24, [csp, #-1]!");
- COMPARE(ldr(h25, MemOperand(csp, 2, PreIndex)), "ldr h25, [csp, #2]!");
- COMPARE(ldr(s26, MemOperand(csp, -4, PreIndex)), "ldr s26, [csp, #-4]!");
- COMPARE(ldr(d27, MemOperand(csp, 8, PreIndex)), "ldr d27, [csp, #8]!");
- COMPARE(ldr(q28, MemOperand(csp, -16, PreIndex)), "ldr q28, [csp, #-16]!");
+ COMPARE(ldr(b24, MemOperand(sp, -1, PreIndex)), "ldr b24, [sp, #-1]!");
+ COMPARE(ldr(h25, MemOperand(sp, 2, PreIndex)), "ldr h25, [sp, #2]!");
+ COMPARE(ldr(s26, MemOperand(sp, -4, PreIndex)), "ldr s26, [sp, #-4]!");
+ COMPARE(ldr(d27, MemOperand(sp, 8, PreIndex)), "ldr d27, [sp, #8]!");
+ COMPARE(ldr(q28, MemOperand(sp, -16, PreIndex)), "ldr q28, [sp, #-16]!");
CLEANUP();
}
@@ -1198,88 +1198,88 @@ TEST(load_store_v_regoffset) {
COMPARE(ldr(b2, MemOperand(x3, w4, SXTW)), "ldr b2, [x3, w4, sxtw]");
// We can't assemble this instruction, but we check it disassembles correctly.
COMPARE(dci(0x3c657883), "ldr b3, [x4, x5, lsl #0]");
- COMPARE(ldr(b30, MemOperand(csp, xzr)), "ldr b30, [csp, xzr]");
- COMPARE(ldr(b31, MemOperand(csp, wzr, UXTW)), "ldr b31, [csp, wzr, uxtw]");
+ COMPARE(ldr(b30, MemOperand(sp, xzr)), "ldr b30, [sp, xzr]");
+ COMPARE(ldr(b31, MemOperand(sp, wzr, UXTW)), "ldr b31, [sp, wzr, uxtw]");
COMPARE(ldr(h0, MemOperand(x1, x2)), "ldr h0, [x1, x2]");
COMPARE(ldr(h1, MemOperand(x2, w3, UXTW)), "ldr h1, [x2, w3, uxtw]");
COMPARE(ldr(h2, MemOperand(x3, w4, SXTW)), "ldr h2, [x3, w4, sxtw]");
COMPARE(ldr(h3, MemOperand(x4, w5, UXTW, 1)), "ldr h3, [x4, w5, uxtw #1]");
COMPARE(ldr(h4, MemOperand(x5, w5, SXTW, 1)), "ldr h4, [x5, w5, sxtw #1]");
- COMPARE(ldr(h30, MemOperand(csp, xzr)), "ldr h30, [csp, xzr]");
- COMPARE(ldr(h31, MemOperand(csp, wzr, SXTW, 1)),
- "ldr h31, [csp, wzr, sxtw #1]");
+ COMPARE(ldr(h30, MemOperand(sp, xzr)), "ldr h30, [sp, xzr]");
+ COMPARE(ldr(h31, MemOperand(sp, wzr, SXTW, 1)),
+ "ldr h31, [sp, wzr, sxtw #1]");
COMPARE(ldr(s0, MemOperand(x1, x2)), "ldr s0, [x1, x2]");
COMPARE(ldr(s1, MemOperand(x2, w3, UXTW)), "ldr s1, [x2, w3, uxtw]");
COMPARE(ldr(s2, MemOperand(x3, w4, SXTW)), "ldr s2, [x3, w4, sxtw]");
COMPARE(ldr(s3, MemOperand(x4, w5, UXTW, 2)), "ldr s3, [x4, w5, uxtw #2]");
COMPARE(ldr(s4, MemOperand(x5, w5, SXTW, 2)), "ldr s4, [x5, w5, sxtw #2]");
- COMPARE(ldr(s30, MemOperand(csp, xzr)), "ldr s30, [csp, xzr]");
- COMPARE(ldr(s31, MemOperand(csp, wzr, SXTW, 2)),
- "ldr s31, [csp, wzr, sxtw #2]");
+ COMPARE(ldr(s30, MemOperand(sp, xzr)), "ldr s30, [sp, xzr]");
+ COMPARE(ldr(s31, MemOperand(sp, wzr, SXTW, 2)),
+ "ldr s31, [sp, wzr, sxtw #2]");
COMPARE(ldr(d0, MemOperand(x1, x2)), "ldr d0, [x1, x2]");
COMPARE(ldr(d1, MemOperand(x2, w3, UXTW)), "ldr d1, [x2, w3, uxtw]");
COMPARE(ldr(d2, MemOperand(x3, w4, SXTW)), "ldr d2, [x3, w4, sxtw]");
COMPARE(ldr(d3, MemOperand(x4, w5, UXTW, 3)), "ldr d3, [x4, w5, uxtw #3]");
COMPARE(ldr(d4, MemOperand(x5, w5, SXTW, 3)), "ldr d4, [x5, w5, sxtw #3]");
- COMPARE(ldr(d30, MemOperand(csp, xzr)), "ldr d30, [csp, xzr]");
- COMPARE(ldr(d31, MemOperand(csp, wzr, SXTW, 3)),
- "ldr d31, [csp, wzr, sxtw #3]");
+ COMPARE(ldr(d30, MemOperand(sp, xzr)), "ldr d30, [sp, xzr]");
+ COMPARE(ldr(d31, MemOperand(sp, wzr, SXTW, 3)),
+ "ldr d31, [sp, wzr, sxtw #3]");
COMPARE(ldr(q0, MemOperand(x1, x2)), "ldr q0, [x1, x2]");
COMPARE(ldr(q1, MemOperand(x2, w3, UXTW)), "ldr q1, [x2, w3, uxtw]");
COMPARE(ldr(q2, MemOperand(x3, w4, SXTW)), "ldr q2, [x3, w4, sxtw]");
COMPARE(ldr(q3, MemOperand(x4, w5, UXTW, 4)), "ldr q3, [x4, w5, uxtw #4]");
COMPARE(ldr(q4, MemOperand(x5, w5, SXTW, 4)), "ldr q4, [x5, w5, sxtw #4]");
- COMPARE(ldr(q30, MemOperand(csp, xzr)), "ldr q30, [csp, xzr]");
- COMPARE(ldr(q31, MemOperand(csp, wzr, SXTW, 4)),
- "ldr q31, [csp, wzr, sxtw #4]");
+ COMPARE(ldr(q30, MemOperand(sp, xzr)), "ldr q30, [sp, xzr]");
+ COMPARE(ldr(q31, MemOperand(sp, wzr, SXTW, 4)),
+ "ldr q31, [sp, wzr, sxtw #4]");
COMPARE(str(b0, MemOperand(x1, x2)), "str b0, [x1, x2]");
COMPARE(str(b1, MemOperand(x2, w3, UXTW)), "str b1, [x2, w3, uxtw]");
COMPARE(str(b2, MemOperand(x3, w4, SXTW)), "str b2, [x3, w4, sxtw]");
// We can't assemble this instruction, but we check it disassembles correctly.
COMPARE(dci(0x3c257883), "str b3, [x4, x5, lsl #0]");
- COMPARE(str(b30, MemOperand(csp, xzr)), "str b30, [csp, xzr]");
- COMPARE(str(b31, MemOperand(csp, wzr, UXTW)), "str b31, [csp, wzr, uxtw]");
+ COMPARE(str(b30, MemOperand(sp, xzr)), "str b30, [sp, xzr]");
+ COMPARE(str(b31, MemOperand(sp, wzr, UXTW)), "str b31, [sp, wzr, uxtw]");
COMPARE(str(h0, MemOperand(x1, x2)), "str h0, [x1, x2]");
COMPARE(str(h1, MemOperand(x2, w3, UXTW)), "str h1, [x2, w3, uxtw]");
COMPARE(str(h2, MemOperand(x3, w4, SXTW)), "str h2, [x3, w4, sxtw]");
COMPARE(str(h3, MemOperand(x4, w5, UXTW, 1)), "str h3, [x4, w5, uxtw #1]");
COMPARE(str(h4, MemOperand(x5, w5, SXTW, 1)), "str h4, [x5, w5, sxtw #1]");
- COMPARE(str(h30, MemOperand(csp, xzr)), "str h30, [csp, xzr]");
- COMPARE(str(h31, MemOperand(csp, wzr, SXTW, 1)),
- "str h31, [csp, wzr, sxtw #1]");
+ COMPARE(str(h30, MemOperand(sp, xzr)), "str h30, [sp, xzr]");
+ COMPARE(str(h31, MemOperand(sp, wzr, SXTW, 1)),
+ "str h31, [sp, wzr, sxtw #1]");
COMPARE(str(s0, MemOperand(x1, x2)), "str s0, [x1, x2]");
COMPARE(str(s1, MemOperand(x2, w3, UXTW)), "str s1, [x2, w3, uxtw]");
COMPARE(str(s2, MemOperand(x3, w4, SXTW)), "str s2, [x3, w4, sxtw]");
COMPARE(str(s3, MemOperand(x4, w5, UXTW, 2)), "str s3, [x4, w5, uxtw #2]");
COMPARE(str(s4, MemOperand(x5, w5, SXTW, 2)), "str s4, [x5, w5, sxtw #2]");
- COMPARE(str(s30, MemOperand(csp, xzr)), "str s30, [csp, xzr]");
- COMPARE(str(s31, MemOperand(csp, wzr, SXTW, 2)),
- "str s31, [csp, wzr, sxtw #2]");
+ COMPARE(str(s30, MemOperand(sp, xzr)), "str s30, [sp, xzr]");
+ COMPARE(str(s31, MemOperand(sp, wzr, SXTW, 2)),
+ "str s31, [sp, wzr, sxtw #2]");
COMPARE(str(d0, MemOperand(x1, x2)), "str d0, [x1, x2]");
COMPARE(str(d1, MemOperand(x2, w3, UXTW)), "str d1, [x2, w3, uxtw]");
COMPARE(str(d2, MemOperand(x3, w4, SXTW)), "str d2, [x3, w4, sxtw]");
COMPARE(str(d3, MemOperand(x4, w5, UXTW, 3)), "str d3, [x4, w5, uxtw #3]");
COMPARE(str(d4, MemOperand(x5, w5, SXTW, 3)), "str d4, [x5, w5, sxtw #3]");
- COMPARE(str(d30, MemOperand(csp, xzr)), "str d30, [csp, xzr]");
- COMPARE(str(d31, MemOperand(csp, wzr, SXTW, 3)),
- "str d31, [csp, wzr, sxtw #3]");
+ COMPARE(str(d30, MemOperand(sp, xzr)), "str d30, [sp, xzr]");
+ COMPARE(str(d31, MemOperand(sp, wzr, SXTW, 3)),
+ "str d31, [sp, wzr, sxtw #3]");
COMPARE(str(q0, MemOperand(x1, x2)), "str q0, [x1, x2]");
COMPARE(str(q1, MemOperand(x2, w3, UXTW)), "str q1, [x2, w3, uxtw]");
COMPARE(str(q2, MemOperand(x3, w4, SXTW)), "str q2, [x3, w4, sxtw]");
COMPARE(str(q3, MemOperand(x4, w5, UXTW, 4)), "str q3, [x4, w5, uxtw #4]");
COMPARE(str(q4, MemOperand(x5, w5, SXTW, 4)), "str q4, [x5, w5, sxtw #4]");
- COMPARE(str(q30, MemOperand(csp, xzr)), "str q30, [csp, xzr]");
- COMPARE(str(q31, MemOperand(csp, wzr, SXTW, 4)),
- "str q31, [csp, wzr, sxtw #4]");
+ COMPARE(str(q30, MemOperand(sp, xzr)), "str q30, [sp, xzr]");
+ COMPARE(str(q31, MemOperand(sp, wzr, SXTW, 4)),
+ "str q31, [sp, wzr, sxtw #4]");
CLEANUP();
}
@@ -1304,8 +1304,8 @@ TEST_(load_store_unscaled) {
COMPARE(str(x26, MemOperand(x27, -1)), "stur x26, [cp, #-1]");
COMPARE(str(x28, MemOperand(x29, 255)), "stur x28, [fp, #255]");
COMPARE(str(x30, MemOperand(x0, -256)), "stur lr, [x0, #-256]");
- COMPARE(ldr(w0, MemOperand(csp, 1)), "ldur w0, [csp, #1]");
- COMPARE(str(x1, MemOperand(csp, -1)), "stur x1, [csp, #-1]");
+ COMPARE(ldr(w0, MemOperand(sp, 1)), "ldur w0, [sp, #1]");
+ COMPARE(str(x1, MemOperand(sp, -1)), "stur x1, [sp, #-1]");
COMPARE(ldrb(w2, MemOperand(x3, -2)), "ldurb w2, [x3, #-2]");
COMPARE(ldrsb(w4, MemOperand(x5, -3)), "ldursb w4, [x5, #-3]");
COMPARE(ldrsb(x6, MemOperand(x7, -4)), "ldursb x6, [x7, #-4]");
@@ -1476,18 +1476,18 @@ TEST_(load_store_acquire_release) {
COMPARE(stlxrb(w21, w22, x23), "stlxrb w21, w22, [x23]");
COMPARE(stlxrh(w24, w25, x26), "stlxrh w24, w25, [x26]");
- COMPARE(ldarb(wzr, csp), "ldarb wzr, [csp]");
- COMPARE(ldarh(wzr, csp), "ldarh wzr, [csp]");
- COMPARE(ldar(wzr, csp), "ldar wzr, [csp]");
- COMPARE(stlrb(wzr, csp), "stlrb wzr, [csp]");
- COMPARE(stlrh(wzr, csp), "stlrh wzr, [csp]");
- COMPARE(stlr(wzr, csp), "stlr wzr, [csp]");
- COMPARE(ldaxrb(wzr, csp), "ldaxrb wzr, [csp]");
- COMPARE(ldaxrh(wzr, csp), "ldaxrh wzr, [csp]");
- COMPARE(ldaxr(wzr, csp), "ldaxr wzr, [csp]");
- COMPARE(stlxrb(w0, wzr, csp), "stlxrb w0, wzr, [csp]");
- COMPARE(stlxrh(wzr, w1, csp), "stlxrh wzr, w1, [csp]");
- COMPARE(stlxr(w2, wzr, csp), "stlxr w2, wzr, [csp]");
+ COMPARE(ldarb(wzr, sp), "ldarb wzr, [sp]");
+ COMPARE(ldarh(wzr, sp), "ldarh wzr, [sp]");
+ COMPARE(ldar(wzr, sp), "ldar wzr, [sp]");
+ COMPARE(stlrb(wzr, sp), "stlrb wzr, [sp]");
+ COMPARE(stlrh(wzr, sp), "stlrh wzr, [sp]");
+ COMPARE(stlr(wzr, sp), "stlr wzr, [sp]");
+ COMPARE(ldaxrb(wzr, sp), "ldaxrb wzr, [sp]");
+ COMPARE(ldaxrh(wzr, sp), "ldaxrh wzr, [sp]");
+ COMPARE(ldaxr(wzr, sp), "ldaxr wzr, [sp]");
+ COMPARE(stlxrb(w0, wzr, sp), "stlxrb w0, wzr, [sp]");
+ COMPARE(stlxrh(wzr, w1, sp), "stlxrh wzr, w1, [sp]");
+ COMPARE(stlxr(w2, wzr, sp), "stlxr w2, wzr, [sp]");
CLEANUP();
}
@@ -2038,6 +2038,9 @@ TEST_(barriers) {
// ISB
COMPARE(Isb(), "isb");
+ // CSDB
+ COMPARE(Csdb(), "csdb");
+
CLEANUP();
}
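
CSDB is the Spectre-motivated "consumption of speculative data barrier". On A64 it sits in the HINT space; assuming the standard encoding (HINT #20, i.e. 0xd503229f — an assumption, this diff only shows the A32 encoding e320f014), the raw form could be exercised the same way this file uses dci() elsewhere:

    COMPARE(dci(0xd503229f), "csdb");  // assumed A64 encoding of csdb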
@@ -2099,24 +2102,24 @@ TEST_(barriers) {
TEST(neon_load_store_vector) {
SET_UP_MASM();
-#define DISASM_INST(M, S) \
- COMPARE(Ld1(v0.M, MemOperand(x15)), "ld1 {v0." S "}, [x15]"); \
- COMPARE(Ld1(v1.M, v2.M, MemOperand(x16)), \
- "ld1 {v1." S ", v2." S "}, [x16]"); \
- COMPARE(Ld1(v3.M, v4.M, v5.M, MemOperand(x17)), \
- "ld1 {v3." S ", v4." S ", v5." S "}, [x17]"); \
- COMPARE(Ld1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18)), \
- "ld1 {v6." S ", v7." S ", v8_." S ", v9." S "}, [x18]") \
- COMPARE(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
- "ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp]") \
- COMPARE(Ld2(v1.M, v2.M, MemOperand(x16)), \
- "ld2 {v1." S ", v2." S "}, [x16]"); \
- COMPARE(Ld3(v3.M, v4.M, v5.M, MemOperand(x17)), \
- "ld3 {v3." S ", v4." S ", v5." S "}, [x17]"); \
- COMPARE(Ld4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18)), \
- "ld4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18]") \
- COMPARE(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
- "ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp]") \
+#define DISASM_INST(M, S) \
+ COMPARE(Ld1(v0.M, MemOperand(x15)), "ld1 {v0." S "}, [x15]"); \
+ COMPARE(Ld1(v1.M, v2.M, MemOperand(x16)), \
+ "ld1 {v1." S ", v2." S "}, [x16]"); \
+ COMPARE(Ld1(v3.M, v4.M, v5.M, MemOperand(x17)), \
+ "ld1 {v3." S ", v4." S ", v5." S "}, [x17]"); \
+ COMPARE(Ld1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18)), \
+ "ld1 {v6." S ", v7." S ", v8_." S ", v9." S "}, [x18]") \
+ COMPARE(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
+ "ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
+ COMPARE(Ld2(v1.M, v2.M, MemOperand(x16)), \
+ "ld2 {v1." S ", v2." S "}, [x16]"); \
+ COMPARE(Ld3(v3.M, v4.M, v5.M, MemOperand(x17)), \
+ "ld3 {v3." S ", v4." S ", v5." S "}, [x17]"); \
+ COMPARE(Ld4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18)), \
+ "ld4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18]") \
+ COMPARE(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
+ "ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
NEON_FORMAT_LIST(DISASM_INST);
#undef DISASM_INST
@@ -2130,7 +2133,7 @@ TEST(neon_load_store_vector) {
COMPARE(Ld1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
"ld1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
COMPARE(Ld1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
- "ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp], x24") \
+ "ld1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
COMPARE(Ld2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
"ld2 {v1." S ", v2." S "}, [x16], x21"); \
COMPARE(Ld3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
@@ -2138,7 +2141,7 @@ TEST(neon_load_store_vector) {
COMPARE(Ld4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
"ld4 {v6." S ", v7." S ", v8_." S ", v9." S "}, [x18], x23") \
COMPARE(Ld4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
- "ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp], x24") \
+ "ld4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
NEON_FORMAT_LIST(DISASM_INST);
#undef DISASM_INST
@@ -2195,36 +2198,36 @@ TEST(neon_load_store_vector) {
"st1 {v23." S ", v24." S ", v25." S "}, [x17]"); \
COMPARE(St1(v26.M, v27.M, v28.M, v29.M, MemOperand(x18)), \
"st1 {v26." S ", v27." S ", v28." S ", v29." S "}, [x18]") \
- COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(csp)), \
- "st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp]") \
+ COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
+ "st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]") \
COMPARE(St2(VLIST2(v21.M), MemOperand(x16)), \
"st2 {v21." S ", v22." S "}, [x16]"); \
COMPARE(St3(v23.M, v24.M, v25.M, MemOperand(x17)), \
"st3 {v23." S ", v24." S ", v25." S "}, [x17]"); \
- COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(csp)), \
- "st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp]")
+ COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp)), \
+ "st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp]")
NEON_FORMAT_LIST(DISASM_INST);
#undef DISASM_INST
-#define DISASM_INST(M, S) \
- COMPARE(St1(v0.M, MemOperand(x15, x20, PostIndex)), \
- "st1 {v0." S "}, [x15], x20"); \
- COMPARE(St1(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
- "st1 {v1." S ", v2." S "}, [x16], x21"); \
- COMPARE(St1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
- "st1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
- COMPARE(St1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
- "st1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
- COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(csp, x24, PostIndex)), \
- "st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp], x24") \
- COMPARE(St2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
- "st2 {v1." S ", v2." S "}, [x16], x21"); \
- COMPARE(St3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
- "st3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
- COMPARE(St4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
- "st4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
- COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(csp, x24, PostIndex)), \
- "st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [csp], x24")
+#define DISASM_INST(M, S) \
+ COMPARE(St1(v0.M, MemOperand(x15, x20, PostIndex)), \
+ "st1 {v0." S "}, [x15], x20"); \
+ COMPARE(St1(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
+ "st1 {v1." S ", v2." S "}, [x16], x21"); \
+ COMPARE(St1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
+ "st1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
+ COMPARE(St1(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
+ "st1 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
+ COMPARE(St1(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
+ "st1 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24") \
+ COMPARE(St2(v1.M, v2.M, MemOperand(x16, x21, PostIndex)), \
+ "st2 {v1." S ", v2." S "}, [x16], x21"); \
+ COMPARE(St3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \
+ "st3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \
+ COMPARE(St4(v6.M, v7.M, v8_.M, v9.M, MemOperand(x18, x23, PostIndex)), \
+ "st4 {v6." S ", v7." S ", v8." S ", v9." S "}, [x18], x23") \
+ COMPARE(St4(v30.M, v31.M, v0.M, v1.M, MemOperand(sp, x24, PostIndex)), \
+ "st4 {v30." S ", v31." S ", v0." S ", v1." S "}, [sp], x24")
NEON_FORMAT_LIST(DISASM_INST);
#undef DISASM_INST
@@ -2357,7 +2360,7 @@ TEST(neon_load_store_lane) {
COMPARE(Ld1(v10.H(), 7, MemOperand(x25)), "ld1 {v10.h}[7], [x25]");
COMPARE(Ld1(v11.S(), 1, MemOperand(x26)), "ld1 {v11.s}[1], [x26]");
COMPARE(Ld1(v12.S(), 3, MemOperand(x27)), "ld1 {v12.s}[3], [cp]");
- COMPARE(Ld1(v13.D(), 1, MemOperand(csp)), "ld1 {v13.d}[1], [csp]");
+ COMPARE(Ld1(v13.D(), 1, MemOperand(sp)), "ld1 {v13.d}[1], [sp]");
COMPARE(Ld1(v0.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"ld1 {v0.b}[0], [x15], x0");
@@ -2387,10 +2390,10 @@ TEST(neon_load_store_lane) {
"ld1 {v12.s}[3], [cp], x5");
COMPARE(Ld1(v12.S(), 3, MemOperand(x27, 4, PostIndex)),
"ld1 {v12.s}[3], [cp], #4");
- COMPARE(Ld1(v13.D(), 1, MemOperand(csp, x6, PostIndex)),
- "ld1 {v13.d}[1], [csp], x6");
- COMPARE(Ld1(v13.D(), 1, MemOperand(csp, 8, PostIndex)),
- "ld1 {v13.d}[1], [csp], #8");
+ COMPARE(Ld1(v13.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "ld1 {v13.d}[1], [sp], x6");
+ COMPARE(Ld1(v13.D(), 1, MemOperand(sp, 8, PostIndex)),
+ "ld1 {v13.d}[1], [sp], #8");
COMPARE(Ld2(v0.V8B(), v1.V8B(), 0, MemOperand(x15)),
"ld2 {v0.b, v1.b}[0], [x15]");
@@ -2418,8 +2421,8 @@ TEST(neon_load_store_lane) {
"ld2 {v11.s, v12.s}[1], [x26]");
COMPARE(Ld2(v12.S(), v13.S(), 3, MemOperand(x27)),
"ld2 {v12.s, v13.s}[3], [cp]");
- COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(csp)),
- "ld2 {v13.d, v14.d}[1], [csp]");
+ COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp)),
+ "ld2 {v13.d, v14.d}[1], [sp]");
COMPARE(Ld2(v0.V8B(), v1.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"ld2 {v0.b, v1.b}[0], [x15], x0");
@@ -2449,10 +2452,10 @@ TEST(neon_load_store_lane) {
"ld2 {v12.s, v13.s}[3], [cp], x5");
COMPARE(Ld2(v11.S(), v12.S(), 3, MemOperand(x26, 8, PostIndex)),
"ld2 {v11.s, v12.s}[3], [x26], #8");
- COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(csp, x6, PostIndex)),
- "ld2 {v13.d, v14.d}[1], [csp], x6");
- COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(csp, 16, PostIndex)),
- "ld2 {v13.d, v14.d}[1], [csp], #16");
+ COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "ld2 {v13.d, v14.d}[1], [sp], x6");
+ COMPARE(Ld2(v13.D(), v14.D(), 1, MemOperand(sp, 16, PostIndex)),
+ "ld2 {v13.d, v14.d}[1], [sp], #16");
COMPARE(Ld3(v0.V8B(), v1.V8B(), v2.V8B(), 0, MemOperand(x15)),
"ld3 {v0.b, v1.b, v2.b}[0], [x15]");
@@ -2480,8 +2483,8 @@ TEST(neon_load_store_lane) {
"ld3 {v11.s, v12.s, v13.s}[1], [x26]");
COMPARE(Ld3(v12.S(), v13.S(), v14.S(), 3, MemOperand(x27)),
"ld3 {v12.s, v13.s, v14.s}[3], [cp]");
- COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(csp)),
- "ld3 {v13.d, v14.d, v15.d}[1], [csp]");
+ COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(sp)),
+ "ld3 {v13.d, v14.d, v15.d}[1], [sp]");
COMPARE(Ld3(v0.V8B(), v1.V8B(), v2.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"ld3 {v0.b, v1.b, v2.b}[0], [x15], x0");
@@ -2512,10 +2515,10 @@ TEST(neon_load_store_lane) {
"ld3 {v12.s, v13.s, v14.s}[3], [cp], x5");
COMPARE(Ld3(v12.S(), v13.S(), v14.S(), 3, MemOperand(x27, 12, PostIndex)),
"ld3 {v12.s, v13.s, v14.s}[3], [cp], #12");
- COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(csp, x6, PostIndex)),
- "ld3 {v13.d, v14.d, v15.d}[1], [csp], x6");
- COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(csp, 24, PostIndex)),
- "ld3 {v13.d, v14.d, v15.d}[1], [csp], #24");
+ COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "ld3 {v13.d, v14.d, v15.d}[1], [sp], x6");
+ COMPARE(Ld3(v13.D(), v14.D(), v15.D(), 1, MemOperand(sp, 24, PostIndex)),
+ "ld3 {v13.d, v14.d, v15.d}[1], [sp], #24");
COMPARE(Ld4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), 0, MemOperand(x15)),
"ld4 {v0.b, v1.b, v2.b, v3.b}[0], [x15]");
@@ -2543,8 +2546,8 @@ TEST(neon_load_store_lane) {
"ld4 {v11.s, v12.s, v13.s, v14.s}[1], [x26]");
COMPARE(Ld4(v12.S(), v13.S(), v14.S(), v15.S(), 3, MemOperand(x27)),
"ld4 {v12.s, v13.s, v14.s, v15.s}[3], [cp]");
- COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(csp)),
- "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [csp]");
+ COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(sp)),
+ "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp]");
COMPARE(Ld4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), 0,
MemOperand(x15, x0, PostIndex)),
@@ -2588,12 +2591,12 @@ TEST(neon_load_store_lane) {
COMPARE(Ld4(v11.S(), v12.S(), v13.S(), v14.S(), 3,
MemOperand(x26, 16, PostIndex)),
"ld4 {v11.s, v12.s, v13.s, v14.s}[3], [x26], #16");
- COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1,
- MemOperand(csp, x6, PostIndex)),
- "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [csp], x6");
- COMPARE(Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1,
- MemOperand(csp, 32, PostIndex)),
- "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [csp], #32");
+ COMPARE(
+ Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], x6");
+ COMPARE(
+ Ld4(v13.D(), v14.D(), v15.D(), v16.D(), 1, MemOperand(sp, 32, PostIndex)),
+ "ld4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], #32");
COMPARE(St1(v0.V8B(), 0, MemOperand(x15)), "st1 {v0.b}[0], [x15]");
COMPARE(St1(v1.V16B(), 1, MemOperand(x16)), "st1 {v1.b}[1], [x16]");
@@ -2608,7 +2611,7 @@ TEST(neon_load_store_lane) {
COMPARE(St1(v10.H(), 7, MemOperand(x25)), "st1 {v10.h}[7], [x25]");
COMPARE(St1(v11.S(), 1, MemOperand(x26)), "st1 {v11.s}[1], [x26]");
COMPARE(St1(v12.S(), 3, MemOperand(x27)), "st1 {v12.s}[3], [cp]");
- COMPARE(St1(v13.D(), 1, MemOperand(csp)), "st1 {v13.d}[1], [csp]");
+ COMPARE(St1(v13.D(), 1, MemOperand(sp)), "st1 {v13.d}[1], [sp]");
COMPARE(St1(v0.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"st1 {v0.b}[0], [x15], x0");
@@ -2636,8 +2639,8 @@ TEST(neon_load_store_lane) {
"st1 {v11.s}[1], [x26], #4");
COMPARE(St1(v12.S(), 3, MemOperand(x27, x5, PostIndex)),
"st1 {v12.s}[3], [cp], x5");
- COMPARE(St1(v13.D(), 1, MemOperand(csp, x6, PostIndex)),
- "st1 {v13.d}[1], [csp], x6");
+ COMPARE(St1(v13.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "st1 {v13.d}[1], [sp], x6");
COMPARE(St2(v0.V8B(), v1.V8B(), 0, MemOperand(x15, x0, PostIndex)),
"st2 {v0.b, v1.b}[0], [x15], x0");
COMPARE(St2(v1.V16B(), v2.V16B(), 1, MemOperand(x16, 2, PostIndex)),
@@ -2664,8 +2667,8 @@ TEST(neon_load_store_lane) {
"st2 {v11.s, v12.s}[1], [x26], #8");
COMPARE(St2(v12.S(), v13.S(), 3, MemOperand(x27, x5, PostIndex)),
"st2 {v12.s, v13.s}[3], [cp], x5");
- COMPARE(St2(v13.D(), v14.D(), 1, MemOperand(csp, x6, PostIndex)),
- "st2 {v13.d, v14.d}[1], [csp], x6");
+ COMPARE(St2(v13.D(), v14.D(), 1, MemOperand(sp, x6, PostIndex)),
+ "st2 {v13.d, v14.d}[1], [sp], x6");
COMPARE(St3(VLIST3(v0.V8B()), 0, MemOperand(x15, x0, PostIndex)),
"st3 {v0.b, v1.b, v2.b}[0], [x15], x0");
COMPARE(St3(VLIST3(v1.V16B()), 1, MemOperand(x16, 3, PostIndex)),
@@ -2692,8 +2695,8 @@ TEST(neon_load_store_lane) {
"st3 {v11.s, v12.s, v13.s}[1], [x26], #12");
COMPARE(St3(VLIST3(v12.S()), 3, MemOperand(x27, x5, PostIndex)),
"st3 {v12.s, v13.s, v14.s}[3], [cp], x5");
- COMPARE(St3(VLIST3(v13.D()), 1, MemOperand(csp, x6, PostIndex)),
- "st3 {v13.d, v14.d, v15.d}[1], [csp], x6");
+ COMPARE(St3(VLIST3(v13.D()), 1, MemOperand(sp, x6, PostIndex)),
+ "st3 {v13.d, v14.d, v15.d}[1], [sp], x6");
COMPARE(St4(VLIST4(v0.V8B()), 0, MemOperand(x15, x0, PostIndex)),
"st4 {v0.b, v1.b, v2.b, v3.b}[0], [x15], x0");
@@ -2721,8 +2724,8 @@ TEST(neon_load_store_lane) {
"st4 {v11.s, v12.s, v13.s, v14.s}[1], [x26], #16");
COMPARE(St4(VLIST4(v12.S()), 3, MemOperand(x27, x5, PostIndex)),
"st4 {v12.s, v13.s, v14.s, v15.s}[3], [cp], x5");
- COMPARE(St4(VLIST4(v13.D()), 1, MemOperand(csp, x6, PostIndex)),
- "st4 {v13.d, v14.d, v15.d, v16.d}[1], [csp], x6");
+ COMPARE(St4(VLIST4(v13.D()), 1, MemOperand(sp, x6, PostIndex)),
+ "st4 {v13.d, v14.d, v15.d, v16.d}[1], [sp], x6");
CLEANUP();
}
@@ -2810,7 +2813,7 @@ TEST(neon_load_all_lanes) {
COMPARE(Ld1r(v17.V8H(), MemOperand(x3)), "ld1r {v17.8h}, [x3]");
COMPARE(Ld1r(v18.V2S(), MemOperand(x4)), "ld1r {v18.2s}, [x4]");
COMPARE(Ld1r(v19.V4S(), MemOperand(x5)), "ld1r {v19.4s}, [x5]");
- COMPARE(Ld1r(v20.V2D(), MemOperand(csp)), "ld1r {v20.2d}, [csp]");
+ COMPARE(Ld1r(v20.V2D(), MemOperand(sp)), "ld1r {v20.2d}, [sp]");
COMPARE(Ld1r(v21.V1D(), MemOperand(x30)), "ld1r {v21.1d}, [lr]");
COMPARE(Ld1r(v22.V8B(), MemOperand(x6, 1, PostIndex)),
@@ -2842,8 +2845,8 @@ TEST(neon_load_all_lanes) {
"ld2r {v18.2s, v19.2s}, [x4]");
COMPARE(Ld2r(v19.V4S(), v20.V4S(), MemOperand(x5)),
"ld2r {v19.4s, v20.4s}, [x5]");
- COMPARE(Ld2r(v20.V2D(), v21.V2D(), MemOperand(csp)),
- "ld2r {v20.2d, v21.2d}, [csp]");
+ COMPARE(Ld2r(v20.V2D(), v21.V2D(), MemOperand(sp)),
+ "ld2r {v20.2d, v21.2d}, [sp]");
COMPARE(Ld2r(v21.V8B(), v22.V8B(), MemOperand(x6, 2, PostIndex)),
"ld2r {v21.8b, v22.8b}, [x6], #2");
COMPARE(Ld2r(v22.V16B(), v23.V16B(), MemOperand(x7, x16, PostIndex)),
@@ -2871,8 +2874,8 @@ TEST(neon_load_all_lanes) {
"ld3r {v18.2s, v19.2s, v20.2s}, [x4]");
COMPARE(Ld3r(v19.V4S(), v20.V4S(), v21.V4S(), MemOperand(x5)),
"ld3r {v19.4s, v20.4s, v21.4s}, [x5]");
- COMPARE(Ld3r(v20.V2D(), v21.V2D(), v22.V2D(), MemOperand(csp)),
- "ld3r {v20.2d, v21.2d, v22.2d}, [csp]");
+ COMPARE(Ld3r(v20.V2D(), v21.V2D(), v22.V2D(), MemOperand(sp)),
+ "ld3r {v20.2d, v21.2d, v22.2d}, [sp]");
COMPARE(Ld3r(v21.V8B(), v22.V8B(), v23.V8B(), MemOperand(x6, 3, PostIndex)),
"ld3r {v21.8b, v22.8b, v23.8b}, [x6], #3");
COMPARE(
@@ -2902,8 +2905,8 @@ TEST(neon_load_all_lanes) {
"ld4r {v18.2s, v19.2s, v20.2s, v21.2s}, [x4]");
COMPARE(Ld4r(v19.V4S(), v20.V4S(), v21.V4S(), v22.V4S(), MemOperand(x5)),
"ld4r {v19.4s, v20.4s, v21.4s, v22.4s}, [x5]");
- COMPARE(Ld4r(v20.V2D(), v21.V2D(), v22.V2D(), v23.V2D(), MemOperand(csp)),
- "ld4r {v20.2d, v21.2d, v22.2d, v23.2d}, [csp]");
+ COMPARE(Ld4r(v20.V2D(), v21.V2D(), v22.V2D(), v23.V2D(), MemOperand(sp)),
+ "ld4r {v20.2d, v21.2d, v22.2d, v23.2d}, [sp]");
COMPARE(Ld4r(v21.V8B(), v22.V8B(), v23.V8B(), v24.V8B(),
MemOperand(x6, 4, PostIndex)),
"ld4r {v21.8b, v22.8b, v23.8b, v24.8b}, [x6], #4");
@@ -4955,5 +4958,25 @@ TEST(neon_shift_immediate) {
CLEANUP();
}
+#undef TEST_
+#undef EXP_SIZE
+#undef INSTR_SIZE
+#undef SET_UP_MASM
+#undef SET_UP_ASM
+#undef COMPARE
+#undef COMPARE_PREFIX
+#undef CLEANUP
+#undef VLIST2
+#undef VLIST3
+#undef VLIST4
+#undef NEON_FORMAT_LIST
+#undef NEON_FORMAT_LIST_LP
+#undef NEON_FORMAT_LIST_LW
+#undef NEON_FORMAT_LIST_LW2
+#undef NEON_FORMAT_LIST_BHS
+#undef NEON_FORMAT_LIST_HS
+#undef NEON_FORMAT_LIST_FP
+#undef NEON_SCALAR_FORMAT_LIST
+
} // namespace internal
} // namespace v8
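
Almost all of the churn in this file is one mechanical rename: V8's private csp/wcsp aliases for the AArch64 stack pointer give way to the architectural names sp/wsp, and every expected disassembly string follows suit. Two representative lines from the hunks above:

    COMPARE(Mov(x0, sp), "mov x0, sp");                   // was Mov(x0, csp)
    COMPARE(add(x0, sp, Operand(x1)), "add x0, sp, x1");  // was add(x0, csp, ...)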
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 35638c723a..bab3a00e4b 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -42,7 +42,26 @@ namespace internal {
#define __ assm.
-
+void Disassemble(FILE* f, byte* begin, byte* end) {
+ disasm::NameConverter converter;
+ disasm::Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecodeForTesting(buffer, pc);
+ fprintf(f, "%p", static_cast<void*>(prev_pc));
+ fprintf(f, " ");
+
+ for (byte* bp = prev_pc; bp < pc; bp++) {
+ fprintf(f, "%02x", *bp);
+ }
+ for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+ fprintf(f, " ");
+ }
+ fprintf(f, " %s\n", buffer.start());
+ }
+}
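
This new file-local Disassemble() routes through InstructionDecodeForTesting instead of the public disasm::Disassembler::Disassemble entry point. Its call site, further down in this same diff, is simply:

    byte* begin = code->instruction_start();
    byte* end = begin + code->instruction_size();
    Disassemble(stdout, begin, end);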
static void DummyStaticFunction(Object* result) {
}
@@ -69,7 +88,7 @@ TEST(DisasmIa320) {
// ---- All instructions that I can think of
__ add(edx, ebx);
- __ add(edx, Operand(12, RelocInfo::NONE32));
+ __ add(edx, Operand(12, RelocInfo::NONE));
__ add(edx, Operand(ebx, 0));
__ add(edx, Operand(ebx, 16));
__ add(edx, Operand(ebx, 1999));
@@ -865,7 +884,7 @@ TEST(DisasmIa320) {
code->Print(os);
byte* begin = code->instruction_start();
byte* end = begin + code->instruction_size();
- disasm::Disassembler::Disassemble(stdout, begin, end);
+ Disassemble(stdout, begin, end);
#endif
}
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index bbc1f89480..5138ef4751 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -47,6 +47,26 @@ namespace internal {
static void DummyStaticFunction(Object* result) {
}
+void Disassemble(FILE* f, byte* begin, byte* end) {
+ disasm::NameConverter converter;
+ disasm::Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecodeForTesting(buffer, pc);
+ fprintf(f, "%p", static_cast<void*>(prev_pc));
+ fprintf(f, " ");
+
+ for (byte* bp = prev_pc; bp < pc; bp++) {
+ fprintf(f, "%02x", *bp);
+ }
+ for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) {
+ fprintf(f, " ");
+ }
+ fprintf(f, " %s\n", buffer.start());
+ }
+}
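
The x64 copy is identical to the ia32 one except for the explicit narrowing cast: on 64-bit targets pc - prev_pc is a 64-bit ptrdiff_t, so it is narrowed before the int padding arithmetic:

    int pad = 6 - static_cast<int>(pc - prev_pc);  // instruction bytes already printed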
TEST(DisasmX64) {
CcTest::InitializeVM();
@@ -960,7 +980,7 @@ TEST(DisasmX64) {
code->Print(os);
byte* begin = code->instruction_start();
byte* end = begin + code->instruction_size();
- disasm::Disassembler::Disassemble(stdout, begin, end);
+ Disassemble(stdout, begin, end);
#endif
}
diff --git a/deps/v8/test/cctest/test-extra.js b/deps/v8/test/cctest/test-extra.js
index 689738c02b..9b17e401a2 100644
--- a/deps/v8/test/cctest/test-extra.js
+++ b/deps/v8/test/cctest/test-extra.js
@@ -23,7 +23,7 @@
// Exercise all of the extras utils:
// - v8.createPrivateSymbol
- // - v8.simpleBind, v8.uncurryThis
+ // - v8.uncurryThis
// - v8.InternalPackedArray
// - v8.createPromise, v8.resolvePromise, v8.rejectPromise
@@ -35,7 +35,7 @@
const apply = v8.uncurryThis(Function.prototype.apply);
const Promise = global.Promise;
- const Promise_resolve = v8.simpleBind(Promise.resolve, Promise);
+ const Promise_resolve = Promise.resolve.bind(Promise);
const arrayToTest = new v8.InternalPackedArray();
arrayToTest.push(1);
@@ -49,6 +49,22 @@
arrayToTest[1] === 1 && slicedArray.length === 2 &&
slicedArray[0] === "c" && slicedArray[1] === 1;
+ binding.testCreatePromise = function() {
+ return v8.createPromise();
+ }
+
+ binding.testCreatePromiseWithParent = function(parent) {
+ return v8.createPromise(parent);
+ }
+
+ binding.testRejectPromise = function(promise, reason) {
+ return v8.rejectPromise(promise, reason);
+ }
+
+ binding.testResolvePromise = function(promise, resolution) {
+ return v8.resolvePromise(promise, resolution);
+ }
+
binding.testExtraCanUseUtils = function() {
const fulfilledPromise = v8.createPromise();
v8.resolvePromise(
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index e590b60649..c5fb46c858 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -95,7 +95,7 @@ TEST(VectorStructure) {
CHECK_EQ(1,
FeedbackMetadata::GetSlotSize(FeedbackSlotKind::kCreateClosure));
FeedbackSlot slot = helper.slot(1);
- Cell* cell = Cell::cast(vector->Get(slot));
+ FeedbackCell* cell = FeedbackCell::cast(vector->Get(slot));
CHECK_EQ(cell->value(), *factory->undefined_value());
}
}
@@ -171,10 +171,8 @@ TEST(VectorCallICStates) {
Handle<FeedbackVector> feedback_vector =
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
- CallICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
- // CallIC doesn't return map feedback.
- CHECK(!nexus.FindFirstMap());
CompileRun("f(function() { return 16; })");
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
@@ -200,7 +198,7 @@ TEST(VectorCallFeedback) {
Handle<FeedbackVector> feedback_vector =
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
- CallICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
CHECK(nexus.GetFeedback()->IsWeakCell());
@@ -224,7 +222,7 @@ TEST(VectorCallFeedbackForArray) {
Handle<FeedbackVector> feedback_vector =
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
- CallICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
CHECK(nexus.GetFeedback()->IsWeakCell());
@@ -252,7 +250,7 @@ TEST(VectorCallCounts) {
Handle<FeedbackVector> feedback_vector =
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
- CallICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
CompileRun("f(foo); f(foo);");
@@ -281,7 +279,7 @@ TEST(VectorConstructCounts) {
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
- CallICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
CHECK(feedback_vector->Get(slot)->IsWeakCell());
@@ -312,18 +310,20 @@ TEST(VectorSpeculationMode) {
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
- CallICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(SpeculationMode::kAllowSpeculation, nexus.GetSpeculationMode());
CompileRun("f(Foo); f(Foo);");
CHECK_EQ(3, nexus.GetCallCount());
CHECK_EQ(SpeculationMode::kAllowSpeculation, nexus.GetSpeculationMode());
- nexus.SetSpeculationMode(SpeculationMode::kAllowSpeculation);
nexus.SetSpeculationMode(SpeculationMode::kDisallowSpeculation);
CHECK_EQ(SpeculationMode::kDisallowSpeculation, nexus.GetSpeculationMode());
+ CHECK_EQ(3, nexus.GetCallCount());
+
nexus.SetSpeculationMode(SpeculationMode::kAllowSpeculation);
CHECK_EQ(SpeculationMode::kAllowSpeculation, nexus.GetSpeculationMode());
+ CHECK_EQ(3, nexus.GetCallCount());
}
TEST(VectorLoadICStates) {
@@ -342,7 +342,7 @@ TEST(VectorLoadICStates) {
Handle<FeedbackVector> feedback_vector =
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
- LoadICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
CompileRun("f(o)");
@@ -408,9 +408,9 @@ TEST(VectorLoadGlobalICSlotSharing) {
FeedbackSlot slot1 = helper.slot(0);
FeedbackSlot slot2 = helper.slot(1);
CHECK_EQ(MONOMORPHIC,
- LoadGlobalICNexus(feedback_vector, slot1).StateFromFeedback());
+ FeedbackNexus(feedback_vector, slot1).StateFromFeedback());
CHECK_EQ(MONOMORPHIC,
- LoadGlobalICNexus(feedback_vector, slot2).StateFromFeedback());
+ FeedbackNexus(feedback_vector, slot2).StateFromFeedback());
}
@@ -431,7 +431,7 @@ TEST(VectorLoadICOnSmi) {
Handle<FeedbackVector> feedback_vector =
Handle<FeedbackVector>(f->feedback_vector(), isolate);
FeedbackSlot slot(0);
- LoadICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
CompileRun("f(34)");
@@ -632,7 +632,7 @@ TEST(VectorStoreICBasic) {
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(1, helper.slot_count());
FeedbackSlot slot(0);
- StoreICNexus nexus(feedback_vector, slot);
+ FeedbackNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
}
@@ -657,7 +657,7 @@ TEST(StoreOwnIC) {
CHECK_EQ(2, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackSlotKind::kLiteral);
CHECK_SLOT_KIND(helper, 1, FeedbackSlotKind::kStoreOwnNamed);
- StoreOwnICNexus nexus(feedback_vector, helper.slot(1));
+ FeedbackNexus nexus(feedback_vector, helper.slot(1));
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
}
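
Every hunk in this file makes the same substitution: the per-kind CallICNexus / LoadICNexus / LoadGlobalICNexus / StoreICNexus / StoreOwnICNexus wrappers collapse into the single FeedbackNexus class. The resulting pattern, as used above:

    FeedbackSlot slot(0);
    FeedbackNexus nexus(feedback_vector, slot);  // one type for every slot kind
    CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());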
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index d9f919fbee..2e755dc32c 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -98,7 +98,6 @@ class NamedEntriesDetector {
static const v8::HeapGraphNode* GetGlobalObject(
const v8::HeapSnapshot* snapshot) {
- CHECK_EQ(2, snapshot->GetRoot()->GetChildrenCount());
// The 0th child is (GC roots); the 1st is the user root.
const v8::HeapGraphNode* global_obj =
snapshot->GetRoot()->GetChild(1)->GetToNode();
@@ -107,6 +106,32 @@ static const v8::HeapGraphNode* GetGlobalObject(
return global_obj;
}
+static const char* GetName(const v8::HeapGraphNode* node) {
+ return const_cast<i::HeapEntry*>(reinterpret_cast<const i::HeapEntry*>(node))
+ ->name();
+}
+
+static size_t GetSize(const v8::HeapGraphNode* node) {
+ return const_cast<i::HeapEntry*>(reinterpret_cast<const i::HeapEntry*>(node))
+ ->self_size();
+}
+
+static const v8::HeapGraphNode* GetChildByName(const v8::HeapGraphNode* node,
+ const char* name) {
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphNode* child = node->GetChild(i)->GetToNode();
+ if (!strcmp(name, GetName(child))) {
+ return child;
+ }
+ }
+ return nullptr;
+}
+
+static const v8::HeapGraphNode* GetRootChild(const v8::HeapSnapshot* snapshot,
+ const char* name) {
+ return GetChildByName(snapshot->GetRoot(), name);
+}
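
A usage sketch for the snapshot-walking helpers just added (the node names are ones the tests below actually look up):

    const v8::HeapGraphNode* gc_roots = GetRootChild(snapshot, "(GC roots)");
    const v8::HeapGraphNode* handles = GetChildByName(gc_roots, "(Global handles)");
    if (handles) printf("%s: %zu bytes\n", GetName(handles), GetSize(handles));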
+
static const v8::HeapGraphNode* GetProperty(v8::Isolate* isolate,
const v8::HeapGraphNode* node,
v8::HeapGraphEdge::Type type,
@@ -173,6 +198,11 @@ static bool ValidateSnapshot(const v8::HeapSnapshot* snapshot, int depth = 3) {
return unretained_entries_count == 0;
}
+bool EndsWith(const char* a, const char* b) {
+ size_t length_a = strlen(a);
+ size_t length_b = strlen(b);
+ return (length_a >= length_b) && !strcmp(a + length_a - length_b, b);
+}
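
EndsWith is a plain strcmp-based suffix test; for example:

    CHECK(EndsWith("key / WeakMap", " / WeakMap"));    // suffix matches
    CHECK(!EndsWith("short", "a much longer suffix")); // b longer than a -> false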
TEST(HeapSnapshot) {
LocalContext env2;
@@ -694,6 +724,38 @@ TEST(HeapSnapshotInternalReferences) {
v8::HeapGraphEdge::kInternal, "1"));
}
+TEST(HeapSnapshotEphemeron) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ CompileRun(
+ "class KeyClass{};\n"
+ "class ValueClass{};\n"
+ "var wm = new WeakMap();\n"
+ "function foo(key) { wm.set(key, new ValueClass()); }\n"
+ "var key = new KeyClass();\n"
+ "foo(key);");
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+
+ const v8::HeapGraphNode* key = GetProperty(
+ env->GetIsolate(), global, v8::HeapGraphEdge::kProperty, "key");
+ CHECK(key);
+ bool success = false;
+ for (int i = 0, count = key->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* edge = key->GetChild(i);
+ const v8::HeapGraphNode* child = edge->GetToNode();
+ if (!strcmp("ValueClass", GetName(child))) {
+ v8::String::Utf8Value edge_name(CcTest::isolate(), edge->GetName());
+ CHECK(EndsWith(*edge_name, " / WeakMap"));
+ success = true;
+ break;
+ }
+ }
+ CHECK(success);
+}
TEST(HeapSnapshotAddressReuse) {
LocalContext env;
@@ -2256,14 +2318,12 @@ TEST(AllocationSitesAreVisible) {
const v8::HeapGraphNode* fun_code = GetProperty(
env->GetIsolate(), global, v8::HeapGraphEdge::kProperty, "fun");
CHECK(fun_code);
- const v8::HeapGraphNode* vector_cell =
+ const v8::HeapGraphNode* feedback_cell =
GetProperty(env->GetIsolate(), fun_code, v8::HeapGraphEdge::kInternal,
- "feedback_vector_cell");
- // TODO(mvstanton): I'm not sure if this is the best way to expose
- // literals. Is it too much to expose the Cell?
- CHECK(vector_cell);
+ "feedback_cell");
+ CHECK(feedback_cell);
const v8::HeapGraphNode* vector = GetProperty(
- env->GetIsolate(), vector_cell, v8::HeapGraphEdge::kInternal, "value");
+ env->GetIsolate(), feedback_cell, v8::HeapGraphEdge::kInternal, "value");
CHECK_EQ(v8::HeapGraphNode::kArray, vector->GetType());
CHECK_EQ(3, vector->GetChildrenCount());
@@ -2769,21 +2829,17 @@ TEST(JSPromise) {
const v8::HeapGraphNode* resolved = GetProperty(
env->GetIsolate(), global, v8::HeapGraphEdge::kProperty, "resolved");
CHECK(GetProperty(env->GetIsolate(), resolved, v8::HeapGraphEdge::kInternal,
- "result"));
+ "reactions_or_result"));
const v8::HeapGraphNode* rejected = GetProperty(
env->GetIsolate(), global, v8::HeapGraphEdge::kProperty, "rejected");
CHECK(GetProperty(env->GetIsolate(), rejected, v8::HeapGraphEdge::kInternal,
- "result"));
+ "reactions_or_result"));
const v8::HeapGraphNode* pending = GetProperty(
env->GetIsolate(), global, v8::HeapGraphEdge::kProperty, "pending");
CHECK(GetProperty(env->GetIsolate(), pending, v8::HeapGraphEdge::kInternal,
- "deferred_promise"));
- CHECK(GetProperty(env->GetIsolate(), pending, v8::HeapGraphEdge::kInternal,
- "fulfill_reactions"));
- CHECK(GetProperty(env->GetIsolate(), pending, v8::HeapGraphEdge::kInternal,
- "reject_reactions"));
+ "reactions_or_result"));
const char* objectNames[] = {"resolved", "rejected", "pending", "chained"};
for (auto objectName : objectNames) {
@@ -2793,11 +2849,213 @@ TEST(JSPromise) {
}
}
+class EmbedderNode : public v8::EmbedderGraph::Node {
+ public:
+ EmbedderNode(const char* name, size_t size,
+ v8::EmbedderGraph::Node* wrapper_node = nullptr)
+ : name_(name), size_(size), wrapper_node_(wrapper_node) {}
+
+ // Graph::Node overrides.
+ const char* Name() override { return name_; }
+ size_t SizeInBytes() override { return size_; }
+ Node* WrapperNode() override { return wrapper_node_; }
+
+ private:
+ const char* name_;
+ size_t size_;
+ Node* wrapper_node_;
+};
+
+class EmbedderRootNode : public EmbedderNode {
+ public:
+ explicit EmbedderRootNode(const char* name) : EmbedderNode(name, 0) {}
+ // Graph::Node override.
+ bool IsRootNode() override { return true; }
+};
+
+// Used to pass the global object to the BuildEmbedderGraph callback.
+// Otherwise, the callback has to iterate the global handles to find the
+// global object.
+v8::Local<v8::Value>* global_object_pointer;
+
+void BuildEmbedderGraph(v8::Isolate* v8_isolate, v8::EmbedderGraph* graph) {
+ using Node = v8::EmbedderGraph::Node;
+ Node* global_node = graph->V8Node(*global_object_pointer);
+ Node* embedder_node_A = graph->AddNode(
+ std::unique_ptr<Node>(new EmbedderNode("EmbedderNodeA", 10)));
+ Node* embedder_node_B = graph->AddNode(
+ std::unique_ptr<Node>(new EmbedderNode("EmbedderNodeB", 20)));
+ Node* embedder_node_C = graph->AddNode(
+ std::unique_ptr<Node>(new EmbedderNode("EmbedderNodeC", 30)));
+ Node* embedder_root = graph->AddNode(
+ std::unique_ptr<Node>(new EmbedderRootNode("EmbedderRoot")));
+ graph->AddEdge(global_node, embedder_node_A);
+ graph->AddEdge(embedder_node_A, embedder_node_B);
+ graph->AddEdge(embedder_root, embedder_node_C);
+ graph->AddEdge(embedder_node_C, global_node);
+}
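
BuildEmbedderGraph above synthesizes a small embedder subgraph hanging off the V8 global object; it is attached to a snapshot exactly as TEST(EmbedderGraph) below does:

    heap_profiler->SetBuildEmbedderGraphCallback(BuildEmbedderGraph);
    const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
    CheckEmbedderGraphSnapshot(env->GetIsolate(), snapshot);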
+
+void CheckEmbedderGraphSnapshot(v8::Isolate* isolate,
+ const v8::HeapSnapshot* snapshot) {
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* embedder_node_A =
+ GetChildByName(global, "EmbedderNodeA");
+ CHECK_EQ(10, GetSize(embedder_node_A));
+ const v8::HeapGraphNode* embedder_node_B =
+ GetChildByName(embedder_node_A, "EmbedderNodeB");
+ CHECK_EQ(20, GetSize(embedder_node_B));
+ const v8::HeapGraphNode* embedder_root =
+ GetRootChild(snapshot, "EmbedderRoot");
+ CHECK(embedder_root);
+ const v8::HeapGraphNode* embedder_node_C =
+ GetChildByName(embedder_root, "EmbedderNodeC");
+ CHECK_EQ(30, GetSize(embedder_node_C));
+ const v8::HeapGraphNode* global_reference =
+ GetChildByName(embedder_node_C, "Object");
+ CHECK(global_reference);
+}
+
+TEST(EmbedderGraph) {
+ i::FLAG_heap_profiler_use_embedder_graph = true;
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
+ v8::Local<v8::Value> global_object =
+ v8::Utils::ToLocal(i::Handle<i::JSObject>(
+ (isolate->context()->native_context()->global_object())));
+ global_object_pointer = &global_object;
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ heap_profiler->SetBuildEmbedderGraphCallback(BuildEmbedderGraph);
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+ CheckEmbedderGraphSnapshot(env->GetIsolate(), snapshot);
+}
+
+TEST(StrongHandleAnnotation) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::Persistent<v8::Object> handle1, handle2;
+ handle1.Reset(env->GetIsolate(), v8::Object::New(env->GetIsolate()));
+ handle2.Reset(env->GetIsolate(), v8::Object::New(env->GetIsolate()));
+ handle1.AnnotateStrongRetainer("my_label");
+ handle2.AnnotateStrongRetainer("my_label");
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ const v8::HeapGraphNode* gc_roots = GetRootChild(snapshot, "(GC roots)");
+ CHECK(gc_roots);
+ const v8::HeapGraphNode* global_handles =
+ GetChildByName(gc_roots, "(Global handles)");
+ CHECK(global_handles);
+ int found = 0;
+ for (int i = 0, count = global_handles->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* edge = global_handles->GetChild(i);
+ v8::String::Utf8Value edge_name(CcTest::isolate(), edge->GetName());
+ if (EndsWith(*edge_name, "my_label")) ++found;
+ }
+ CHECK_EQ(2, found);
+}
+
+void BuildEmbedderGraphWithWrapperNode(v8::Isolate* v8_isolate,
+ v8::EmbedderGraph* graph) {
+ using Node = v8::EmbedderGraph::Node;
+ Node* global_node = graph->V8Node(*global_object_pointer);
+ Node* wrapper_node = graph->AddNode(
+ std::unique_ptr<Node>(new EmbedderNode("WrapperNode / TAG", 10)));
+ Node* embedder_node = graph->AddNode(std::unique_ptr<Node>(
+ new EmbedderNode("EmbedderNode", 10, wrapper_node)));
+ Node* other_node =
+ graph->AddNode(std::unique_ptr<Node>(new EmbedderNode("OtherNode", 20)));
+ graph->AddEdge(global_node, embedder_node);
+ graph->AddEdge(wrapper_node, other_node);
+
+ Node* wrapper_node2 = graph->AddNode(
+ std::unique_ptr<Node>(new EmbedderNode("WrapperNode2", 10)));
+ Node* embedder_node2 = graph->AddNode(std::unique_ptr<Node>(
+ new EmbedderNode("EmbedderNode2", 10, wrapper_node2)));
+ graph->AddEdge(global_node, embedder_node2);
+ graph->AddEdge(embedder_node2, wrapper_node2);
+ graph->AddEdge(wrapper_node2, other_node);
+}
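+// In the resulting snapshot, an embedder node that reports a WrapperNode()
+// is merged with its wrapper into a single node whose name carries the
+// "/ TAG" suffix from the wrapper. The test below therefore looks up
+// "EmbedderNode / TAG" and expects the raw wrapper nodes to be absent.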
+
+TEST(EmbedderGraphWithWrapperNode) {
+ i::FLAG_heap_profiler_use_embedder_graph = true;
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
+ v8::Local<v8::Value> global_object =
+ v8::Utils::ToLocal(i::Handle<i::JSObject>(
+ (isolate->context()->native_context()->global_object())));
+ global_object_pointer = &global_object;
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ heap_profiler->SetBuildEmbedderGraphCallback(
+ BuildEmbedderGraphWithWrapperNode);
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* embedder_node =
+ GetChildByName(global, "EmbedderNode / TAG");
+ const v8::HeapGraphNode* other_node =
+ GetChildByName(embedder_node, "OtherNode");
+ CHECK(other_node);
+ const v8::HeapGraphNode* wrapper_node =
+ GetChildByName(embedder_node, "WrapperNode / TAG");
+ CHECK(!wrapper_node);
+
+ const v8::HeapGraphNode* embedder_node2 =
+ GetChildByName(global, "EmbedderNode2");
+ other_node = GetChildByName(embedder_node2, "OtherNode");
+ CHECK(other_node);
+ const v8::HeapGraphNode* wrapper_node2 =
+ GetChildByName(embedder_node, "WrapperNode2");
+ CHECK(!wrapper_node2);
+}
+
+class EmbedderNodeWithPrefix : public v8::EmbedderGraph::Node {
+ public:
+ EmbedderNodeWithPrefix(const char* prefix, const char* name)
+ : prefix_(prefix), name_(name) {}
+
+ // Graph::Node overrides.
+ const char* Name() override { return name_; }
+ size_t SizeInBytes() override { return 0; }
+ const char* NamePrefix() override { return prefix_; }
+
+ private:
+ const char* prefix_;
+ const char* name_;
+};
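+// The snapshot renders NamePrefix() in front of the node name, so the node
+// added below shows up as "Detached Node". (Embedders such as Blink use
+// this to flag detached DOM trees.)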
+
+void BuildEmbedderGraphWithPrefix(v8::Isolate* v8_isolate,
+ v8::EmbedderGraph* graph) {
+ using Node = v8::EmbedderGraph::Node;
+ Node* global_node = graph->V8Node(*global_object_pointer);
+ Node* node = graph->AddNode(
+ std::unique_ptr<Node>(new EmbedderNodeWithPrefix("Detached", "Node")));
+ graph->AddEdge(global_node, node);
+}
+
+TEST(EmbedderGraphWithPrefix) {
+ i::FLAG_heap_profiler_use_embedder_graph = true;
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
+ v8::Local<v8::Value> global_object =
+ v8::Utils::ToLocal(i::Handle<i::JSObject>(
+ (isolate->context()->native_context()->global_object())));
+ global_object_pointer = &global_object;
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ heap_profiler->SetBuildEmbedderGraphCallback(BuildEmbedderGraphWithPrefix);
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* node = GetChildByName(global, "Detached Node");
+ CHECK(node);
+}
+
static inline i::Address ToAddress(int n) {
return reinterpret_cast<i::Address>(n);
}
-
TEST(AddressToTraceMap) {
i::AddressToTraceMap map;
@@ -3161,6 +3419,17 @@ TEST(SamplingHeapProfilerLargeInterval) {
heap_profiler->StopSamplingHeapProfiler();
}
+TEST(HeapSnapshotPrototypeNotJSReceiver) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ CompileRun(
+ "function object() {}"
+ "object.prototype = 42;");
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+}
+
TEST(SamplingHeapProfilerSampleDuringDeopt) {
i::FLAG_allow_natives_syntax = true;
@@ -3206,14 +3475,3 @@ TEST(SamplingHeapProfilerSampleDuringDeopt) {
CHECK(profile);
heap_profiler->StopSamplingHeapProfiler();
}
-
-TEST(HeapSnapshotPrototypeNotJSReceiver) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- CompileRun(
- "function object() {}"
- "object.prototype = 42;");
- const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
- CHECK(ValidateSnapshot(snapshot));
-}
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index be6a71b0e3..ec282746d5 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -1142,6 +1142,8 @@ TEST(SubclassArrayBuiltinNoInlineNew) {
TEST(SubclassTypedArrayBuiltin) {
// Avoid eventual completion of in-object slack tracking.
FLAG_always_opt = false;
+ // Make BigInt64Array/BigUint64Array available for testing.
+ FLAG_harmony_bigint = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-isolate-independent-builtins.cc b/deps/v8/test/cctest/test-isolate-independent-builtins.cc
new file mode 100644
index 0000000000..eff2017355
--- /dev/null
+++ b/deps/v8/test/cctest/test-isolate-independent-builtins.cc
@@ -0,0 +1,369 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/cctest.h"
+
+#include <cstring>
+#include <fstream>
+
+#include "src/assembler-inl.h"
+#include "src/handles-inl.h"
+#include "src/isolate.h"
+#include "src/macro-assembler-inl.h"
+#include "src/simulator.h"
+#include "src/snapshot/snapshot.h"
+
+// To generate the binary files for the test function, enable this section and
+// run GenerateTestFunctionData once on each arch.
+#define GENERATE_TEST_FUNCTION_DATA false
+
+namespace v8 {
+namespace internal {
+namespace test_isolate_independent_builtins {
+
+#ifdef V8_EMBEDDED_BUILTINS
+TEST(VerifyBuiltinsIsolateIndependence) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handle_scope(isolate);
+
+ Snapshot::EnsureAllBuiltinsAreDeserialized(isolate);
+
+ // Build a white-list of all isolate-independent RelocInfo entry kinds.
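+ // (1 << (LAST_REAL_RELOC_MODE + 1)) - 1 sets one bit per real reloc mode;
+ // the modes that are harmless for isolate independence are then cleared,
+ // so iterating with mode_mask visits only disqualifying entries.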
+ constexpr int all_real_modes_mask =
+ (1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) - 1;
+ constexpr int mode_mask =
+ all_real_modes_mask & ~RelocInfo::ModeMask(RelocInfo::COMMENT) &
+ ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) &
+ ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) &
+ ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
+ ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ STATIC_ASSERT(RelocInfo::LAST_REAL_RELOC_MODE == RelocInfo::VENEER_POOL);
+ STATIC_ASSERT(RelocInfo::ModeMask(RelocInfo::COMMENT) ==
+ (1 << RelocInfo::COMMENT));
+ STATIC_ASSERT(
+ mode_mask ==
+ (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_HANDLE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE)));
+
+ constexpr bool kVerbose = false;
+ bool found_mismatch = false;
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code* code = isolate->builtins()->builtin(i);
+
+ if (kVerbose) {
+ printf("%s %s\n", Builtins::KindNameOf(i), isolate->builtins()->name(i));
+ }
+
+ bool is_isolate_independent = true;
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ is_isolate_independent = false;
+
+#ifdef ENABLE_DISASSEMBLER
+ if (kVerbose) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ printf(" %s\n", RelocInfo::RelocModeName(mode));
+ }
+#endif
+ }
+
+ const bool expected_result = Builtins::IsIsolateIndependent(i);
+ if (is_isolate_independent != expected_result) {
+ found_mismatch = true;
+ printf("%s %s expected: %d, is: %d\n", Builtins::KindNameOf(i),
+ isolate->builtins()->name(i), expected_result,
+ is_isolate_independent);
+ }
+ }
+
+ CHECK(!found_mismatch);
+}
+
+TEST(VerifyBuiltinsOffHeapSafety) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handle_scope(isolate);
+
+ Snapshot::EnsureAllBuiltinsAreDeserialized(isolate);
+
+ constexpr int all_real_modes_mask =
+ (1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) - 1;
+ constexpr int mode_mask =
+ all_real_modes_mask & ~RelocInfo::ModeMask(RelocInfo::COMMENT) &
+ ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) &
+ ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) &
+ ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
+ ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL) &
+ ~RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE);
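+ // Note: unlike the isolate-independence mask above, EXTERNAL_REFERENCE is
+ // also cleared here, i.e. external references are tolerated in off-heap
+ // code.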
+ STATIC_ASSERT(RelocInfo::LAST_REAL_RELOC_MODE == RelocInfo::VENEER_POOL);
+ STATIC_ASSERT(RelocInfo::ModeMask(RelocInfo::COMMENT) ==
+ (1 << RelocInfo::COMMENT));
+ STATIC_ASSERT(
+ mode_mask ==
+ (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_HANDLE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY)));
+
+ constexpr bool kVerbose = false;
+ bool found_mismatch = false;
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code* code = isolate->builtins()->builtin(i);
+
+ if (kVerbose) {
+ printf("%s %s\n", Builtins::KindNameOf(i), isolate->builtins()->name(i));
+ }
+
+ bool is_off_heap_safe = true;
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ is_off_heap_safe = false;
+#ifdef ENABLE_DISASSEMBLER
+ if (kVerbose) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ printf(" %s\n", RelocInfo::RelocModeName(mode));
+ }
+#endif
+ }
+
+ // TODO(jgruber): Remove once we properly set up the on-heap code
+ // trampoline.
+ if (Builtins::IsTooShortForOffHeapTrampoline(i)) is_off_heap_safe = false;
+
+ const bool expected_result = Builtins::IsOffHeapSafe(i);
+ if (is_off_heap_safe != expected_result) {
+ found_mismatch = true;
+ printf("%s %s expected: %d, is: %d\n", Builtins::KindNameOf(i),
+ isolate->builtins()->name(i), expected_result, is_off_heap_safe);
+ }
+ }
+
+ CHECK(!found_mismatch);
+}
+#endif // V8_EMBEDDED_BUILTINS
+
+// V8_CC_MSVC is true for both MSVC and clang on Windows. clang can handle
+// __asm__-style inline assembly but MSVC cannot, and thus we need more
+// precise compiler detection that can distinguish between the two. clang on
+// Windows sets both __clang__ and _MSC_VER, while MSVC sets only _MSC_VER.
+#if defined(_MSC_VER) && !defined(__clang__)
+#define V8_COMPILER_IS_MSVC
+#endif
+
+#ifndef V8_COMPILER_IS_MSVC
+#if GENERATE_TEST_FUNCTION_DATA
+
+// Arch-specific defines.
+#if V8_TARGET_ARCH_IA32
+#define TEST_FUNCTION_FILE "f-ia32.bin"
+#elif V8_TARGET_ARCH_X64 && _WIN64
+#define TEST_FUNCTION_FILE "f-x64-win.bin"
+#elif V8_TARGET_ARCH_X64
+#define TEST_FUNCTION_FILE "f-x64.bin"
+#elif V8_TARGET_ARCH_ARM64
+#define TEST_FUNCTION_FILE "f-arm64.bin"
+#elif V8_TARGET_ARCH_ARM
+#define TEST_FUNCTION_FILE "f-arm.bin"
+#elif V8_TARGET_ARCH_PPC
+#define TEST_FUNCTION_FILE "f-ppc.bin"
+#elif V8_TARGET_ARCH_MIPS
+#define TEST_FUNCTION_FILE "f-mips.bin"
+#elif V8_TARGET_ARCH_MIPS64
+#define TEST_FUNCTION_FILE "f-mips64.bin"
+#elif V8_TARGET_ARCH_S390
+#define TEST_FUNCTION_FILE "f-s390.bin"
+#else
+#error "Unknown architecture."
+#endif
+
+#define __ masm.
+
+TEST(GenerateTestFunctionData) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+#if V8_TARGET_ARCH_IA32
+ v8::internal::byte buffer[256];
+ Assembler masm(isolate, buffer, sizeof(buffer));
+
+ __ mov(eax, Operand(esp, 4));
+ __ add(eax, Operand(esp, 8));
+ __ ret(0);
+#elif V8_TARGET_ARCH_X64
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ Assembler masm(isolate, buffer, static_cast<int>(allocated));
+
+#ifdef _WIN64
+ static const Register arg1 = rcx;
+ static const Register arg2 = rdx;
+#else
+ static const Register arg1 = rdi;
+ static const Register arg2 = rsi;
+#endif
+
+ __ movq(rax, arg2);
+ __ addq(rax, arg1);
+ __ ret(0);
+#elif V8_TARGET_ARCH_ARM64
+ MacroAssembler masm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+
+ __ Add(x0, x0, x1);
+ __ Ret();
+#elif V8_TARGET_ARCH_ARM
+ Assembler masm(isolate, nullptr, 0);
+
+ __ add(r0, r0, Operand(r1));
+ __ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_PPC
+ Assembler masm(isolate, nullptr, 0);
+
+ __ function_descriptor();
+ __ add(r3, r3, r4);
+ __ blr();
+#elif V8_TARGET_ARCH_MIPS
+ MacroAssembler masm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+
+ __ addu(v0, a0, a1);
+ __ jr(ra);
+ __ nop();
+#elif V8_TARGET_ARCH_MIPS64
+ MacroAssembler masm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+
+ __ addu(v0, a0, a1);
+ __ jr(ra);
+ __ nop();
+#elif V8_TARGET_ARCH_S390
+ Assembler masm(isolate, nullptr, 0);
+
+ __ agr(r2, r3);
+ __ b(r14);
+#else // Unknown architecture.
+#error "Unknown architecture."
+#endif // Target architecture.
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+
+ std::ofstream of(TEST_FUNCTION_FILE, std::ios::out | std::ios::binary);
+ of.write(reinterpret_cast<char*>(desc.buffer), desc.instr_size);
+}
+#endif // GENERATE_TEST_FUNCTION_DATA
+
+#if V8_TARGET_ARCH_IA32
+#define FUNCTION_BYTES \
+ ".byte 0x8b, 0x44, 0x24, 0x04, 0x03, 0x44, 0x24, 0x08, 0xc3\n"
+#elif V8_TARGET_ARCH_X64 && _WIN64
+#define FUNCTION_BYTES ".byte 0x48, 0x8b, 0xc2, 0x48, 0x03, 0xc1, 0xc3\n"
+#elif V8_TARGET_ARCH_X64
+#define FUNCTION_BYTES ".byte 0x48, 0x8b, 0xc6, 0x48, 0x03, 0xc7, 0xc3\n"
+#elif V8_TARGET_ARCH_ARM64
+#define FUNCTION_BYTES ".byte 0x00, 0x00, 0x01, 0x8b, 0xc0, 0x03, 0x5f, 0xd6\n"
+#elif V8_TARGET_ARCH_ARM
+#define FUNCTION_BYTES ".byte 0x01, 0x00, 0x80, 0xe0, 0x0e, 0xf0, 0xa0, 0xe1\n"
+#elif V8_TARGET_ARCH_PPC
+#define FUNCTION_BYTES ".byte 0x14, 0x22, 0x63, 0x7c, 0x20, 0x00, 0x80, 0x4e\n"
+#elif V8_TARGET_ARCH_MIPS
+#define FUNCTION_BYTES \
+ ".byte 0x21, 0x10, 0x85, 0x00, 0x08, 0x00, 0xe0, " \
+ "0x03, 0x00, 0x00, 0x00, 0x00\n"
+#elif V8_TARGET_ARCH_MIPS64
+#define FUNCTION_BYTES \
+ ".byte 0x21, 0x10, 0x85, 0x00, 0x08, 0x00, 0xe0, " \
+ "0x03, 0x00, 0x00, 0x00, 0x00\n"
+#elif V8_TARGET_ARCH_S390
+#define FUNCTION_BYTES \
+ ".byte 0xb9, 0x08, 0x00, 0x23, 0x07, 0xfe\n"
+#else
+#error "Unknown architecture."
+#endif
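+
+// Each FUNCTION_BYTES sequence is the assembled output of the matching
+// GenerateTestFunctionData snippet above, i.e. each encodes a function
+// computing "return a + b" for the target architecture.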
+
+// Macros for the .byte embedding below, handling small differences across
+// operating systems.
+
+#if defined(V8_OS_MACOSX)
+#define ASM_RODATA_SECTION ".const_data\n"
+#define ASM_TEXT_SECTION ".text\n"
+#define ASM_MANGLE_LABEL "_"
+#define ASM_GLOBAL(NAME) ".globl " ASM_MANGLE_LABEL NAME "\n"
+#elif defined(V8_OS_WIN)
+#define ASM_RODATA_SECTION ".section .rodata\n"
+#define ASM_TEXT_SECTION ".section .text\n"
+#if defined(V8_TARGET_ARCH_X64)
+#define ASM_MANGLE_LABEL ""
+#else
+#define ASM_MANGLE_LABEL "_"
+#endif
+#define ASM_GLOBAL(NAME) ".global " ASM_MANGLE_LABEL NAME "\n"
+#else
+#define ASM_RODATA_SECTION ".section .rodata\n"
+#define ASM_TEXT_SECTION ".section .text\n"
+#define ASM_MANGLE_LABEL ""
+#define ASM_GLOBAL(NAME) ".global " ASM_MANGLE_LABEL NAME "\n"
+#endif
+
+// clang-format off
+#define EMBED_IN_RODATA_HEADER(LABEL) \
+ __asm__(ASM_RODATA_SECTION \
+ ASM_GLOBAL(#LABEL) \
+ ".balign 16\n" \
+ ASM_MANGLE_LABEL #LABEL ":\n");
+
+#define EMBED_IN_TEXT_HEADER(LABEL) \
+ __asm__(ASM_TEXT_SECTION \
+ ASM_GLOBAL(#LABEL) \
+ ".balign 16\n" \
+ ASM_MANGLE_LABEL #LABEL ":\n");
+
+EMBED_IN_RODATA_HEADER(test_string0_bytes)
+__asm__(".byte 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37\n"
+ ".byte 0x38, 0x39, 0x0a, 0x00\n");
+extern "C" V8_ALIGNED(16) const char test_string0_bytes[];
+
+EMBED_IN_TEXT_HEADER(test_function0_bytes)
+__asm__(FUNCTION_BYTES);
+extern "C" V8_ALIGNED(16) const char test_function0_bytes[];
+// clang-format on
+
+// A historical note: We use .byte over .incbin since the latter leads to
+// complications involving generation of build-time dependencies. Goma parses
+// #include statements, and clang has -MD/-MMD. Neither recognizes .incbin.
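+//
+// For contrast, a hypothetical .incbin-based variant of the embedding below
+// would have looked roughly like
+//
+//   EMBED_IN_RODATA_HEADER(test_string0_bytes)
+//   __asm__(".incbin \"test_string0.bin\"\n");
+//
+// with the .bin file as an invisible build-time dependency.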
+
+TEST(ByteInRodata) {
+ CHECK_EQ(0, std::strcmp("0123456789\n", test_string0_bytes));
+}
+
+TEST(ByteInText) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ auto f = GeneratedCode<int(int, int)>::FromAddress(
+ isolate, const_cast<char*>(test_function0_bytes));
+ CHECK_EQ(7, f.Call(3, 4));
+ CHECK_EQ(11, f.Call(5, 6));
+}
+#endif // #ifndef V8_COMPILER_IS_MSVC
+#undef V8_COMPILER_IS_MSVC
+
+#undef __
+#undef ASM_GLOBAL
+#undef ASM_MANGLE_LABEL
+#undef ASM_RODATA_SECTION
+#undef ASM_TEXT_SECTION
+#undef EMBED_IN_RODATA_HEADER
+#undef EMBED_IN_TEXT_HEADER
+#undef FUNCTION_BYTES
+#undef GENERATE_TEST_FUNCTION_DATA
+#undef TEST_FUNCTION_FILE
+
+} // namespace test_isolate_independent_builtins
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 0579010292..4fb23edf83 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -53,6 +53,10 @@ using v8::internal::EmbeddedVector;
using v8::internal::Logger;
using v8::internal::StrLength;
+namespace internal {
+class InstructionStream;
+}
+
namespace {
@@ -699,6 +703,10 @@ TEST(Issue539892) {
private:
void LogRecordedBuffer(i::AbstractCode* code, i::SharedFunctionInfo* shared,
const char* name, int length) override {}
+ void LogRecordedBuffer(const i::InstructionStream* stream, const char* name,
+ int length) override {}
+ void LogRecordedBuffer(i::wasm::WasmCode* code, const char* name,
+ int length) override {}
} code_event_logger;
SETUP_FLAGS();
v8::Isolate::CreateParams create_params;
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 6ace37c8b4..c9f601308f 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -492,7 +492,7 @@ TEST(OperandOffset) {
__ leaq(r13, Operand(rbp, -3 * kPointerSize));
__ leaq(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
- __ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
+ __ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE);
__ movl(rax, Immediate(1));
Operand sp0 = Operand(rsp, 0);
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 52a7d3ff7a..9777b0220a 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -34,7 +34,6 @@
#include "src/v8.h"
#include "src/api.h"
-#include "src/ast/ast-numbering.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/compiler.h"
@@ -263,125 +262,6 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
};
-TEST(UsingCachedData) {
- // Producing cached parser data while parsing eagerly is not supported.
- if (!i::FLAG_lazy) return;
-
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handles(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(
- i::GetCurrentStackPosition() - 128 * 1024);
-
- // Source containing functions that might be lazily compiled and all types
- // of symbols (string, propertyName, regexp).
- const char* source =
- "var x = 42;"
- "function foo(a) { return function nolazy(b) { return a + b; } }"
- "function bar(a) { if (a) return function lazy(b) { return b; } }"
- "var z = {'string': 'string literal', bareword: 'propertyName', "
- " 42: 'number literal', for: 'keyword as propertyName', "
- " f\\u006fr: 'keyword propertyname with escape'};"
- "var v = /RegExp Literal/;"
- "var w = /RegExp Literal\\u0020With Escape/gi;"
- "var y = { get getter() { return 42; }, "
- " set setter(v) { this.value = v; }};"
- "var f = a => function (b) { return a + b; };"
- "var g = a => b => a + b;";
- int source_length = i::StrLength(source);
-
- // ScriptResource will be deleted when the corresponding String is GCd.
- v8::ScriptCompiler::Source script_source(
- v8::String::NewExternalOneByte(isolate,
- new ScriptResource(source, source_length))
- .ToLocalChecked());
- v8::ScriptCompiler::Compile(isolate->GetCurrentContext(), &script_source,
- v8::ScriptCompiler::kProduceParserCache)
- .ToLocalChecked();
- CHECK(script_source.GetCachedData());
-
- // Compile the script again, using the cached data.
- bool lazy_flag = i::FLAG_lazy;
- i::FLAG_lazy = true;
- v8::ScriptCompiler::Compile(isolate->GetCurrentContext(), &script_source,
- v8::ScriptCompiler::kConsumeParserCache)
- .ToLocalChecked();
- i::FLAG_lazy = false;
- v8::ScriptCompiler::CompileUnboundScript(
- isolate, &script_source, v8::ScriptCompiler::kConsumeParserCache)
- .ToLocalChecked();
- i::FLAG_lazy = lazy_flag;
-}
-
-
-TEST(PreparseFunctionDataIsUsed) {
- // Producing cached parser data while parsing eagerly is not supported.
- if (!i::FLAG_lazy) return;
-
- // This tests that we actually do use the function data generated by the
- // preparser.
-
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handles(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(
- i::GetCurrentStackPosition() - 128 * 1024);
-
- const char* good_code[] = {
- "function z() { var a; } function f() { return 25; } f();",
- "var z = function () { var a; }; function f() { return 25; } f();",
- "function *z() { var a; } function f() { return 25; } f();",
- "var z = function *() { var a; }; function f() { return 25; } f();",
- "function z(p1, p2) { var a; } function f() { return 25; } f();",
- "var z = function (p1, p2) { var a; }; function f() { return 25; } f();",
- "function *z(p1, p2) { var a; } function f() { return 25; } f();",
- "var z = function *(p1, p2) { var a; }; function f() { return 25; } f();",
- "var z = () => { var a; }; function f() { return 25; } f();",
- "var z = (p1, p2) => { var a; }; function f() { return 25; } f();",
- };
-
- // Insert a syntax error inside the lazy function.
- const char* bad_code[] = {
- "function z() { if ( } function f() { return 25; } f();",
- "var z = function () { if ( }; function f() { return 25; } f();",
- "function *z() { if ( } function f() { return 25; } f();",
- "var z = function *() { if ( }; function f() { return 25; } f();",
- "function z(p1, p2) { if ( } function f() { return 25; } f();",
- "var z = function (p1, p2) { if ( }; function f() { return 25; } f();",
- "function *z(p1, p2) { if ( } function f() { return 25; } f();",
- "var z = function *(p1, p2) { if ( }; function f() { return 25; } f();",
- "var z = () => { if ( }; function f() { return 25; } f();",
- "var z = (p1, p2) => { if ( }; function f() { return 25; } f();",
- };
-
- for (unsigned i = 0; i < arraysize(good_code); i++) {
- v8::ScriptCompiler::Source good_source(v8_str(good_code[i]));
- v8::ScriptCompiler::Compile(isolate->GetCurrentContext(), &good_source,
- v8::ScriptCompiler::kProduceParserCache)
- .ToLocalChecked();
-
- const v8::ScriptCompiler::CachedData* cached_data =
- good_source.GetCachedData();
- CHECK_NOT_NULL(cached_data->data);
- CHECK_GT(cached_data->length, 0);
-
- // Now compile the erroneous code with the good preparse data. If the
- // preparse data is used, the lazy function is skipped and it should
- // compile fine.
- v8::ScriptCompiler::Source bad_source(
- v8_str(bad_code[i]), new v8::ScriptCompiler::CachedData(
- cached_data->data, cached_data->length));
- v8::Local<v8::Value> result =
- CompileRun(isolate->GetCurrentContext(), &bad_source,
- v8::ScriptCompiler::kConsumeParserCache);
- CHECK(result->IsInt32());
- CHECK_EQ(25, result->Int32Value(isolate->GetCurrentContext()).FromJust());
- }
-}
-
-
TEST(StandAlonePreParser) {
v8::V8::Initialize();
i::Isolate* i_isolate = CcTest::i_isolate();
@@ -453,42 +333,6 @@ TEST(StandAlonePreParserNoNatives) {
}
-TEST(PreparsingObjectLiterals) {
- // Regression test for a bug where the symbol stream produced by PreParser
- // didn't match what Parser wanted to consume.
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handles(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- CcTest::i_isolate()->stack_guard()->SetStackLimit(
- i::GetCurrentStackPosition() - 128 * 1024);
-
- {
- const char* source = "var myo = {if: \"foo\"}; myo.if;";
- v8::Local<v8::Value> result = ParserCacheCompileRun(source);
- CHECK(result->IsString());
- v8::String::Utf8Value utf8(isolate, result);
- CHECK_EQ(0, strcmp("foo", *utf8));
- }
-
- {
- const char* source = "var myo = {\"bar\": \"foo\"}; myo[\"bar\"];";
- v8::Local<v8::Value> result = ParserCacheCompileRun(source);
- CHECK(result->IsString());
- v8::String::Utf8Value utf8(isolate, result);
- CHECK_EQ(0, strcmp("foo", *utf8));
- }
-
- {
- const char* source = "var myo = {1: \"foo\"}; myo[1];";
- v8::Local<v8::Value> result = ParserCacheCompileRun(source);
- CHECK(result->IsString());
- v8::String::Utf8Value utf8(isolate, result);
- CHECK_EQ(0, strcmp("foo", *utf8));
- }
-}
-
-
TEST(RegressChromium62639) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
@@ -522,48 +366,6 @@ TEST(RegressChromium62639) {
}
-TEST(Regress928) {
- // Test only applies when lazy parsing.
- if (!i::FLAG_lazy) return;
-
- // Tests that the first non-toplevel function is not included in the preparse
- // data.
- const char* program =
- "try { } catch (e) { var foo = function () { /* first */ } }"
- "var bar = function () { /* second */ }";
-
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handles(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- v8::ScriptCompiler::Source script_source(v8_str(program));
- v8::ScriptCompiler::Compile(context, &script_source,
- v8::ScriptCompiler::kProduceParserCache)
- .ToLocalChecked();
-
- const v8::ScriptCompiler::CachedData* cached_data =
- script_source.GetCachedData();
- i::ScriptData script_data(cached_data->data, cached_data->length);
- std::unique_ptr<i::ParseData> pd(i::ParseData::FromCachedData(&script_data));
- pd->Initialize();
-
- int first_function =
- static_cast<int>(strstr(program, "function") - program);
- int first_lparen = first_function + i::StrLength("function ");
- CHECK_EQ('(', program[first_lparen]);
- i::FunctionEntry entry1 = pd->GetFunctionEntry(first_lparen);
- CHECK(!entry1.is_valid());
-
- int second_function =
- static_cast<int>(strstr(program + first_lparen, "function") - program);
- int second_lparen = second_function + i::StrLength("function ");
- CHECK_EQ('(', program[second_lparen]);
- i::FunctionEntry entry2 = pd->GetFunctionEntry(second_lparen);
- CHECK(entry2.is_valid());
- CHECK_EQ('}', program[entry2.end_pos() - 1]);
-}
-
-
TEST(PreParseOverflow) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
@@ -845,7 +647,7 @@ TEST(ScopeUsesArgumentsSuperThis) {
CHECK(i::parsing::ParseProgram(&info, isolate));
CHECK(i::Rewriter::Rewrite(&info));
info.ast_value_factory()->Internalize(isolate);
- i::DeclarationScope::Analyze(&info);
+ CHECK(i::DeclarationScope::Analyze(&info));
i::DeclarationScope::AllocateScopeInfos(&info, isolate,
i::AnalyzeMode::kRegular);
CHECK_NOT_NULL(info.literal());
@@ -1313,7 +1115,6 @@ const char* ReadString(unsigned* start) {
enum ParserFlag {
kAllowLazy,
kAllowNatives,
- kAllowHarmonyFunctionSent,
kAllowHarmonyPublicFields,
kAllowHarmonyPrivateFields,
kAllowHarmonyStaticFields,
@@ -1331,7 +1132,6 @@ enum ParserSyncTestResult {
void SetGlobalFlags(i::EnumSet<ParserFlag> flags) {
i::FLAG_allow_natives_syntax = flags.Contains(kAllowNatives);
- i::FLAG_harmony_function_sent = flags.Contains(kAllowHarmonyFunctionSent);
i::FLAG_harmony_public_fields = flags.Contains(kAllowHarmonyPublicFields);
i::FLAG_harmony_private_fields = flags.Contains(kAllowHarmonyPrivateFields);
i::FLAG_harmony_static_fields = flags.Contains(kAllowHarmonyStaticFields);
@@ -1344,8 +1144,6 @@ void SetGlobalFlags(i::EnumSet<ParserFlag> flags) {
void SetParserFlags(i::PreParser* parser, i::EnumSet<ParserFlag> flags) {
parser->set_allow_natives(flags.Contains(kAllowNatives));
- parser->set_allow_harmony_function_sent(
- flags.Contains(kAllowHarmonyFunctionSent));
parser->set_allow_harmony_public_fields(
flags.Contains(kAllowHarmonyPublicFields));
parser->set_allow_harmony_private_fields(
@@ -1479,7 +1277,10 @@ void TestParserSync(const char* source, const ParserFlag* varying_flags,
bool is_module = false, bool test_preparser = true,
bool ignore_error_msg = false) {
i::Handle<i::String> str =
- CcTest::i_isolate()->factory()->NewStringFromAsciiChecked(source);
+ CcTest::i_isolate()
+ ->factory()
+ ->NewStringFromUtf8(Vector<const char>(source, strlen(source)))
+ .ToHandleChecked();
for (int bits = 0; bits < (1 << varying_flags_length); bits++) {
i::EnumSet<ParserFlag> flags;
for (size_t flag_index = 0; flag_index < varying_flags_length;
@@ -2350,63 +2151,6 @@ TEST(NoErrorsIdentifierNames) {
RunParserSyncTest(context_data, statement_data, kSuccess);
}
-
-TEST(DontRegressPreParserDataSizes) {
- // These tests make sure that Parser doesn't start producing less "preparse
- // data" (data which the embedder can cache).
-
- v8::V8::Initialize();
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handles(isolate);
-
- CcTest::i_isolate()->stack_guard()->SetStackLimit(
- i::GetCurrentStackPosition() - 128 * 1024);
-
- struct TestCase {
- const char* program;
- int functions;
- } test_cases[] = {
- // No functions.
- {"var x = 42;", 0},
- // Functions.
- {"function foo() {}", 1},
- {"function foo() {} function bar() {}", 2},
- // Getter / setter functions are recorded as functions if they're on the
- // top level.
- {"var x = {get foo(){} };", 1},
- // Functions inside lazy functions are not recorded.
- {"function lazy() { function a() {} function b() {} function c() {} }",
- 1},
- {"function lazy() { var x = {get foo(){} } }", 1},
- {nullptr, 0}};
-
- for (int i = 0; test_cases[i].program; i++) {
- const char* program = test_cases[i].program;
- i::Factory* factory = CcTest::i_isolate()->factory();
- i::Handle<i::String> source =
- factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
- i::Handle<i::Script> script = factory->NewScript(source);
- i::ParseInfo info(script);
- i::ScriptData* sd = nullptr;
- info.set_cached_data(&sd);
- info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
- i::parsing::ParseProgram(&info, CcTest::i_isolate());
- i::ParseData* pd = i::ParseData::FromCachedData(sd);
-
- if (pd->FunctionCount() != test_cases[i].functions) {
- FATAL(
- "Expected preparse data for program:\n"
- "\t%s\n"
- "to contain %d functions, however, received %d functions.\n",
- program, test_cases[i].functions, pd->FunctionCount());
- }
- delete sd;
- delete pd;
- }
-}
-
-
TEST(FunctionDeclaresItselfStrict) {
// Tests that we produce the right kinds of errors when a function declares
// itself strict (we cannot produce there errors as soon as we see the
@@ -3900,15 +3644,44 @@ TEST(BothModesUseCount) {
TEST(LineOrParagraphSeparatorAsLineTerminator) {
// Tests that both preparsing and parsing accept U+2028 LINE SEPARATOR and
- // U+2029 PARAGRAPH SEPARATOR as LineTerminator symbols.
+ // U+2029 PARAGRAPH SEPARATOR as LineTerminator symbols outside of string
+ // literals.
const char* context_data[][2] = {{"", ""}, {nullptr, nullptr}};
- const char* statement_data[] = {"\x31\xE2\x80\xA8\x32", // "1<U+2028>2"
- "\x31\xE2\x80\xA9\x32", // "1<U+2029>2"
+ const char* statement_data[] = {"\x31\xE2\x80\xA8\x32", // 1<U+2028>2
+ "\x31\xE2\x80\xA9\x32", // 1<U+2029>2
+ nullptr};
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+TEST(LineOrParagraphSeparatorInStringLiteral) {
+ // Tests that both preparsing and parsing treat U+2028 LINE SEPARATOR and
+ // U+2029 PARAGRAPH SEPARATOR as line terminators within string literals
+ // when the "subsume JSON" flag is disabled.
+ v8::internal::FLAG_harmony_subsume_json = false;
+ const char* context_data[][2] = {
+ {"\"", "\""}, {"'", "'"}, {nullptr, nullptr}};
+ const char* statement_data[] = {"\x31\xE2\x80\xA8\x32", // 1<U+2028>2
+ "\x31\xE2\x80\xA9\x32", // 1<U+2029>2
nullptr};
RunParserSyncTest(context_data, statement_data, kError);
}
+TEST(LineOrParagraphSeparatorInStringLiteralHarmony) {
+ // Tests that both preparsing and parsing don't treat U+2028 LINE SEPARATOR
+ // and U+2029 PARAGRAPH SEPARATOR as line terminators within string literals
+ // when the "subsume JSON" flag is enabled.
+ v8::internal::FLAG_harmony_subsume_json = true;
+ const char* context_data[][2] = {
+ {"\"", "\""}, {"'", "'"}, {nullptr, nullptr}};
+ const char* statement_data[] = {"\x31\xE2\x80\xA8\x32", // 1<U+2028>2
+ "\x31\xE2\x80\xA9\x32", // 1<U+2029>2
+ nullptr};
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
TEST(ErrorsArrowFormalParameters) {
const char* context_data[][2] = {
{ "()", "=>{}" },
@@ -5130,6 +4903,9 @@ TEST(PrivateClassFieldsErrors) {
"#async a = 0",
"#async a",
+ "#constructor",
+ "#constructor = function() {}",
+
"# a = 0",
"#a() { }",
"get #a() { }",
@@ -5160,6 +4936,14 @@ TEST(PrivateClassFieldsErrors) {
"#a = f(arguments)",
"#a = () => () => arguments",
+ "foo() { delete this.#a }",
+ "foo() { delete this.x.#a }",
+ "foo() { delete this.x().#a }",
+
+ "foo() { delete f.#a }",
+ "foo() { delete f.x.#a }",
+ "foo() { delete f.x().#a }",
+
// ASI requires a linebreak
"#a b",
"#a = 0 b",
@@ -5309,6 +5093,16 @@ TEST(PrivateNameNoErrors) {
"foo.#b.#a",
"foo.#b.#a()",
+ "foo().#a",
+ "foo().b.#a",
+ "foo().b().#a",
+ "foo().b().#a()",
+ "foo().b().#a.bar",
+ "foo().b().#a.bar()",
+
+ "foo(this.#a)",
+ "foo(bar().#a)",
+
"new foo.#a",
"new foo.#b.#a",
"new foo.#b.#a()",
@@ -8645,26 +8439,6 @@ TEST(EscapeSequenceErrors) {
RunParserSyncTest(context_data, error_data, kError);
}
-
-TEST(FunctionSentErrors) {
- // clang-format off
- const char* context_data[][2] = {
- { "'use strict'", "" },
- { "", "" },
- { nullptr, nullptr }
- };
- const char* error_data[] = {
- "var x = function.sent",
- "function* g() { yield function.s\\u0065nt; }",
- nullptr
- };
- // clang-format on
-
- static const ParserFlag always_flags[] = {kAllowHarmonyFunctionSent};
- RunParserSyncTest(context_data, error_data, kError, always_flags,
- arraysize(always_flags));
-}
-
TEST(NewTargetErrors) {
// clang-format off
const char* context_data[][2] = {
@@ -10293,7 +10067,7 @@ TEST(LexicalLoopVariable) {
info.set_allow_lazy_parsing(false);
CHECK(i::parsing::ParseProgram(&info, isolate));
CHECK(i::Rewriter::Rewrite(&info));
- i::DeclarationScope::Analyze(&info);
+ CHECK(i::DeclarationScope::Analyze(&info));
i::DeclarationScope::AllocateScopeInfos(&info, isolate,
i::AnalyzeMode::kRegular);
CHECK_NOT_NULL(info.literal());
@@ -10304,8 +10078,8 @@ TEST(LexicalLoopVariable) {
test(info, script_scope);
};
- // Check `let` loop variables is a stack local when not captured by an eval
- // or closure within the area of the loop body.
+ // Check that a `let` loop variable is a stack local when not captured by
+ // an eval or closure within the area of the loop body.
const char* local_bindings[] = {
"function loop() {"
" for (let loop_var = 0; loop_var < 10; ++loop_var) {"
@@ -10464,6 +10238,107 @@ TEST(LexicalLoopVariable) {
}
}
+TEST(PrivateNamesSyntaxError) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+ LocalContext env;
+
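+ // Scope analysis is expected to fail on an unresolvable private name; the
+ // error is recorded on the pending error handler rather than thrown, so
+ // the lambda returns that flag instead of checking for an exception.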
+ auto test = [isolate](const char* program, bool is_lazy) {
+ i::Factory* const factory = isolate->factory();
+ i::Handle<i::String> source =
+ factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::ParseInfo info(script);
+
+ info.set_allow_lazy_parsing(is_lazy);
+ i::FLAG_harmony_private_fields = true;
+ CHECK(i::parsing::ParseProgram(&info, isolate));
+ CHECK(i::Rewriter::Rewrite(&info));
+ CHECK(!i::DeclarationScope::Analyze(&info));
+ return info.pending_error_handler()->has_pending_error();
+ };
+
+ const char* data[] = {
+ "class A {"
+ " foo() { return this.#bar; }"
+ "}",
+
+ "let A = class {"
+ " foo() { return this.#bar; }"
+ "}",
+
+ "class A {"
+ " #foo; "
+ " bar() { return this.#baz; }"
+ "}",
+
+ "let A = class {"
+ " #foo; "
+ " bar() { return this.#baz; }"
+ "}",
+
+ "class A {"
+ " bar() {"
+ " class D { #baz = 1; };"
+ " return this.#baz;"
+ " }"
+ "}",
+
+ "let A = class {"
+ " bar() {"
+ " class D { #baz = 1; };"
+ " return this.#baz;"
+ " }"
+ "}",
+
+ "a.#bar",
+
+ "class Foo {};"
+ "Foo.#bar;",
+
+ "let Foo = class {};"
+ "Foo.#bar;",
+
+ "class Foo {};"
+ "(new Foo).#bar;",
+
+ "let Foo = class {};"
+ "(new Foo).#bar;",
+
+ "class Foo { #bar; };"
+ "(new Foo).#bar;",
+
+ "let Foo = class { #bar; };"
+ "(new Foo).#bar;",
+
+ "function t(){"
+ " class Foo { getA() { return this.#foo; } }"
+ "}",
+
+ "function t(){"
+ " return class { getA() { return this.#foo; } }"
+ "}",
+ };
+
+ // TODO(gsathya): The preparser does not track unresolved variables in
+ // top-level functions, which causes this test to fail.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=7468
+ const char* parser_data[] = {
+ "function t() {"
+ " return this.#foo;"
+ "}",
+ };
+
+ for (const char* source : data) {
+ CHECK(test(source, true));
+ CHECK(test(source, false));
+ }
+
+ for (const char* source : parser_data) {
+ CHECK(test(source, false));
+ }
+}
+
} // namespace test_parsing
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
index 000cf34c87..69af784c85 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
@@ -59,8 +59,7 @@ TEST(WasmRelocationArmContextReference) {
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
- isolate, it.rinfo()->wasm_context_reference() + offset,
- SKIP_ICACHE_FLUSH);
+ it.rinfo()->wasm_context_reference() + offset, SKIP_ICACHE_FLUSH);
}
// Call into relocated code object
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
index 59f38e1554..7cb24e539f 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
@@ -64,8 +64,7 @@ TEST(WasmRelocationArm64ContextReference) {
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
- isolate, it.rinfo()->wasm_context_reference() + offset,
- SKIP_ICACHE_FLUSH);
+ it.rinfo()->wasm_context_reference() + offset, SKIP_ICACHE_FLUSH);
}
// Call into relocated code object
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
index 080da36a47..d9e2cac6ee 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
@@ -68,8 +68,7 @@ TEST(WasmRelocationIa32ContextReference) {
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
- isolate, it.rinfo()->wasm_context_reference() + offset,
- SKIP_ICACHE_FLUSH);
+ it.rinfo()->wasm_context_reference() + offset, SKIP_ICACHE_FLUSH);
}
// Check if immediate is updated correctly
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
index 0526ce25a2..f290920ddc 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
@@ -64,8 +64,7 @@ TEST(WasmRelocationX64ContextReference) {
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
DCHECK(RelocInfo::IsWasmContextReference(it.rinfo()->rmode()));
it.rinfo()->set_wasm_context_reference(
- isolate, it.rinfo()->wasm_context_reference() + offset,
- SKIP_ICACHE_FLUSH);
+ it.rinfo()->wasm_context_reference() + offset, SKIP_ICACHE_FLUSH);
}
// Check if immediate is updated correctly
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 70fc8586eb..11a50fa54c 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -1228,13 +1228,31 @@ static Handle<SharedFunctionInfo> CompileScript(
Isolate* isolate, Handle<String> source, Handle<String> name,
ScriptData** cached_data, v8::ScriptCompiler::CompileOptions options) {
return Compiler::GetSharedFunctionInfoForScript(
- source, name, 0, 0, v8::ScriptOriginOptions(), Handle<Object>(),
- Handle<Context>(isolate->native_context()), nullptr, cached_data,
- options, ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE,
- Handle<FixedArray>())
+ source, Compiler::ScriptDetails(name), v8::ScriptOriginOptions(),
+ nullptr, cached_data, options, ScriptCompiler::kNoCacheNoReason,
+ NOT_NATIVES_CODE)
.ToHandleChecked();
}
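+
+// kProduceCodeCache is no longer used here; the helper below compiles
+// normally and then creates the cache explicitly via
+// ScriptCompiler::CreateCodeCache. The buffer is copied so that the
+// returned ScriptData owns its memory independently of the API object.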
+static Handle<SharedFunctionInfo> CompileScriptAndProduceCache(
+ Isolate* isolate, Handle<String> source, Handle<String> name,
+ ScriptData** script_data, v8::ScriptCompiler::CompileOptions options) {
+ Handle<SharedFunctionInfo> sfi =
+ Compiler::GetSharedFunctionInfoForScript(
+ source, Compiler::ScriptDetails(name), v8::ScriptOriginOptions(),
+ nullptr, nullptr, options, ScriptCompiler::kNoCacheNoReason,
+ NOT_NATIVES_CODE)
+ .ToHandleChecked();
+ std::unique_ptr<ScriptCompiler::CachedData> cached_data(
+ ScriptCompiler::CreateCodeCache(ToApiHandle<UnboundScript>(sfi),
+ Utils::ToLocal(source)));
+ uint8_t* buffer = NewArray<uint8_t>(cached_data->length);
+ MemCopy(buffer, cached_data->data, cached_data->length);
+ *script_data = new i::ScriptData(buffer, cached_data->length);
+ (*script_data)->AcquireDataOwnership();
+ return sfi;
+}
+
TEST(CodeSerializerOnePlusOne) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -1255,9 +1273,9 @@ TEST(CodeSerializerOnePlusOne) {
ScriptData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, orig_source, Handle<String>(), &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, orig_source, Handle<String>(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
int builtins_count = CountBuiltins();
@@ -1297,18 +1315,19 @@ TEST(CodeSerializerPromotedToCompilationCache) {
.ToHandleChecked();
ScriptData* cache = nullptr;
- CompileScript(isolate, src, src, &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ CompileScriptAndProduceCache(isolate, src, src, &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
DisallowCompilation no_compile_expected(isolate);
Handle<SharedFunctionInfo> copy = CompileScript(
isolate, src, src, &cache, v8::ScriptCompiler::kConsumeCodeCache);
- InfoVectorPair pair = isolate->compilation_cache()->LookupScript(
- src, src, 0, 0, v8::ScriptOriginOptions(), isolate->native_context(),
- LanguageMode::kSloppy);
+ MaybeHandle<SharedFunctionInfo> shared =
+ isolate->compilation_cache()->LookupScript(
+ src, src, 0, 0, v8::ScriptOriginOptions(), isolate->native_context(),
+ LanguageMode::kSloppy);
- CHECK(pair.shared() == *copy);
+ CHECK(*shared.ToHandleChecked() == *copy);
delete cache;
}
@@ -1332,11 +1351,11 @@ TEST(CodeSerializerInternalizedString) {
CHECK(orig_source->Equals(*copy_source));
Handle<JSObject> global(isolate->context()->global_object());
- ScriptData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, orig_source, Handle<String>(), &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ i::ScriptData* script_data = nullptr;
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, orig_source, Handle<String>(), &script_data,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<JSFunction> orig_fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
orig, isolate->native_context());
@@ -1349,7 +1368,7 @@ TEST(CodeSerializerInternalizedString) {
Handle<SharedFunctionInfo> copy;
{
DisallowCompilation no_compile_expected(isolate);
- copy = CompileScript(isolate, copy_source, Handle<String>(), &cache,
+ copy = CompileScript(isolate, copy_source, Handle<String>(), &script_data,
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
@@ -1368,7 +1387,7 @@ TEST(CodeSerializerInternalizedString) {
CHECK(Handle<String>::cast(copy_result)->Equals(*expected));
CHECK_EQ(builtins_count, CountBuiltins());
- delete cache;
+ delete script_data;
}
TEST(CodeSerializerLargeCodeObject) {
@@ -1393,9 +1412,9 @@ TEST(CodeSerializerLargeCodeObject) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
CHECK(isolate->heap()->InSpace(orig->abstract_code(), LO_SPACE));
@@ -1459,9 +1478,9 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
CHECK(heap->InSpace(orig->abstract_code(), LO_SPACE));
@@ -1522,9 +1541,9 @@ TEST(CodeSerializerLargeStrings) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
@@ -1590,9 +1609,9 @@ TEST(CodeSerializerThreeBigStrings) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
@@ -1708,9 +1727,9 @@ TEST(CodeSerializerExternalString) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, source_string, Handle<String>(), &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, source_string, Handle<String>(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
@@ -1765,9 +1784,9 @@ TEST(CodeSerializerLargeExternalString) {
Handle<JSObject> global(isolate->context()->global_object());
ScriptData* cache = nullptr;
- Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, source_str, Handle<String>(), &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
+ isolate, source_str, Handle<String>(), &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
@@ -1815,8 +1834,8 @@ TEST(CodeSerializerExternalScriptName) {
ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
- CompileScript(isolate, source_string, name, &cache,
- v8::ScriptCompiler::kProduceCodeCache);
+ CompileScriptAndProduceCache(isolate, source_string, name, &cache,
+ v8::ScriptCompiler::kNoCompileOptions);
Handle<SharedFunctionInfo> copy;
{
@@ -1848,7 +1867,7 @@ static void SerializerCodeEventListener(const v8::JitCodeEvent* event) {
}
}
-v8::ScriptCompiler::CachedData* ProduceCache(
+v8::ScriptCompiler::CachedData* CompileRunAndProduceCache(
const char* source, CodeCacheType cacheType = CodeCacheType::kLazy) {
v8::ScriptCompiler::CachedData* cache;
v8::Isolate::CreateParams create_params;
@@ -1865,12 +1884,10 @@ v8::ScriptCompiler::CachedData* ProduceCache(
v8::ScriptCompiler::Source source(source_str, origin);
v8::ScriptCompiler::CompileOptions options;
switch (cacheType) {
- case CodeCacheType::kLazy:
- options = v8::ScriptCompiler::kProduceCodeCache;
- break;
case CodeCacheType::kEager:
options = v8::ScriptCompiler::kProduceFullCodeCache;
break;
+ case CodeCacheType::kLazy:
case CodeCacheType::kAfterExecute:
options = v8::ScriptCompiler::kNoCompileOptions;
break;
@@ -1881,6 +1898,10 @@ v8::ScriptCompiler::CachedData* ProduceCache(
v8::ScriptCompiler::CompileUnboundScript(isolate1, &source, options)
.ToLocalChecked();
+ if (cacheType != CodeCacheType::kAfterExecute) {
+ cache = ScriptCompiler::CreateCodeCache(script, source_str);
+ }
+
v8::Local<v8::Value> result = script->BindToCurrentContext()
->Run(isolate1->GetCurrentContext())
.ToLocalChecked();
@@ -1891,13 +1912,6 @@ v8::ScriptCompiler::CachedData* ProduceCache(
if (cacheType == CodeCacheType::kAfterExecute) {
cache = ScriptCompiler::CreateCodeCache(script, source_str);
- } else {
- const ScriptCompiler::CachedData* data = source.GetCachedData();
- CHECK(data);
- uint8_t* buffer = NewArray<uint8_t>(data->length);
- MemCopy(buffer, data->data, data->length);
- cache = new v8::ScriptCompiler::CachedData(
- buffer, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
}
CHECK(cache);
}
@@ -1916,7 +1930,7 @@ void CheckDeserializedFlag(v8::Local<v8::UnboundScript> script) {
TEST(CodeSerializerIsolates) {
const char* source = "function f() { return 'abc'; }; f() + 'def'";
- v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
+ v8::ScriptCompiler::CachedData* cache = CompileRunAndProduceCache(source);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1963,7 +1977,7 @@ TEST(CodeSerializerIsolatesEager) {
"}"
"f()() + 'def'";
v8::ScriptCompiler::CachedData* cache =
- ProduceCache(source, CodeCacheType::kEager);
+ CompileRunAndProduceCache(source, CodeCacheType::kEager);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2010,7 +2024,7 @@ TEST(CodeSerializerAfterExecute) {
FLAG_opt = false;
const char* source = "function f() { return 'abc'; }; f() + 'def'";
v8::ScriptCompiler::CachedData* cache =
- ProduceCache(source, CodeCacheType::kAfterExecute);
+ CompileRunAndProduceCache(source, CodeCacheType::kAfterExecute);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2040,7 +2054,7 @@ TEST(CodeSerializerAfterExecute) {
CHECK(sfi->HasBytecodeArray());
BytecodeArray* bytecode = sfi->bytecode_array();
CHECK_EQ(bytecode->interrupt_budget(),
- interpreter::Interpreter::kInterruptBudget);
+ interpreter::Interpreter::InterruptBudget());
CHECK_EQ(bytecode->osr_loop_nesting_level(), 0);
{
@@ -2065,7 +2079,7 @@ TEST(CodeSerializerAfterExecute) {
TEST(CodeSerializerFlagChange) {
const char* source = "function f() { return 'abc'; }; f() + 'def'";
- v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
+ v8::ScriptCompiler::CachedData* cache = CompileRunAndProduceCache(source);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2092,7 +2106,7 @@ TEST(CodeSerializerFlagChange) {
TEST(CodeSerializerBitFlip) {
const char* source = "function f() { return 'abc'; }; f() + 'def'";
- v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
+ v8::ScriptCompiler::CachedData* cache = CompileRunAndProduceCache(source);
// Random bit flip.
const_cast<uint8_t*>(cache->data)[337] ^= 0x40;
@@ -2141,15 +2155,10 @@ TEST(CodeSerializerWithHarmonyScoping) {
v8::ScriptCompiler::Source source(source_str, origin);
v8::Local<v8::UnboundScript> script =
v8::ScriptCompiler::CompileUnboundScript(
- isolate1, &source, v8::ScriptCompiler::kProduceCodeCache)
+ isolate1, &source, v8::ScriptCompiler::kNoCompileOptions)
.ToLocalChecked();
- const v8::ScriptCompiler::CachedData* data = source.GetCachedData();
- CHECK(data);
- // Persist cached data.
- uint8_t* buffer = NewArray<uint8_t>(data->length);
- MemCopy(buffer, data->data, data->length);
- cache = new v8::ScriptCompiler::CachedData(
- buffer, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
+ cache = v8::ScriptCompiler::CreateCodeCache(script, source_str);
+ CHECK(cache);
v8::Local<v8::Value> result = script->BindToCurrentContext()
->Run(isolate1->GetCurrentContext())
@@ -2205,14 +2214,9 @@ TEST(Regress503552) {
Handle<String> source = isolate->factory()->NewStringFromAsciiChecked(
"function f() {} function g() {}");
ScriptData* script_data = nullptr;
- Handle<SharedFunctionInfo> shared =
- Compiler::GetSharedFunctionInfoForScript(
- source, MaybeHandle<String>(), 0, 0, v8::ScriptOriginOptions(),
- MaybeHandle<Object>(), Handle<Context>(isolate->native_context()),
- nullptr, &script_data, v8::ScriptCompiler::kProduceCodeCache,
- ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE,
- MaybeHandle<FixedArray>())
- .ToHandleChecked();
+ Handle<SharedFunctionInfo> shared = CompileScriptAndProduceCache(
+ isolate, source, Handle<String>(), &script_data,
+ v8::ScriptCompiler::kNoCompileOptions);
delete script_data;
heap::SimulateIncrementalMarking(isolate->heap());
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index ba6186828d..45b4cc4a3d 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1335,99 +1335,6 @@ UNINITIALIZED_TEST(OneByteArrayJoin) {
isolate->Dispose();
}
-
-static void CheckException(const char* source) {
- // An empty handle is returned upon exception.
- CHECK(CompileRun(source).IsEmpty());
-}
-
-
-TEST(RobustSubStringStub) {
- // This tests whether the SubStringStub can handle unsafe arguments.
- // If not recognized, those unsafe arguments lead to out-of-bounds reads.
- FLAG_allow_natives_syntax = true;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Value> result;
- Handle<String> string;
- CompileRun("var short = 'abcdef';");
-
- // Invalid indices.
- CheckException("%_SubString(short, 0, 10000);");
- CheckException("%_SubString(short, -1234, 5);");
- CheckException("%_SubString(short, 5, 2);");
- // Special HeapNumbers.
- CheckException("%_SubString(short, 1, Infinity);");
- CheckException("%_SubString(short, NaN, 5);");
- // String arguments.
- CheckException("%_SubString(short, '2', '5');");
- // Ordinary HeapNumbers can be handled (in runtime).
- result = CompileRun("%_SubString(short, Math.sqrt(4), 5.1);");
- string = v8::Utils::OpenHandle(v8::String::Cast(*result));
- CHECK_EQ(0, strcmp("cde", string->ToCString().get()));
-
- CompileRun("var long = 'abcdefghijklmnopqrstuvwxyz';");
- // Invalid indices.
- CheckException("%_SubString(long, 0, 10000);");
- CheckException("%_SubString(long, -1234, 17);");
- CheckException("%_SubString(long, 17, 2);");
- // Special HeapNumbers.
- CheckException("%_SubString(long, 1, Infinity);");
- CheckException("%_SubString(long, NaN, 17);");
- // String arguments.
- CheckException("%_SubString(long, '2', '17');");
- // Ordinary HeapNumbers within bounds can be handled (in runtime).
- result = CompileRun("%_SubString(long, Math.sqrt(4), 17.1);");
- string = v8::Utils::OpenHandle(v8::String::Cast(*result));
- CHECK_EQ(0, strcmp("cdefghijklmnopq", string->ToCString().get()));
-
- // Test that out-of-bounds substring of a slice fails when the indices
- // would have been valid for the underlying string.
- CompileRun("var slice = long.slice(1, 15);");
- CheckException("%_SubString(slice, 0, 17);");
-}
-
-TEST(RobustSubStringStubExternalStrings) {
- // Ensure that the specific combination of calling the SubStringStub on an
- // external string and triggering a GC on string allocation does not crash.
- // See crbug.com/649967.
-
- FLAG_allow_natives_syntax = true;
-#ifdef VERIFY_HEAP
- FLAG_verify_heap = true;
-#endif
-
- CcTest::InitializeVM();
- v8::HandleScope handle_scope(CcTest::isolate());
-
- v8::Local<v8::String> underlying =
- CompileRun(
- "var str = 'abcdefghijklmnopqrstuvwxyz';"
- "str")
- ->ToString(CcTest::isolate()->GetCurrentContext())
- .ToLocalChecked();
- CHECK(v8::Utils::OpenHandle(*underlying)->IsSeqOneByteString());
-
- const int length = underlying->Length();
- uc16* two_byte = NewArray<uc16>(length + 1);
- underlying->Write(two_byte);
-
- Resource* resource = new Resource(two_byte, length);
- CHECK(underlying->MakeExternal(resource));
- CHECK(v8::Utils::OpenHandle(*underlying)->IsExternalTwoByteString());
-
- v8::Local<v8::Script> script = v8_compile(v8_str("%_SubString(str, 5, 8)"));
-
- // Trigger a GC on string allocation.
- i::heap::SimulateFullSpace(CcTest::heap()->new_space());
-
- v8::Local<v8::Value> result;
- CHECK(script->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocal(&result));
- Handle<String> string = v8::Utils::OpenHandle(v8::String::Cast(*result));
- CHECK_EQ(0, strcmp("fgh", string->ToCString().get()));
-}
-
namespace {
int* global_use_counts = nullptr;
@@ -1505,7 +1412,7 @@ static uint16_t ConvertLatin1(uint16_t c) {
#ifndef V8_INTL_SUPPORT
static void CheckCanonicalEquivalence(uint16_t c, uint16_t test) {
uint16_t expect = ConvertLatin1<unibrow::Ecma262UnCanonicalize, true>(c);
- if (expect > unibrow::Latin1::kMaxChar) expect = 0;
+ if (expect > unibrow::Latin1::kMaxChar || expect == 0) expect = c;
CHECK_EQ(expect, test);
}
@@ -1514,7 +1421,7 @@ TEST(Latin1IgnoreCase) {
for (uint16_t c = unibrow::Latin1::kMaxChar + 1; c != 0; c++) {
uint16_t lower = ConvertLatin1<unibrow::ToLowercase, false>(c);
uint16_t upper = ConvertLatin1<unibrow::ToUppercase, false>(c);
- uint16_t test = unibrow::Latin1::ConvertNonLatin1ToLatin1(c);
+ uint16_t test = unibrow::Latin1::TryConvertToLatin1(c);
// Filter out all characters whose upper is not their lower or vice versa.
if (lower == 0 && upper == 0) {
CheckCanonicalEquivalence(c, test);
@@ -1675,6 +1582,63 @@ TEST(ExternalStringIndexOf) {
.FromJust());
}
+#define GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(NAME, STRING) \
+ TEST(GCInsideNewStringFromUtf8SubStringWith##NAME) { \
+ CcTest::InitializeVM(); \
+ LocalContext context; \
+ v8::HandleScope scope(CcTest::isolate()); \
+ Factory* factory = CcTest::i_isolate()->factory(); \
+ Heap* heap = CcTest::i_isolate()->heap(); \
+ /* Length must be bigger than the buffer size of the Utf8Decoder. */ \
+ const char* buf = STRING; \
+ size_t len = strlen(buf); \
+ Handle<String> main_string = \
+ factory \
+ ->NewStringFromOneByte(Vector<const uint8_t>( \
+ reinterpret_cast<const uint8_t*>(buf), len)) \
+ .ToHandleChecked(); \
+ CHECK(heap->InNewSpace(*main_string)); \
+ /* Next allocation will cause GC. */ \
+ heap::SimulateFullSpace(CcTest::i_isolate()->heap()->new_space()); \
+ /* Offset by two to check substring handling. */ \
+ Handle<String> s = factory \
+ ->NewStringFromUtf8SubString( \
+ Handle<SeqOneByteString>::cast(main_string), 2, \
+ static_cast<int>(len - 2)) \
+ .ToHandleChecked(); \
+ Handle<String> expected_string = \
+ factory->NewStringFromUtf8(Vector<const char>(buf + 2, len - 2)) \
+ .ToHandleChecked(); \
+ CHECK(s->Equals(*expected_string)); \
+ }
+
+GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(
+ OneByte,
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
+GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(
+ TwoByte,
+ "QQ\xF0\x9F\x98\x8D\xF0\x9F\x98\x8D"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ"
+ "QQ\xF0\x9F\x98\x8D\xF0\x9F\x98\x8D")
+
+#undef GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING
+
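The macro above stamps out two regression tests for NewStringFromUtf8SubString: the UTF-8 decoder reads from the source string while allocating the result, so a GC triggered by that allocation must not leave it holding a pointer into a since-moved source. De-macroed, each instantiation reduces to roughly this (a sketch; the buf/len bindings are as in the macro):

    Handle<String> main_string =
        factory
            ->NewStringFromOneByte(Vector<const uint8_t>(
                reinterpret_cast<const uint8_t*>(buf), len))
            .ToHandleChecked();
    // Force the next allocation to trigger a scavenge mid-decode.
    heap::SimulateFullSpace(CcTest::i_isolate()->heap()->new_space());
    Handle<String> s =
        factory
            ->NewStringFromUtf8SubString(
                Handle<SeqOneByteString>::cast(main_string), 2,
                static_cast<int>(len - 2))
            .ToHandleChecked();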
} // namespace test_strings
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 21bdb645a5..50e9018939 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -615,3 +615,45 @@ TEST(TerminateConsole) {
CHECK(try_catch.HasCaught());
CHECK(!isolate->IsExecutionTerminating());
}
+
+class TerminatorSleeperThread : public v8::base::Thread {
+ public:
+ explicit TerminatorSleeperThread(v8::Isolate* isolate, int sleep_ms)
+ : Thread(Options("TerminatorSleeperThread")),
+ isolate_(isolate),
+ sleep_ms_(sleep_ms) {}
+ void Run() {
+ v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(sleep_ms_));
+ CHECK(!isolate_->IsExecutionTerminating());
+ isolate_->TerminateExecution();
+ }
+
+ private:
+ v8::Isolate* isolate_;
+ int sleep_ms_;
+};
+
+TEST(TerminateRegExp) {
+// The regexp interpreter does not support preemption.
+#ifndef V8_INTERPRETED_REGEXP
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ ConsoleImpl console;
+ v8::debug::SetConsoleDelegate(isolate, &console);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ isolate, TerminateCurrentThread, DoLoopCancelTerminate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
+ v8::Context::Scope context_scope(context);
+ CHECK(!isolate->IsExecutionTerminating());
+ v8::TryCatch try_catch(isolate);
+ CHECK(!isolate->IsExecutionTerminating());
+ CHECK(!CompileRun("var re = /(x+)+y$/; re.test('x');").IsEmpty());
+ TerminatorSleeperThread terminator(isolate, 100);
+ terminator.Start();
+ CHECK(CompileRun("re.test('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'); fail();")
+ .IsEmpty());
+ CHECK(try_catch.HasCaught());
+ CHECK(!isolate->IsExecutionTerminating());
+#endif // V8_INTERPRETED_REGEXP
+}
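The regexp /(x+)+y$/ is a classic catastrophic-backtracking pattern: on a long run of 'x' with no terminating 'y', the compiled matcher explores exponentially many partitions and would never return on its own. The helper thread therefore sleeps briefly and then issues a cross-thread termination request, which the v8::Isolate API permits from any thread; the core of the helper, restated (as in the class above):

    void Run() {  // executes on the terminator thread
      v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(sleep_ms_));
      isolate_->TerminateExecution();  // interrupt checked inside regexp code
    }

The #ifndef guard skips the test when regexps run in the bytecode interpreter, which has no such interrupt check.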
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index c785b45022..7ec9197b65 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -87,6 +87,8 @@ TEST(AllocateNotExternal) {
void TestSpeciesProtector(char* code,
bool invalidates_species_protector = true) {
+ // Make BigInt64Array/BigUint64Array available for testing.
+ FLAG_harmony_bigint = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
std::string typed_array_constructors[] = {
@@ -108,22 +110,24 @@ void TestSpeciesProtector(char* code,
CompileRun(("let constructor = " + constructor + ";").c_str());
v8::Local<v8::Value> constructor_obj = CompileRun(constructor.c_str());
CHECK_EQ(constructor_obj, CompileRun("x.slice().constructor"));
+ CHECK_EQ(constructor_obj, CompileRun("x.subarray().constructor"));
CHECK_EQ(constructor_obj, CompileRun("x.map(()=>{}).constructor"));
std::string decl = "class MyTypedArray extends " + constructor + " { }";
CompileRun(decl.c_str());
v8::internal::Isolate* i_isolate =
reinterpret_cast<v8::internal::Isolate*>(isolate);
- CHECK(i_isolate->IsArraySpeciesLookupChainIntact());
+ CHECK(i_isolate->IsSpeciesLookupChainIntact());
CompileRun(code);
if (invalidates_species_protector) {
- CHECK(!i_isolate->IsArraySpeciesLookupChainIntact());
+ CHECK(!i_isolate->IsSpeciesLookupChainIntact());
} else {
- CHECK(i_isolate->IsArraySpeciesLookupChainIntact());
+ CHECK(i_isolate->IsSpeciesLookupChainIntact());
}
v8::Local<v8::Value> my_typed_array = CompileRun("MyTypedArray");
CHECK_EQ(my_typed_array, CompileRun("x.slice().constructor"));
+ CHECK_EQ(my_typed_array, CompileRun("x.subarray().constructor"));
CHECK_EQ(my_typed_array, CompileRun("x.map(()=>{}).constructor"));
}
isolate->Exit();
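The predicate rename from IsArraySpeciesLookupChainIntact to IsSpeciesLookupChainIntact reflects that a single protector now guards species lookups for arrays and typed arrays alike; the test also gains coverage for x.subarray() and enables the BigInt flag so the BigInt64Array/BigUint64Array constructors exist. The check pattern, lifted out of the loop (a sketch; code and invalidates_species_protector are the caller-supplied parameters above):

    v8::internal::Isolate* i_isolate =
        reinterpret_cast<v8::internal::Isolate*>(isolate);
    CHECK(i_isolate->IsSpeciesLookupChainIntact());
    CompileRun(code);  // e.g. a snippet that redefines @@species
    if (invalidates_species_protector) {
      CHECK(!i_isolate->IsSpeciesLookupChainIntact());
    }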
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index e8bc58ffff..ac647c5c64 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -286,8 +286,8 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
if (reg_list & (1UL << i)) {
Register xn = Register::Create(i, kXRegSizeInBits);
- // We should never write into csp here.
- CHECK(!xn.Is(csp));
+ // We should never write into sp here.
+ CHECK(!xn.Is(sp));
if (!xn.IsZero()) {
if (!first.IsValid()) {
// This is the first register we've hit, so construct the literal.
@@ -337,8 +337,6 @@ void Clobber(MacroAssembler* masm, CPURegList reg_list) {
void RegisterDump::Dump(MacroAssembler* masm) {
- CHECK(__ StackPointer().Is(csp));
-
// Ensure that we don't unintentionally clobber any registers.
RegList old_tmp_list = masm->TmpList()->list();
RegList old_fptmp_list = masm->FPTmpList()->list();
@@ -368,13 +366,13 @@ void RegisterDump::Dump(MacroAssembler* masm) {
// Load the address where we will dump the state.
__ Mov(dump_base, reinterpret_cast<uint64_t>(&dump_));
- // Dump the stack pointer (csp and wcsp).
+ // Dump the stack pointer (sp and wsp).
// The stack pointer cannot be stored directly; it needs to be moved into
// another register first. Also, we pushed four X registers, so we need to
// compensate here.
- __ Add(tmp, csp, 4 * kXRegSize);
+ __ Add(tmp, sp, 4 * kXRegSize);
__ Str(tmp, MemOperand(dump_base, sp_offset));
- __ Add(tmp_w, wcsp, 4 * kXRegSize);
+ __ Add(tmp_w, wsp, 4 * kXRegSize);
__ Str(tmp_w, MemOperand(dump_base, wsp_offset));
// Dump X registers.
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index 78b266cb0b..e5b91fb280 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -52,8 +52,7 @@ class RegisterDump {
RegisterDump() : completed_(false) {}
// The Dump method generates code to store a snapshot of the register values.
- // It needs to be able to use the stack temporarily, and requires that the
- // current stack pointer is csp, and is properly aligned.
+ // It needs to be able to use the stack temporarily.
//
// The dumping code is generated through the given MacroAssembler. No registers
// are corrupted in the process, but the stack is used briefly. The flags will
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index 28d1ab27f9..d79d6e4eb4 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -37,14 +37,14 @@ SHELL = 'cctest'
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
- shell = os.path.abspath(os.path.join(context.shell_dir, SHELL))
+ def ListTests(self):
+ shell = os.path.abspath(os.path.join(self.test_config.shell_dir, SHELL))
if utils.IsWindows():
shell += ".exe"
cmd = command.Command(
- cmd_prefix=context.command_prefix,
+ cmd_prefix=self.test_config.command_prefix,
shell=shell,
- args=["--list"] + context.extra_flags)
+ args=["--list"] + self.test_config.extra_flags)
output = cmd.execute()
if output.exit_code != 0:
print cmd
@@ -63,9 +63,9 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return SHELL
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [self.path]
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/cctest/trace-extension.h b/deps/v8/test/cctest/trace-extension.h
index b20c4ae9df..bfebd34c06 100644
--- a/deps/v8/test/cctest/trace-extension.h
+++ b/deps/v8/test/cctest/trace-extension.h
@@ -28,7 +28,8 @@
#ifndef V8_TEST_CCTEST_TRACE_EXTENSION_H_
#define V8_TEST_CCTEST_TRACE_EXTENSION_H_
-#include "src/v8.h"
+#include "include/v8.h"
+#include "src/globals.h"
namespace v8 {
struct TickSample;
diff --git a/deps/v8/test/cctest/wasm/OWNERS b/deps/v8/test/cctest/wasm/OWNERS
index 972daf4d99..3972f0dd99 100644
--- a/deps/v8/test/cctest/wasm/OWNERS
+++ b/deps/v8/test/cctest/wasm/OWNERS
@@ -2,8 +2,6 @@ ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
eholk@chromium.org
-mtrofin@chromium.org
-rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index f5c09b519e..b772f2d619 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -67,6 +67,7 @@ class CWasmEntryArgTester {
? Handle<Object>::cast(isolate_->factory()->NewForeign(
wasm_code_.GetWasmCode()->instructions().start(), TENURED))
: Handle<Object>::cast(wasm_code_.GetCode())),
+ handle(reinterpret_cast<Object*>(wasm_code_.wasm_context()), isolate_),
buffer_obj};
static_assert(
arraysize(call_args) == compiler::CWasmEntryParameters::kNumParameters,
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 3ded63730b..7fa82475ae 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -35,66 +35,30 @@ namespace test_run_wasm_64 {
#define MIPS false
#endif
-#define FOREACH_I64_OPERATOR(V) \
- V(DepthFirst, true) \
- V(I64Phi, true) \
- V(I64Const, true) \
- V(I64Return, true) \
- V(I64Param, true) \
- V(I64LoadStore, true) \
- V(I64Add, true) \
- V(I64Sub, true) \
- V(I64Mul, !MIPS) \
- V(I64DivS, true) \
- V(I64DivU, true) \
- V(I64RemS, true) \
- V(I64RemU, true) \
- V(I64And, true) \
- V(I64Ior, true) \
- V(I64Xor, true) \
- V(I64Shl, true) \
- V(I64ShrU, true) \
- V(I64ShrS, true) \
- V(I64Eq, true) \
- V(I64Ne, true) \
- V(I64LtS, true) \
- V(I64LeS, true) \
- V(I64LtU, true) \
- V(I64LeU, true) \
- V(I64GtS, true) \
- V(I64GeS, true) \
- V(I64GtU, true) \
- V(I64GeU, true) \
- V(I64Ctz, true) \
- V(I64Clz, true) \
- V(I64Popcnt, true) \
- V(I32ConvertI64, true) \
- V(I64SConvertF32, true) \
- V(I64SConvertF64, true) \
- V(I64UConvertF32, true) \
- V(I64UConvertF64, true) \
- V(I64SConvertI32, true) \
- V(I64UConvertI32, true) \
- V(F32SConvertI64, true) \
- V(F32UConvertI64, true) \
- V(F64SConvertI64, true) \
- V(F64UConvertI64, true) \
- V(F64ReinterpretI64, true) \
- V(I64ReinterpretF64, true) \
- V(I64Ror, true) \
- V(I64Rol, true)
-
-#define DECLARE_CONST(name, cond) static const bool kSupported_##name = cond;
-FOREACH_I64_OPERATOR(DECLARE_CONST)
-#undef DECLARE_CONST
-
-#undef FOREACH_I64_OPERATOR
+namespace {
+
+#define FOREACH_UNSUPPORTED_OPCODE(V) V(I64Mul, !WASM_64 && MIPS)
+
+bool SupportsOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CASE_TEST(name, unsupported_test) \
+ case kExpr##name: \
+ return !(unsupported_test);
+ FOREACH_UNSUPPORTED_OPCODE(CASE_TEST)
+#undef CASE_TEST
+ default:
+ return true;
+ }
+}
+
+#undef FOREACH_UNSUPPORTED_OPCODE
+
+} // namespace
#define REQUIRE(name) \
- if (!WASM_64 && !kSupported_##name) return
+ if (!SupportsOpcode(kExpr##name)) return
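With the kSupported_##name constant table gone, REQUIRE expands to a runtime query against the much shorter unsupported-opcode list; anything not named in FOREACH_UNSUPPORTED_OPCODE defaults to supported, which is why the blanket REQUIRE(...) lines deleted below become no-ops. For illustration, the one surviving use expands as:

    // REQUIRE(I64Mul); becomes:
    if (!SupportsOpcode(kExprI64Mul)) return;  // false only on 32-bit MIPS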
WASM_EXEC_TEST(I64Const) {
- REQUIRE(I64Const);
WasmRunner<int64_t> r(execution_mode);
const int64_t kExpectedValue = 0x1122334455667788LL;
// return(kExpectedValue)
@@ -103,7 +67,6 @@ WASM_EXEC_TEST(I64Const) {
}
WASM_EXEC_TEST(I64Const_many) {
- REQUIRE(I64Const);
int cntr = 0;
FOR_INT32_INPUTS(i) {
WasmRunner<int64_t> r(execution_mode);
@@ -116,7 +79,6 @@ WASM_EXEC_TEST(I64Const_many) {
}
WASM_EXEC_TEST(Return_I64) {
- REQUIRE(I64Return);
WasmRunner<int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_RETURN1(WASM_GET_LOCAL(0)));
@@ -125,7 +87,6 @@ WASM_EXEC_TEST(Return_I64) {
}
WASM_EXEC_TEST(I64Add) {
- REQUIRE(I64Add);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -139,7 +100,6 @@ WASM_EXEC_TEST(I64Add) {
const int64_t kHasBit33On = 0x100000000;
WASM_EXEC_TEST(Regress5800_Add) {
- REQUIRE(I64Add);
WasmRunner<int32_t> r(execution_mode);
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_ADD(
WASM_I64V(0), WASM_I64V(kHasBit33On)))),
@@ -149,7 +109,6 @@ WASM_EXEC_TEST(Regress5800_Add) {
}
WASM_EXEC_TEST(I64Sub) {
- REQUIRE(I64Sub);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -158,7 +117,6 @@ WASM_EXEC_TEST(I64Sub) {
}
WASM_EXEC_TEST(Regress5800_Sub) {
- REQUIRE(I64Sub);
WasmRunner<int32_t> r(execution_mode);
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_SUB(
WASM_I64V(0), WASM_I64V(kHasBit33On)))),
@@ -168,8 +126,6 @@ WASM_EXEC_TEST(Regress5800_Sub) {
}
WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
- REQUIRE(I64Add);
- REQUIRE(I32ConvertI64);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
@@ -181,8 +137,6 @@ WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
- REQUIRE(I64Sub);
- REQUIRE(I32ConvertI64);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
@@ -195,7 +149,6 @@ WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
REQUIRE(I64Mul);
- REQUIRE(I32ConvertI64);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
@@ -207,8 +160,6 @@ WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
- REQUIRE(I64Shl);
- REQUIRE(I32ConvertI64);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
@@ -221,8 +172,6 @@ WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
- REQUIRE(I64ShrU);
- REQUIRE(I32ConvertI64);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
@@ -235,8 +184,6 @@ WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
- REQUIRE(I64ShrS);
- REQUIRE(I32ConvertI64);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
@@ -249,7 +196,6 @@ WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64DivS) {
- REQUIRE(I64DivS);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -266,7 +212,6 @@ WASM_EXEC_TEST(I64DivS) {
}
WASM_EXEC_TEST(I64DivS_Trap) {
- REQUIRE(I64DivS);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(0, r.Call(int64_t{0}, int64_t{100}));
@@ -277,7 +222,6 @@ WASM_EXEC_TEST(I64DivS_Trap) {
}
WASM_EXEC_TEST(I64DivS_Byzero_Const) {
- REQUIRE(I64DivS);
for (int8_t denom = -2; denom < 8; denom++) {
WasmRunner<int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
@@ -292,7 +236,6 @@ WASM_EXEC_TEST(I64DivS_Byzero_Const) {
}
WASM_EXEC_TEST(I64DivU) {
- REQUIRE(I64DivU);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -307,7 +250,6 @@ WASM_EXEC_TEST(I64DivU) {
}
WASM_EXEC_TEST(I64DivU_Trap) {
- REQUIRE(I64DivU);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(0, r.Call(uint64_t{0}, uint64_t{100}));
@@ -317,7 +259,6 @@ WASM_EXEC_TEST(I64DivU_Trap) {
}
WASM_EXEC_TEST(I64DivU_Byzero_Const) {
- REQUIRE(I64DivU);
for (uint64_t denom = 0xFFFFFFFFFFFFFFFE; denom < 8; denom++) {
WasmRunner<uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
@@ -333,7 +274,6 @@ WASM_EXEC_TEST(I64DivU_Byzero_Const) {
}
WASM_EXEC_TEST(I64RemS) {
- REQUIRE(I64RemS);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -348,7 +288,6 @@ WASM_EXEC_TEST(I64RemS) {
}
WASM_EXEC_TEST(I64RemS_Trap) {
- REQUIRE(I64RemS);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(33, r.Call(int64_t{133}, int64_t{100}));
@@ -359,7 +298,6 @@ WASM_EXEC_TEST(I64RemS_Trap) {
}
WASM_EXEC_TEST(I64RemU) {
- REQUIRE(I64RemU);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -374,7 +312,6 @@ WASM_EXEC_TEST(I64RemU) {
}
WASM_EXEC_TEST(I64RemU_Trap) {
- REQUIRE(I64RemU);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(17, r.Call(uint64_t{217}, uint64_t{100}));
@@ -384,7 +321,6 @@ WASM_EXEC_TEST(I64RemU_Trap) {
}
WASM_EXEC_TEST(I64And) {
- REQUIRE(I64And);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -393,7 +329,6 @@ WASM_EXEC_TEST(I64And) {
}
WASM_EXEC_TEST(I64Ior) {
- REQUIRE(I64Ior);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -402,7 +337,6 @@ WASM_EXEC_TEST(I64Ior) {
}
WASM_EXEC_TEST(I64Xor) {
- REQUIRE(I64Xor);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -411,7 +345,6 @@ WASM_EXEC_TEST(I64Xor) {
}
WASM_EXEC_TEST(I64Shl) {
- REQUIRE(I64Shl);
{
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -446,7 +379,6 @@ WASM_EXEC_TEST(I64Shl) {
}
WASM_EXEC_TEST(I64ShrU) {
- REQUIRE(I64ShrU);
{
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -481,7 +413,6 @@ WASM_EXEC_TEST(I64ShrU) {
}
WASM_EXEC_TEST(I64ShrS) {
- REQUIRE(I64ShrS);
{
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -516,7 +447,6 @@ WASM_EXEC_TEST(I64ShrS) {
}
WASM_EXEC_TEST(I64Eq) {
- REQUIRE(I64Eq);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -525,7 +455,6 @@ WASM_EXEC_TEST(I64Eq) {
}
WASM_EXEC_TEST(I64Ne) {
- REQUIRE(I64Ne);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_NE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -534,7 +463,6 @@ WASM_EXEC_TEST(I64Ne) {
}
WASM_EXEC_TEST(I64LtS) {
- REQUIRE(I64LtS);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -543,7 +471,6 @@ WASM_EXEC_TEST(I64LtS) {
}
WASM_EXEC_TEST(I64LeS) {
- REQUIRE(I64LeS);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_LES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -552,7 +479,6 @@ WASM_EXEC_TEST(I64LeS) {
}
WASM_EXEC_TEST(I64LtU) {
- REQUIRE(I64LtU);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -561,7 +487,6 @@ WASM_EXEC_TEST(I64LtU) {
}
WASM_EXEC_TEST(I64LeU) {
- REQUIRE(I64LeU);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -570,7 +495,6 @@ WASM_EXEC_TEST(I64LeU) {
}
WASM_EXEC_TEST(I64GtS) {
- REQUIRE(I64GtS);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -579,7 +503,6 @@ WASM_EXEC_TEST(I64GtS) {
}
WASM_EXEC_TEST(I64GeS) {
- REQUIRE(I64GeS);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_GES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -588,7 +511,6 @@ WASM_EXEC_TEST(I64GeS) {
}
WASM_EXEC_TEST(I64GtU) {
- REQUIRE(I64GtU);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -597,7 +519,6 @@ WASM_EXEC_TEST(I64GtU) {
}
WASM_EXEC_TEST(I64GeU) {
- REQUIRE(I64GeU);
WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -606,7 +527,6 @@ WASM_EXEC_TEST(I64GeU) {
}
WASM_EXEC_TEST(I32ConvertI64) {
- REQUIRE(I32ConvertI64);
FOR_INT64_INPUTS(i) {
WasmRunner<int32_t> r(execution_mode);
BUILD(r, WASM_I32_CONVERT_I64(WASM_I64V(*i)));
@@ -615,14 +535,12 @@ WASM_EXEC_TEST(I32ConvertI64) {
}
WASM_EXEC_TEST(I64SConvertI32) {
- REQUIRE(I64SConvertI32);
WasmRunner<int64_t, int32_t> r(execution_mode);
BUILD(r, WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(I64UConvertI32) {
- REQUIRE(I64UConvertI32);
WasmRunner<int64_t, uint32_t> r(execution_mode);
BUILD(r, WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(0)));
FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
@@ -646,14 +564,12 @@ WASM_EXEC_TEST(I64Popcnt) {
}
WASM_EXEC_TEST(F32SConvertI64) {
- REQUIRE(F32SConvertI64);
WasmRunner<float, int64_t> r(execution_mode);
BUILD(r, WASM_F32_SCONVERT_I64(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) { CHECK_FLOAT_EQ(static_cast<float>(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F32UConvertI64) {
- REQUIRE(F32UConvertI64);
struct {
uint64_t input;
uint32_t expected;
@@ -741,14 +657,12 @@ WASM_EXEC_TEST(F32UConvertI64) {
}
WASM_EXEC_TEST(F64SConvertI64) {
- REQUIRE(F64SConvertI64);
WasmRunner<double, int64_t> r(execution_mode);
BUILD(r, WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F64UConvertI64) {
- REQUIRE(F64UConvertI64);
struct {
uint64_t input;
uint64_t expected;
@@ -834,7 +748,7 @@ WASM_EXEC_TEST(F64UConvertI64) {
}
}
-WASM_EXEC_TEST(I64SConvertF32a) {
+WASM_EXEC_TEST(I64SConvertF32) {
WasmRunner<int64_t, float> r(execution_mode);
BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
@@ -848,7 +762,28 @@ WASM_EXEC_TEST(I64SConvertF32a) {
}
}
-WASM_EXEC_TEST(I64SConvertF64a) {
+WASM_EXEC_TEST(I64SConvertSatF32) {
+ EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
+ WasmRunner<int64_t, float> r(execution_mode);
+ BUILD(r, WASM_I64_SCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
+ FOR_FLOAT32_INPUTS(i) {
+ int64_t expected;
+ if (*i < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
+ *i >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ expected = static_cast<int64_t>(*i);
+ } else if (std::isnan(*i)) {
+ expected = static_cast<int64_t>(0);
+ } else if (*i < 0.0) {
+ expected = std::numeric_limits<int64_t>::min();
+ } else {
+ expected = std::numeric_limits<int64_t>::max();
+ }
+ int64_t found = r.Call(*i);
+ CHECK_EQ(expected, found);
+ }
+}
+
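The new *Sat* tests pin down the semantics of the saturating truncations from the nontrapping float-to-int conversions proposal: NaN maps to 0 and out-of-range inputs clamp to the type bounds instead of trapping. Restated as a standalone helper (a sketch mirroring the expected-value logic in the test above):

    int64_t SaturatingTruncF32ToI64(float x) {
      if (std::isnan(x)) return 0;
      if (x < static_cast<float>(std::numeric_limits<int64_t>::min()))
        return std::numeric_limits<int64_t>::min();
      if (x >= static_cast<float>(std::numeric_limits<int64_t>::max()))
        return std::numeric_limits<int64_t>::max();
      return static_cast<int64_t>(x);  // in range: ordinary truncation
    }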
+WASM_EXEC_TEST(I64SConvertF64) {
WasmRunner<int64_t, double> r(execution_mode);
BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
@@ -862,7 +797,28 @@ WASM_EXEC_TEST(I64SConvertF64a) {
}
}
-WASM_EXEC_TEST(I64UConvertF32a) {
+WASM_EXEC_TEST(I64SConvertSatF64) {
+ EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
+ WasmRunner<int64_t, double> r(execution_mode);
+ BUILD(r, WASM_I64_SCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
+ FOR_FLOAT64_INPUTS(i) {
+ int64_t expected;
+ if (*i < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
+ *i >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ expected = static_cast<int64_t>(*i);
+ } else if (std::isnan(*i)) {
+ expected = static_cast<int64_t>(0);
+ } else if (*i < 0.0) {
+ expected = std::numeric_limits<int64_t>::min();
+ } else {
+ expected = std::numeric_limits<int64_t>::max();
+ }
+ int64_t found = r.Call(*i);
+ CHECK_EQ(expected, found);
+ }
+}
+
+WASM_EXEC_TEST(I64UConvertF32) {
WasmRunner<uint64_t, float> r(execution_mode);
BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
@@ -876,7 +832,28 @@ WASM_EXEC_TEST(I64UConvertF32a) {
}
}
-WASM_EXEC_TEST(I64UConvertF64a) {
+WASM_EXEC_TEST(I64UConvertSatF32) {
+ EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
+ WasmRunner<int64_t, float> r(execution_mode);
+ BUILD(r, WASM_I64_UCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
+ FOR_FLOAT32_INPUTS(i) {
+ uint64_t expected;
+ if (*i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ *i > -1) {
+ expected = static_cast<uint64_t>(*i);
+ } else if (std::isnan(*i)) {
+ expected = static_cast<uint64_t>(0);
+ } else if (*i < 0.0) {
+ expected = std::numeric_limits<uint64_t>::min();
+ } else {
+ expected = std::numeric_limits<uint64_t>::max();
+ }
+ uint64_t found = r.Call(*i);
+ CHECK_EQ(expected, found);
+ }
+}
+
+WASM_EXEC_TEST(I64UConvertF64) {
WasmRunner<uint64_t, double> r(execution_mode);
BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
@@ -890,6 +867,27 @@ WASM_EXEC_TEST(I64UConvertF64a) {
}
}
+WASM_EXEC_TEST(I64UConvertSatF64) {
+ EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
+ WasmRunner<int64_t, double> r(execution_mode);
+ BUILD(r, WASM_I64_UCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
+ FOR_FLOAT64_INPUTS(i) {
+ int64_t expected;
+ if (*i < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ *i > -1) {
+ expected = static_cast<uint64_t>(*i);
+ } else if (std::isnan(*i)) {
+ expected = static_cast<uint64_t>(0);
+ } else if (*i < 0.0) {
+ expected = std::numeric_limits<uint64_t>::min();
+ } else {
+ expected = std::numeric_limits<uint64_t>::max();
+ }
+ int64_t found = r.Call(*i);
+ CHECK_EQ(expected, found);
+ }
+}
+
WASM_EXEC_TEST(CallI64Parameter) {
ValueType param_types[20];
for (int i = 0; i < 20; i++) param_types[i] = kWasmI64;
@@ -975,7 +973,7 @@ void TestI64Cmp(WasmExecutionMode execution_mode, WasmOpcode opcode,
#define TEST_I64_BINOP(name, expected, a, b) \
do { \
- if (WASM_64 || kSupported_##name) \
+ if (SupportsOpcode(kExpr##name)) \
TestI64Binop(execution_mode, kExpr##name, expected, a, b); \
} while (false)
@@ -1015,7 +1013,7 @@ WASM_EXEC_TEST(I64Binops) {
#define TEST_I64_CMP(name, expected, a, b) \
do { \
- if (WASM_64 || kSupported_##name) \
+ if (SupportsOpcode(kExpr##name)) \
TestI64Cmp(execution_mode, kExpr##name, expected, a, b); \
} while (false)
@@ -1035,7 +1033,6 @@ WASM_EXEC_TEST(I64Compare) {
#undef TEST_I64_CMP
WASM_EXEC_TEST(I64Clz) {
- REQUIRE(I64Clz);
struct {
int64_t expected;
uint64_t input;
@@ -1081,7 +1078,6 @@ WASM_EXEC_TEST(I64Clz) {
}
WASM_EXEC_TEST(I64Ctz) {
- REQUIRE(I64Ctz);
struct {
int64_t expected;
uint64_t input;
@@ -1127,7 +1123,6 @@ WASM_EXEC_TEST(I64Ctz) {
}
WASM_EXEC_TEST(I64Popcnt2) {
- REQUIRE(I64Popcnt);
struct {
int64_t expected;
uint64_t input;
@@ -1147,8 +1142,6 @@ WASM_EXEC_TEST(I64Popcnt2) {
// Test the WasmRunner with an Int64 return value and different numbers of
// Int64 parameters.
WASM_EXEC_TEST(I64WasmRunner) {
- REQUIRE(I64Param);
- REQUIRE(I64Xor);
{FOR_INT64_INPUTS(i){WasmRunner<int64_t> r(execution_mode);
BUILD(r, WASM_I64V(*i));
CHECK_EQ(*i, r.Call());
@@ -1196,7 +1189,6 @@ WASM_EXEC_TEST(I64WasmRunner) {
}
WASM_EXEC_TEST(Call_Int64Sub) {
- REQUIRE(I64Sub);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
// Build the target function.
TestSignatures sigs;
@@ -1222,8 +1214,6 @@ WASM_EXEC_TEST(Call_Int64Sub) {
}
WASM_EXEC_TEST(LoadStoreI64_sx) {
- REQUIRE(I64LoadStore);
- REQUIRE(DepthFirst);
byte loads[] = {kExprI64LoadMem8S, kExprI64LoadMem16S, kExprI64LoadMem32S,
kExprI64LoadMem};
@@ -1265,66 +1255,8 @@ WASM_EXEC_TEST(LoadStoreI64_sx) {
}
}
-WASM_EXEC_TEST(I64SConvertF32b) {
- REQUIRE(I64SConvertF32);
- WasmRunner<int64_t, float> r(execution_mode);
- BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
-
- FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(INT64_MAX) &&
- *i >= static_cast<float>(INT64_MIN)) {
- CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
- } else {
- CHECK_TRAP64(r.Call(*i));
- }
- }
-}
-
-WASM_EXEC_TEST(I64SConvertF64b) {
- REQUIRE(I64SConvertF64);
- WasmRunner<int64_t, double> r(execution_mode);
- BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
-
- FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<double>(INT64_MAX) &&
- *i >= static_cast<double>(INT64_MIN)) {
- CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
- } else {
- CHECK_TRAP64(r.Call(*i));
- }
- }
-}
-
-WASM_EXEC_TEST(I64UConvertF32b) {
- REQUIRE(I64UConvertF32);
- WasmRunner<uint64_t, float> r(execution_mode);
- BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
-
- FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(UINT64_MAX) && *i > -1) {
- CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
- } else {
- CHECK_TRAP64(r.Call(*i));
- }
- }
-}
-
-WASM_EXEC_TEST(I64UConvertF64b) {
- REQUIRE(I64UConvertF64);
- WasmRunner<uint64_t, double> r(execution_mode);
- BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
-
- FOR_FLOAT64_INPUTS(i) {
- if (*i < static_cast<float>(UINT64_MAX) && *i > -1) {
- CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
- } else {
- CHECK_TRAP64(r.Call(*i));
- }
- }
-}
WASM_EXEC_TEST(I64ReinterpretF64) {
- REQUIRE(I64ReinterpretF64);
WasmRunner<int64_t> r(execution_mode);
int64_t* memory = r.builder().AddMemoryElems<int64_t>(8);
@@ -1339,7 +1271,6 @@ WASM_EXEC_TEST(I64ReinterpretF64) {
}
WASM_EXEC_TEST(SignallingNanSurvivesI64ReinterpretF64) {
- REQUIRE(I64ReinterpretF64);
WasmRunner<int64_t> r(execution_mode);
BUILD(r, WASM_I64_REINTERPRET_F64(WASM_SEQ(kExprF64Const, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xF4, 0x7F)));
@@ -1349,7 +1280,6 @@ WASM_EXEC_TEST(SignallingNanSurvivesI64ReinterpretF64) {
}
WASM_EXEC_TEST(F64ReinterpretI64) {
- REQUIRE(F64ReinterpretI64);
WasmRunner<int64_t, int64_t> r(execution_mode);
int64_t* memory = r.builder().AddMemoryElems<int64_t>(8);
@@ -1365,7 +1295,6 @@ WASM_EXEC_TEST(F64ReinterpretI64) {
}
WASM_EXEC_TEST(LoadMemI64) {
- REQUIRE(I64LoadStore);
WasmRunner<int64_t> r(execution_mode);
int64_t* memory = r.builder().AddMemoryElems<int64_t>(8);
r.builder().RandomizeMemory(1111);
@@ -1383,7 +1312,6 @@ WASM_EXEC_TEST(LoadMemI64) {
}
WASM_EXEC_TEST(LoadMemI64_alignment) {
- REQUIRE(I64LoadStore);
for (byte alignment = 0; alignment <= 3; alignment++) {
WasmRunner<int64_t> r(execution_mode);
int64_t* memory = r.builder().AddMemoryElems<int64_t>(8);
@@ -1404,10 +1332,6 @@ WASM_EXEC_TEST(LoadMemI64_alignment) {
}
WASM_EXEC_TEST(MemI64_Sum) {
- REQUIRE(I64LoadStore);
- REQUIRE(I64Add);
- REQUIRE(I64Sub);
- REQUIRE(I64Phi);
const int kNumElems = 20;
WasmRunner<uint64_t, int32_t> r(execution_mode);
uint64_t* memory = r.builder().AddMemoryElems<uint64_t>(kNumElems);
@@ -1454,10 +1378,6 @@ WASM_EXEC_TEST(StoreMemI64_alignment) {
}
WASM_EXEC_TEST(I64Global) {
- REQUIRE(I64LoadStore);
- REQUIRE(I64SConvertI32);
- REQUIRE(I64And);
- REQUIRE(DepthFirst);
WasmRunner<int32_t, int32_t> r(execution_mode);
int64_t* global = r.builder().AddGlobal<int64_t>();
// global = global + p0
@@ -1475,8 +1395,6 @@ WASM_EXEC_TEST(I64Global) {
}
WASM_EXEC_TEST(I64Eqz) {
- REQUIRE(I64Eq);
-
WasmRunner<int32_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_EQZ(WASM_GET_LOCAL(0)));
@@ -1487,7 +1405,6 @@ WASM_EXEC_TEST(I64Eqz) {
}
WASM_EXEC_TEST(I64Ror) {
- REQUIRE(I64Ror);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -1500,7 +1417,6 @@ WASM_EXEC_TEST(I64Ror) {
}
WASM_EXEC_TEST(I64Rol) {
- REQUIRE(I64Rol);
WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -1703,9 +1619,6 @@ WASM_EXEC_TEST(MixedCall_i64_2) { Run_WasmMixedCall_N(execution_mode, 2); }
WASM_EXEC_TEST(MixedCall_i64_3) { Run_WasmMixedCall_N(execution_mode, 3); }
WASM_EXEC_TEST(Regress5874) {
- REQUIRE(I32ConvertI64);
- REQUIRE(I64LoadStore);
- REQUIRE(I64Const);
WasmRunner<int32_t> r(execution_mode);
r.builder().AddMemoryElems<int64_t>(8);
@@ -1719,7 +1632,6 @@ WASM_EXEC_TEST(Regress5874) {
}
WASM_EXEC_TEST(Regression_6858) {
- REQUIRE(I64DivS);
// WasmRunner with 5 params and returns, which is the maximum.
WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_mode);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index ab40a6366d..b5f7866ee7 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -9,8 +9,8 @@
#include "src/objects-inl.h"
#include "src/snapshot/code-serializer.h"
#include "src/version.h"
-#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
@@ -27,6 +27,8 @@ namespace v8 {
namespace internal {
namespace wasm {
+using testing::CompileAndInstantiateForTesting;
+
namespace {
void Cleanup(Isolate* isolate = nullptr) {
// By sending low memory notifications, we will try hard to collect all
@@ -283,9 +285,11 @@ class WasmSerializationTest {
0);
}
Handle<WasmInstanceObject> instance =
- SyncInstantiate(current_isolate(), &thrower, module_object,
- Handle<JSReceiver>::null(),
- MaybeHandle<JSArrayBuffer>())
+ current_isolate()
+ ->wasm_engine()
+ ->SyncInstantiate(current_isolate(), &thrower, module_object,
+ Handle<JSReceiver>::null(),
+ MaybeHandle<JSArrayBuffer>())
.ToHandleChecked();
Handle<Object> params[1] = {
Handle<Object>(Smi::FromInt(41), current_isolate())};
@@ -330,8 +334,9 @@ class WasmSerializationTest {
testing::SetupIsolateForWasmModule(serialization_isolate);
MaybeHandle<WasmModuleObject> module_object =
- SyncCompile(serialization_isolate, &thrower,
- ModuleWireBytes(buffer.begin(), buffer.end()));
+ serialization_isolate->wasm_engine()->SyncCompile(
+ serialization_isolate, &thrower,
+ ModuleWireBytes(buffer.begin(), buffer.end()));
MaybeHandle<WasmCompiledModule> compiled_module(
module_object.ToHandleChecked()->compiled_module(),
@@ -440,67 +445,6 @@ TEST(DeserializeWireBytesAndSerializedDataInvalid) {
Cleanup();
}
-std::unique_ptr<const uint8_t[]> CreatePayload(const uint8_t* start,
- size_t size) {
- uint8_t* ret = new uint8_t[size];
- memcpy(ret, start, size);
- return std::unique_ptr<const uint8_t[]>(const_cast<const uint8_t*>(ret));
-}
-
-TEST(ModuleBuilder) {
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- ZoneBuffer buffer(&zone);
- WasmSerializationTest::BuildWireBytes(&zone, &buffer);
- CHECK_GT(buffer.size(), 0);
- size_t third = buffer.size() / 3;
- size_t first_mark = third - 2;
- size_t second_mark = buffer.size() - 2 - third;
- CHECK_LT(0, first_mark);
- CHECK(first_mark < second_mark);
- CHECK(second_mark < buffer.size());
- Isolate* i_isolate = CcTest::InitIsolateOnce();
- v8::WasmModuleObjectBuilder builder(CcTest::isolate());
- std::unique_ptr<const uint8_t[]> first_part =
- CreatePayload(buffer.begin(), first_mark);
- std::unique_ptr<const uint8_t[]> second_part =
- CreatePayload(buffer.begin() + first_mark, second_mark - first_mark);
- std::unique_ptr<const uint8_t[]> third_part =
- CreatePayload(buffer.begin() + second_mark, buffer.size() - second_mark);
- builder.OnBytesReceived(first_part.get(), first_mark);
- builder.OnBytesReceived(second_part.get(), second_mark - first_mark);
- builder.OnBytesReceived(third_part.get(), buffer.size() - second_mark);
- {
- HandleScope scope(i_isolate);
- v8::MaybeLocal<v8::WasmCompiledModule> maybe_module = builder.Finish();
- CHECK(!maybe_module.IsEmpty());
- }
-}
-
-TEST(FailingModuleBuilder) {
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- ZoneBuffer buffer(&zone);
- WasmSerializationTest::BuildWireBytes(&zone, &buffer);
- CHECK_GT(buffer.size(), 0);
- size_t third = buffer.size() / 3;
- size_t first_mark = third - 2;
- size_t second_mark = buffer.size() - 2 - third;
- CHECK_LT(0, first_mark);
- CHECK(first_mark < second_mark);
- CHECK(second_mark < buffer.size());
- Isolate* i_isolate = CcTest::InitIsolateOnce();
- v8::WasmModuleObjectBuilder builder(CcTest::isolate());
- std::unique_ptr<const uint8_t[]> first_part =
- CreatePayload(buffer.begin(), first_mark);
- builder.OnBytesReceived(first_part.get(), first_mark);
- {
- HandleScope scope(i_isolate);
- v8::MaybeLocal<v8::WasmCompiledModule> maybe_module = builder.Finish();
- CHECK(maybe_module.IsEmpty());
- }
-}
-
bool False(v8::Local<v8::Context> context, v8::Local<v8::String> source) {
return false;
}
@@ -531,8 +475,10 @@ TEST(TransferrableWasmModules) {
HandleScope scope(from_isolate);
testing::SetupIsolateForWasmModule(from_isolate);
- MaybeHandle<WasmModuleObject> module_object = SyncCompile(
- from_isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+ MaybeHandle<WasmModuleObject> module_object =
+ from_isolate->wasm_engine()->SyncCompile(
+ from_isolate, &thrower,
+ ModuleWireBytes(buffer.begin(), buffer.end()));
v8::Local<v8::WasmCompiledModule> v8_module =
v8::Local<v8::WasmCompiledModule>::Cast(v8::Utils::ToLocal(
Handle<JSObject>::cast(module_object.ToHandleChecked())));
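The free functions SyncCompile and SyncInstantiate from module-compiler.h have moved onto the per-isolate WasmEngine (hence the new wasm-engine.h include at the top of this file), and the simple compile-and-run call sites collapse into the testing::CompileAndInstantiateForTesting helper imported above. The migrated call shape, in brief (a sketch of the pattern used throughout this diff):

    MaybeHandle<WasmModuleObject> module_object =
        isolate->wasm_engine()->SyncCompile(
            isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));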
@@ -685,9 +631,8 @@ TEST(TestInterruptLoop) {
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
const Handle<WasmInstanceObject> instance =
- SyncCompileAndInstantiate(isolate, &thrower,
- ModuleWireBytes(buffer.begin(), buffer.end()),
- {}, {})
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
.ToHandleChecked();
Handle<JSArrayBuffer> memory(instance->memory_object()->array_buffer(),
@@ -768,9 +713,8 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
ErrorThrower thrower(isolate, "Test");
Handle<WasmInstanceObject> instance =
- SyncCompileAndInstantiate(isolate, &thrower,
- ModuleWireBytes(buffer.begin(), buffer.end()),
- {}, {})
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
.ToHandleChecked();
// Initial memory size is 16 pages, should trap till index > MemSize on
@@ -816,9 +760,8 @@ TEST(Run_WasmModule_GrowMemOobVariableIndex) {
ErrorThrower thrower(isolate, "Test");
Handle<WasmInstanceObject> instance =
- SyncCompileAndInstantiate(isolate, &thrower,
- ModuleWireBytes(buffer.begin(), buffer.end()),
- {}, {})
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
.ToHandleChecked();
// Initial memory size is 16 pages, should trap till index > MemSize on
@@ -945,9 +888,8 @@ TEST(InitDataAtTheUpperLimit) {
'c' // data bytes
};
- SyncCompileAndInstantiate(isolate, &thrower,
- ModuleWireBytes(data, data + arraysize(data)), {},
- {});
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(data, data + arraysize(data)));
if (thrower.error()) {
thrower.Reify()->Print();
FATAL("compile or instantiate error");
@@ -982,9 +924,8 @@ TEST(EmptyMemoryNonEmptyDataSegment) {
'c' // data bytes
};
- SyncCompileAndInstantiate(isolate, &thrower,
- ModuleWireBytes(data, data + arraysize(data)), {},
- {});
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(data, data + arraysize(data)));
// It should not be possible to instantiate this module.
CHECK(thrower.error());
}
@@ -1016,9 +957,8 @@ TEST(EmptyMemoryEmptyDataSegment) {
U32V_1(0), // source size
};
- SyncCompileAndInstantiate(isolate, &thrower,
- ModuleWireBytes(data, data + arraysize(data)), {},
- {});
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(data, data + arraysize(data)));
// It should be possible to instantiate this module.
CHECK(!thrower.error());
}
@@ -1050,9 +990,8 @@ TEST(MemoryWithOOBEmptyDataSegment) {
U32V_1(0), // source size
};
- SyncCompileAndInstantiate(isolate, &thrower,
- ModuleWireBytes(data, data + arraysize(data)), {},
- {});
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(data, data + arraysize(data)));
// It should not be possible to instantiate this module.
CHECK(thrower.error());
}
@@ -1078,8 +1017,13 @@ struct ManuallyExternalizedBuffer {
}
~ManuallyExternalizedBuffer() {
if (!buffer_->has_guard_region()) {
- isolate_->array_buffer_allocator()->Free(
- allocation_base_, allocation_length_, buffer_->allocation_mode());
+ if (buffer_->allocation_mode() ==
+ ArrayBuffer::Allocator::AllocationMode::kReservation) {
+ CHECK(v8::internal::FreePages(allocation_base_, allocation_length_));
+ } else {
+ isolate_->array_buffer_allocator()->Free(allocation_base_,
+ allocation_length_);
+ }
}
}
};
@@ -1104,9 +1048,8 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
const Handle<WasmInstanceObject> instance =
- SyncCompileAndInstantiate(isolate, &thrower,
- ModuleWireBytes(buffer.begin(), buffer.end()),
- {}, {})
+ CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
.ToHandleChecked();
Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
@@ -1172,6 +1115,28 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) {
Cleanup();
}
+TEST(Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree) {
+ // Regression test for https://crbug.com/813876
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+#if V8_TARGET_ARCH_64_BIT
+ const bool require_guard_regions = trap_handler::IsTrapHandlerEnabled();
+#else
+ constexpr bool require_guard_regions = false;
+#endif
+ Handle<JSArrayBuffer> buffer =
+ wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize, require_guard_regions);
+ CHECK(!buffer.is_null());
+ Handle<WasmMemoryObject> mem = WasmMemoryObject::New(isolate, buffer, 128);
+ auto contents = v8::Utils::ToLocal(buffer)->Externalize();
+ WasmMemoryObject::Grow(isolate, mem, 0);
+ CHECK(FreePages(contents.AllocationBase(), contents.AllocationLength()));
+ // Make sure we can write to the buffer without crashing
+ uint32_t* int_buffer =
+ reinterpret_cast<uint32_t*>(mem->array_buffer()->backing_store());
+ int_buffer[0] = 0;
+}
+
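The regression test encodes one invariant: once a buffer has been externalized (ownership handed to the embedder), WasmMemoryObject::Grow must move the memory onto a fresh backing store rather than keep aliasing the caller-owned pages, which the test then frees. A sketch of the invariant being exercised (the CHECK_NE line is illustrative, not part of the test above):

    // After Grow, the memory must no longer alias the freed allocation.
    CHECK_NE(contents.Data(), mem->array_buffer()->backing_store());
    int_buffer[0] = 0;  // writing through the new store must not crash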
TEST(AtomicOpDisassembly) {
{
EXPERIMENTAL_FLAG_SCOPE(threads);
@@ -1198,8 +1163,9 @@ TEST(AtomicOpDisassembly) {
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
- MaybeHandle<WasmModuleObject> module_object = SyncCompile(
- isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+ MaybeHandle<WasmModuleObject> module_object =
+ isolate->wasm_engine()->SyncCompile(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
Handle<WasmCompiledModule> compiled_module(
module_object.ToHandleChecked()->compiled_module(), isolate);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
index 482ab9e905..eb7d01b2c4 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
@@ -50,12 +50,11 @@ WASM_COMPILED_EXEC_TEST(RunPatchWasmContext) {
for (RelocIterator it(*code, filter); !it.done(); it.next()) {
CHECK_EQ(old_wasm_context_address, it.rinfo()->wasm_context_reference());
it.rinfo()->set_wasm_context_reference(
- isolate, reinterpret_cast<Address>(&new_wasm_context));
+ reinterpret_cast<Address>(&new_wasm_context));
patched = true;
}
CHECK(patched);
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->instruction_start(), code->instruction_size());
}
// Run with the new global data.
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc b/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc
new file mode 100644
index 0000000000..8e1b88aadd
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc
@@ -0,0 +1,74 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+WASM_COMPILED_EXEC_TEST(I32SExtendI8) {
+ EXPERIMENTAL_FLAG_SCOPE(se);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
+ BUILD(r, WASM_I32_SIGN_EXT_I8(WASM_GET_LOCAL(0)));
+ CHECK_EQ(0, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+ CHECK_EQ(-1, r.Call(-1));
+ CHECK_EQ(0x7a, r.Call(0x7a));
+ CHECK_EQ(-0x80, r.Call(0x80));
+}
+
+WASM_COMPILED_EXEC_TEST(I32SExtendI16) {
+ EXPERIMENTAL_FLAG_SCOPE(se);
+ WasmRunner<int32_t, int32_t> r(execution_mode);
+ BUILD(r, WASM_I32_SIGN_EXT_I16(WASM_GET_LOCAL(0)));
+ CHECK_EQ(0, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+ CHECK_EQ(-1, r.Call(-1));
+ CHECK_EQ(0x7afa, r.Call(0x7afa));
+ CHECK_EQ(-0x8000, r.Call(0x8000));
+}
+// TODO(gdeepti): Enable these tests to run in the interpreter, and on 32-bit
+// platforms once int64 lowering is supported. Add JS tests once all ops can
+// run on 32-bit platforms.
+#if V8_TARGET_ARCH_64_BIT
+WASM_COMPILED_EXEC_TEST(I64SExtendI8) {
+ EXPERIMENTAL_FLAG_SCOPE(se);
+ WasmRunner<int64_t, int64_t> r(execution_mode);
+ BUILD(r, WASM_I64_SIGN_EXT_I8(WASM_GET_LOCAL(0)));
+ CHECK_EQ(0, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+ CHECK_EQ(-1, r.Call(-1));
+ CHECK_EQ(0x7a, r.Call(0x7a));
+ CHECK_EQ(-0x80, r.Call(0x80));
+}
+
+WASM_COMPILED_EXEC_TEST(I64SExtendI16) {
+ EXPERIMENTAL_FLAG_SCOPE(se);
+ WasmRunner<int64_t, int64_t> r(execution_mode);
+ BUILD(r, WASM_I64_SIGN_EXT_I16(WASM_GET_LOCAL(0)));
+ CHECK_EQ(0, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+ CHECK_EQ(-1, r.Call(-1));
+ CHECK_EQ(0x7afa, r.Call(0x7afa));
+ CHECK_EQ(-0x8000, r.Call(0x8000));
+}
+
+WASM_COMPILED_EXEC_TEST(I64SExtendI32) {
+ EXPERIMENTAL_FLAG_SCOPE(se);
+ WasmRunner<int64_t, int64_t> r(execution_mode);
+ BUILD(r, WASM_I64_SIGN_EXT_I32(WASM_GET_LOCAL(0)));
+ CHECK_EQ(0, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+ CHECK_EQ(-1, r.Call(-1));
+ CHECK_EQ(0x7fffffff, r.Call(0x7fffffff));
+ CHECK_EQ(-0x80000000LL, r.Call(0x80000000));
+}
+#endif // V8_TARGET_ARCH_64_BIT
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
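The tests in this new file cover the wasm sign-extension operators (guarded by EXPERIMENTAL_FLAG_SCOPE(se)): the operand's low 8, 16, or 32 bits are reinterpreted as a signed value of that width and widened back to the full type. The reference semantics, restated as a sketch consistent with the CHECK_EQ expectations above:

    int32_t SignExtend8To32(int32_t x) {
      // Keep the low byte, reinterpret it as signed, widen back to 32 bits.
      return static_cast<int32_t>(static_cast<int8_t>(x & 0xFF));
    }
    // e.g. SignExtend8To32(0x80) == -0x80 and SignExtend8To32(0x7a) == 0x7a.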
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 1a97cdc122..b585b5bd17 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -494,16 +494,12 @@ void RunF32x4UnOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
}
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(F32x4Abs) {
RunF32x4UnOpTest(lower_simd, kExprF32x4Abs, std::abs);
}
WASM_SIMD_TEST(F32x4Neg) {
RunF32x4UnOpTest(lower_simd, kExprF32x4Neg, Negate);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_X64
@@ -938,11 +934,7 @@ WASM_SIMD_TEST(I32x4Neg) {
RunI32x4UnOpTest(lower_simd, kExprI32x4Neg, Negate);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(S128Not) { RunI32x4UnOpTest(lower_simd, kExprS128Not, Not); }
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_IA32
void RunI32x4BinOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int32BinOp expected_op) {
@@ -1468,10 +1460,10 @@ WASM_SIMD_TEST(I8x16LeU) {
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Mul) { RunI8x16BinOpTest(lower_simd, kExprI8x16Mul, Mul); }
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
+ // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
void RunI8x16ShiftOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
Int8ShiftOp expected_op, int shift) {
@@ -1488,7 +1480,7 @@ void RunI8x16ShiftOpTest(LowerSimd lower_simd, WasmOpcode simd_op,
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
+ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Shl) {
RunI8x16ShiftOpTest(lower_simd, kExprI8x16Shl, LogicalShiftLeft, 1);
}
@@ -1501,15 +1493,13 @@ WASM_SIMD_TEST(I8x16ShrU) {
RunI8x16ShiftOpTest(lower_simd, kExprI8x16ShrU, LogicalShiftRight, 1);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
+ // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
// Test Select by making a mask where the 0th and 3rd lanes are true and the
// rest false, and comparing for non-equality with zero to convert to a boolean
// vector.
#define WASM_SIMD_SELECT_TEST(format) \
- WASM_SIMD_COMPILED_TEST(S##format##Select) { \
+ WASM_SIMD_TEST(S##format##Select) { \
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteTurbofan, lower_simd); \
byte val1 = 0; \
byte val2 = 1; \
@@ -1549,7 +1539,7 @@ WASM_SIMD_SELECT_TEST(8x16)
// Test Select by making a mask where the 0th and 3rd lanes are non-zero and the
// rest 0. The mask is not the result of a comparison op.
#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format) \
- WASM_SIMD_COMPILED_TEST(S##format##NonCanonicalSelect) { \
+ WASM_SIMD_TEST(S##format##NonCanonicalSelect) { \
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteTurbofan, \
lower_simd); \
byte val1 = 0; \
@@ -1584,6 +1574,8 @@ WASM_SIMD_NON_CANONICAL_SELECT_TEST(32x4)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(16x8)
WASM_SIMD_NON_CANONICAL_SELECT_TEST(8x16)
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
// Test binary ops with two lane test patterns, all lanes distinct.
template <typename T>
void RunBinaryLaneOpTest(
@@ -1617,24 +1609,24 @@ void RunBinaryLaneOpTest(
}
}
-WASM_SIMD_COMPILED_TEST(I32x4AddHoriz) {
+WASM_SIMD_TEST(I32x4AddHoriz) {
RunBinaryLaneOpTest<int32_t>(lower_simd, kExprI32x4AddHoriz, {{1, 5, 9, 13}});
}
-WASM_SIMD_COMPILED_TEST(I16x8AddHoriz) {
+WASM_SIMD_TEST(I16x8AddHoriz) {
RunBinaryLaneOpTest<int16_t>(lower_simd, kExprI16x8AddHoriz,
{{1, 5, 9, 13, 17, 21, 25, 29}});
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
- // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
-WASM_SIMD_COMPILED_TEST(F32x4AddHoriz) {
+WASM_SIMD_TEST(F32x4AddHoriz) {
RunBinaryLaneOpTest<float>(lower_simd, kExprF32x4AddHoriz,
{{1.0f, 5.0f, 9.0f, 13.0f}});
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64 ||
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
// Test some regular shuffles that may have special handling on some targets.
// Test a normal and unary versions (where second operand isn't used).
WASM_SIMD_COMPILED_TEST(S32x4Dup) {
@@ -1996,6 +1988,8 @@ WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
WASM_I32V(1), WASM_I32V(0)));
CHECK_EQ(1, r.Call());
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
@@ -2040,8 +2034,6 @@ WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
WASM_I32V(1), WASM_I32V(0)));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdI32x4Local) {
WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
@@ -2097,8 +2089,6 @@ WASM_SIMD_TEST(SimdI32x4For) {
CHECK_EQ(1, r.Call());
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64
WASM_SIMD_TEST(SimdF32x4For) {
WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
r.AllocateLocal(kWasmI32);
@@ -2122,8 +2112,6 @@ WASM_SIMD_TEST(SimdF32x4For) {
WASM_GET_LOCAL(0));
CHECK_EQ(1, r.Call());
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64
template <typename T, int numLanes = 4>
void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
@@ -2201,8 +2189,6 @@ WASM_SIMD_TEST(SimdI32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 3), 56);
}
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(SimdF32x4GetGlobal) {
WasmRunner<int32_t, int32_t> r(kExecuteTurbofan, lower_simd);
float* global = r.builder().AddGlobal<float>(kWasmS128);
@@ -2243,8 +2229,6 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
CHECK_EQ(GetScalar(global, 2), 32.25f);
CHECK_EQ(GetScalar(global, 3), 65.0f);
}
-#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
- // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
WasmRunner<int32_t> r(kExecuteTurbofan, lower_simd);
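Note on the AddHoriz expectations above ({1, 5, 9, 13} and friends): they follow from pairwise horizontal addition. A minimal scalar sketch that reproduces them, assuming RunBinaryLaneOpTest fills its two inputs with consecutive lane values 0..N-1 and N..2N-1 (the helper's body is truncated in this hunk, so that input pattern is an assumption):

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Scalar reference for SIMD AddHoriz: output lanes are sums of adjacent
// input pairs, pairs taken first from a, then from b.
template <typename T, size_t N>
std::array<T, N> AddHorizRef(const std::array<T, N>& a,
                             const std::array<T, N>& b) {
  std::array<T, N> out{};
  for (size_t i = 0; i < N / 2; i++) {
    out[i] = a[2 * i] + a[2 * i + 1];          // Pairs from the first input.
    out[N / 2 + i] = b[2 * i] + b[2 * i + 1];  // Pairs from the second.
  }
  return out;
}

int main() {
  std::array<int32_t, 4> a{0, 1, 2, 3}, b{4, 5, 6, 7};
  std::array<int32_t, 4> expected{1, 5, 9, 13};  // Matches I32x4AddHoriz above.
  assert(AddHorizRef(a, b) == expected);
}
```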
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 6a7fde6401..b8b0a4bbff 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -1949,6 +1949,9 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
+ // TODO(gdeepti): Enable this test for sign extension opcodes when lowering
+ // is enabled.
+ if (WasmOpcodes::IsSignExtensionOpcode(opcode)) return;
// Enable all optional operators.
compiler::CommonOperatorBuilder common(&zone);
compiler::MachineOperatorBuilder machine(
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 48640ef1e5..49e4b6ac44 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -88,7 +88,6 @@ class BreakHandler : public debug::DebugDelegate {
void BreakProgramRequested(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<int>&) override {
printf("Break #%d\n", count_);
CHECK_GT(expected_breaks_.size(), count_);
@@ -114,26 +113,6 @@ class BreakHandler : public debug::DebugDelegate {
}
};
-Handle<JSObject> MakeFakeBreakpoint(Isolate* isolate, int position) {
- Handle<JSObject> obj =
- isolate->factory()->NewJSObject(isolate->object_function());
- // Generate an "isTriggered" method that always returns true.
- // This can/must be refactored once we remove remaining JS parts from the
- // debugger (bug 5530).
- Handle<String> source = isolate->factory()->NewStringFromStaticChars("true");
- Handle<Context> context(isolate->context(), isolate);
- Handle<JSFunction> triggered_fun =
- Compiler::GetFunctionFromString(context, source, NO_PARSE_RESTRICTION,
- kNoSourcePosition)
- .ToHandleChecked();
- PropertyDescriptor desc;
- desc.set_value(triggered_fun);
- Handle<String> name =
- isolate->factory()->InternalizeUtf8String(CStrVector("isTriggered"));
- CHECK(JSObject::DefineOwnProperty(isolate, obj, name, &desc, kDontThrow)
- .FromMaybe(false));
- return obj;
-}
void SetBreakpoint(WasmRunnerBase& runner, int function_index, int byte_offset,
int expected_set_byte_offset = -1) {
@@ -143,10 +122,12 @@ void SetBreakpoint(WasmRunnerBase& runner, int function_index, int byte_offset,
if (expected_set_byte_offset == -1) expected_set_byte_offset = byte_offset;
Handle<WasmInstanceObject> instance = runner.builder().instance_object();
Handle<WasmCompiledModule> compiled_module(instance->compiled_module());
- Handle<JSObject> fake_breakpoint_object =
- MakeFakeBreakpoint(runner.main_isolate(), code_offset);
+ static int break_index = 0;
+ Handle<BreakPoint> break_point =
+ runner.main_isolate()->factory()->NewBreakPoint(
+ break_index++, runner.main_isolate()->factory()->empty_string());
CHECK(WasmCompiledModule::SetBreakPoint(compiled_module, &code_offset,
- fake_breakpoint_object));
+ break_point));
int set_byte_offset = code_offset - func_offset;
CHECK_EQ(expected_set_byte_offset, set_byte_offset);
// Also set breakpoint on the debug info of the instance directly, since the
@@ -212,7 +193,6 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
void BreakProgramRequested(v8::Local<v8::Context> paused_context,
v8::Local<v8::Object> exec_state,
- v8::Local<v8::Value> break_points_hit,
const std::vector<int>&) override {
printf("Break #%d\n", count_);
CHECK_GT(expected_values_.size(), count_);
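Both handlers above drop the break_points_hit value parameter; after this change the DebugDelegate hook takes the shape sketched below, with hit breakpoints presumably reported as ids through the final vector argument (an inference from these hunks, not something this diff confirms):

```cpp
#include <vector>
#include "include/v8.h"
#include "src/debug/debug-interface.h"  // Assumed location of DebugDelegate.

// Sketch of a minimal delegate matching the post-change signature used by
// BreakHandler and CollectValuesBreakHandler above.
class CountingBreakHandler : public v8::debug::DebugDelegate {
 public:
  void BreakProgramRequested(v8::Local<v8::Context> paused_context,
                             v8::Local<v8::Object> exec_state,
                             const std::vector<int>& break_points_hit) override {
    count_++;  // Hit-breakpoint ids now arrive in break_points_hit.
  }
  int count() const { return count_; }

 private:
  int count_ = 0;
};
```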
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 4fa1fb1c7a..6927dd4cb5 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -159,11 +159,22 @@ void TestingModuleBuilder::AddIndirectFunctionTable(
table_size * compiler::kFunctionTableEntrySize);
function_tables_.push_back(
isolate_->global_handles()->Create(func_table).address());
+
+ if (WASM_CONTEXT_TABLES) {
+ WasmContext* wasm_context = instance_object()->wasm_context()->get();
+ wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
+ calloc(table_size, sizeof(IndirectFunctionTableEntry)));
+ wasm_context->table_size = table_size;
+ for (uint32_t i = 0; i < table_size; i++) {
+ wasm_context->table[i].sig_id = -1;
+ }
+ }
}
void TestingModuleBuilder::PopulateIndirectFunctionTable() {
if (interpret()) return;
// Initialize the fixed arrays in instance->function_tables.
+ WasmContext* wasm_context = instance_object()->wasm_context()->get();
for (uint32_t i = 0; i < function_tables_.size(); i++) {
WasmIndirectFunctionTable& table = test_module_.function_tables[i];
Handle<FixedArray> function_table(
@@ -171,17 +182,16 @@ void TestingModuleBuilder::PopulateIndirectFunctionTable() {
int table_size = static_cast<int>(table.values.size());
for (int j = 0; j < table_size; j++) {
WasmFunction& function = test_module_.functions[table.values[j]];
- function_table->set(
- compiler::FunctionTableSigOffset(j),
- Smi::FromInt(test_module_.signature_map.Find(function.sig)));
- if (FLAG_wasm_jit_to_native) {
- Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
- native_module_->GetCode(function.func_index)
- ->instructions()
- .start(),
- TENURED);
- function_table->set(compiler::FunctionTableCodeOffset(j),
- *foreign_holder);
+ int sig_id = test_module_.signature_map.Find(function.sig);
+ function_table->set(compiler::FunctionTableSigOffset(j),
+ Smi::FromInt(sig_id));
+ if (WASM_CONTEXT_TABLES) {
+ auto start = native_module_->GetCode(function.func_index)
+ ->instructions()
+ .start();
+ wasm_context->table[j].context = wasm_context;
+ wasm_context->table[j].sig_id = sig_id;
+ wasm_context->table[j].target = start;
} else {
function_table->set(compiler::FunctionTableCodeOffset(j),
*function_code_[function.func_index]);
@@ -315,10 +325,10 @@ WasmFunctionWrapper::WasmFunctionWrapper(Zone* zone, int num_params)
signature_ = sig_builder.Build();
}
-void WasmFunctionWrapper::Init(CallDescriptor* descriptor,
+void WasmFunctionWrapper::Init(CallDescriptor* call_descriptor,
MachineType return_type,
Vector<MachineType> param_types) {
- DCHECK_NOT_NULL(descriptor);
+ DCHECK_NOT_NULL(call_descriptor);
DCHECK_EQ(signature_->parameter_count(), param_types.length() + 1);
// Create the TF graph for the wrapper.
@@ -349,8 +359,8 @@ void WasmFunctionWrapper::Init(CallDescriptor* descriptor,
parameters[parameter_count++] = effect;
parameters[parameter_count++] = graph()->start();
- Node* call =
- graph()->NewNode(common()->Call(descriptor), parameter_count, parameters);
+ Node* call = graph()->NewNode(common()->Call(call_descriptor),
+ parameter_count, parameters);
if (!return_type.IsNone()) {
effect = graph()->NewNode(
@@ -373,7 +383,7 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
if (code_.is_null()) {
Isolate* isolate = CcTest::InitIsolateOnce();
- CallDescriptor* descriptor =
+ auto call_descriptor =
compiler::Linkage::GetSimplifiedCDescriptor(zone(), signature_, true);
if (kPointerSize == 4) {
@@ -394,7 +404,7 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
CompilationInfo info(ArrayVector("testing"), graph()->zone(),
Code::C_WASM_ENTRY);
code_ = compiler::Pipeline::GenerateCodeForTesting(
- &info, isolate, descriptor, graph(), nullptr);
+ &info, isolate, call_descriptor, graph(), nullptr);
CHECK(!code_.is_null());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
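The WASM_CONTEXT_TABLES hunks above give each test instance a C-level indirect function table whose entries pair a signature id with a call target. The slots come out of calloc zeroed, and the loop then stamps sig_id = -1, presumably because 0 is a valid canonical signature id and must not mark an empty slot. A sketch of the entry layout and the signature check such a table enables (field names follow the diff; the helper is illustrative, not V8's actual call_indirect lowering):

```cpp
#include <cstdint>

struct WasmContext;  // Opaque here; owned by the instance.

// Field names follow the diff above; types are simplified stand-ins.
struct IndirectFunctionTableEntry {
  int32_t sig_id;         // Canonical signature id; -1 marks an empty slot.
  WasmContext* context;   // Context to install for the callee.
  const uint8_t* target;  // Start of the callee's instructions.
};

// A call_indirect must match the expected signature id before jumping to
// target; slots stamped with -1 never match and therefore trap.
inline bool SignatureMatches(const IndirectFunctionTableEntry& entry,
                             int32_t expected_sig_id) {
  return entry.sig_id == expected_sig_id;
}
```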
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index c0ce21533f..56a16fce6a 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -268,16 +268,16 @@ class WasmFunctionWrapper : private compiler::GraphAndBuilders {
public:
WasmFunctionWrapper(Zone* zone, int num_params);
- void Init(CallDescriptor* descriptor, MachineType return_type,
+ void Init(CallDescriptor* call_descriptor, MachineType return_type,
Vector<MachineType> param_types);
template <typename ReturnType, typename... ParamTypes>
- void Init(CallDescriptor* descriptor) {
+ void Init(CallDescriptor* call_descriptor) {
std::array<MachineType, sizeof...(ParamTypes)> param_machine_types{
{MachineTypeForC<ParamTypes>()...}};
Vector<MachineType> param_vec(param_machine_types.data(),
param_machine_types.size());
- Init(descriptor, MachineTypeForC<ReturnType>(), param_vec);
+ Init(call_descriptor, MachineTypeForC<ReturnType>(), param_vec);
}
void SetInnerCode(WasmCodeWrapper code) {
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 83ddaa6b72..02af6e9508 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -564,6 +564,10 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_I32_UCONVERT_SAT_F32(x) x, WASM_NUMERIC_OP(kExprI32UConvertSatF32)
#define WASM_I32_SCONVERT_SAT_F64(x) x, WASM_NUMERIC_OP(kExprI32SConvertSatF64)
#define WASM_I32_UCONVERT_SAT_F64(x) x, WASM_NUMERIC_OP(kExprI32UConvertSatF64)
+#define WASM_I64_SCONVERT_SAT_F32(x) x, WASM_NUMERIC_OP(kExprI64SConvertSatF32)
+#define WASM_I64_UCONVERT_SAT_F32(x) x, WASM_NUMERIC_OP(kExprI64UConvertSatF32)
+#define WASM_I64_SCONVERT_SAT_F64(x) x, WASM_NUMERIC_OP(kExprI64SConvertSatF64)
+#define WASM_I64_UCONVERT_SAT_F64(x) x, WASM_NUMERIC_OP(kExprI64UConvertSatF64)
//------------------------------------------------------------------------------
// Memory Operations.
@@ -616,4 +620,13 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
x, y, WASM_ATOMICS_OP(op), \
static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
+//------------------------------------------------------------------------------
+// Sign Extension Operations.
+//------------------------------------------------------------------------------
+#define WASM_I32_SIGN_EXT_I8(x) x, kExprI32SExtendI8
+#define WASM_I32_SIGN_EXT_I16(x) x, kExprI32SExtendI16
+#define WASM_I64_SIGN_EXT_I8(x) x, kExprI64SExtendI8
+#define WASM_I64_SIGN_EXT_I16(x) x, kExprI64SExtendI16
+#define WASM_I64_SIGN_EXT_I32(x) x, kExprI64SExtendI32
+
#endif // V8_WASM_MACRO_GEN_H_
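The new sign-extension macros splice the opcode after its operand, so they drop straight into the cctest builders. A sketch of one such test, following the WasmRunner/BUILD style used elsewhere in this diff (the test name and execution_mode parameter are assumptions):

```cpp
// Hypothetical cctest exercising the new macro; WasmRunner and BUILD come
// from test/cctest/wasm/wasm-run-utils.h.
WASM_EXEC_TEST(I32SExtendI8) {
  WasmRunner<int32_t, int32_t> r(execution_mode);
  BUILD(r, WASM_I32_SIGN_EXT_I8(WASM_GET_LOCAL(0)));
  CHECK_EQ(0, r.Call(0));
  CHECK_EQ(127, r.Call(127));
  CHECK_EQ(-128, r.Call(128));  // Bit 7 set: value sign-extends negative.
  CHECK_EQ(-1, r.Call(0xFF));
}
```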
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index d89d87005e..046d69e804 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -9,8 +9,8 @@
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/property-descriptor.h"
-#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
@@ -26,6 +26,17 @@ uint32_t GetInitialMemSize(const WasmModule* module) {
return kWasmPageSize * module->initial_pages;
}
+MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
+ MaybeHandle<WasmModuleObject> module =
+ isolate->wasm_engine()->SyncCompile(isolate, thrower, bytes);
+ DCHECK_EQ(thrower->error(), module.is_null());
+ if (module.is_null()) return {};
+
+ return isolate->wasm_engine()->SyncInstantiate(
+ isolate, thrower, module.ToHandleChecked(), {}, {});
+}
+
std::unique_ptr<WasmModule> DecodeWasmModuleForTesting(
Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
const byte* module_end, ModuleOrigin origin, bool verify_functions) {
@@ -117,8 +128,8 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end) {
HandleScope scope(isolate);
ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
- MaybeHandle<WasmInstanceObject> instance = SyncCompileAndInstantiate(
- isolate, &thrower, ModuleWireBytes(module_start, module_end), {}, {});
+ MaybeHandle<WasmInstanceObject> instance = CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(module_start, module_end));
if (instance.is_null()) {
return -1;
}
@@ -130,15 +141,17 @@ int32_t CompileAndRunAsmWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end) {
HandleScope scope(isolate);
ErrorThrower thrower(isolate, "CompileAndRunAsmWasmModule");
- MaybeHandle<WasmModuleObject> module = wasm::SyncCompileTranslatedAsmJs(
- isolate, &thrower, ModuleWireBytes(module_start, module_end),
- Handle<Script>::null(), Vector<const byte>());
+ MaybeHandle<WasmModuleObject> module =
+ isolate->wasm_engine()->SyncCompileTranslatedAsmJs(
+ isolate, &thrower, ModuleWireBytes(module_start, module_end),
+ Handle<Script>::null(), Vector<const byte>());
DCHECK_EQ(thrower.error(), module.is_null());
if (module.is_null()) return -1;
- MaybeHandle<WasmInstanceObject> instance = wasm::SyncInstantiate(
- isolate, &thrower, module.ToHandleChecked(), Handle<JSReceiver>::null(),
- Handle<JSArrayBuffer>::null());
+ MaybeHandle<WasmInstanceObject> instance =
+ isolate->wasm_engine()->SyncInstantiate(
+ isolate, &thrower, module.ToHandleChecked(),
+ Handle<JSReceiver>::null(), Handle<JSArrayBuffer>::null());
DCHECK_EQ(thrower.error(), instance.is_null());
if (instance.is_null()) return -1;
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
index cf85822175..7b1c8bb5c0 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.h
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -54,6 +54,10 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end);
+// Decode, compile, and instantiate the given module with no imports.
+MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
+
// Interprets the given module, starting at the function specified by
// {function_index}. The return type of the function has to be int32. The module
// should not have any imports or exports.
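A minimal sketch of driving the new helper, mirroring the CompileAndRunWasmModule change in wasm-module-runner.cc above (the surrounding isolate and wire-bytes setup is assumed):

```cpp
// Compile and instantiate a module with no imports; on failure the
// ErrorThrower holds the details and the returned MaybeHandle is empty.
HandleScope scope(isolate);
ErrorThrower thrower(isolate, "example");
MaybeHandle<WasmInstanceObject> instance = CompileAndInstantiateForTesting(
    isolate, &thrower, ModuleWireBytes(module_start, module_end));
if (instance.is_null()) return -1;
```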
diff --git a/deps/v8/test/d8_default.gyp b/deps/v8/test/d8_default.gyp
deleted file mode 100644
index 399623d30a..0000000000
--- a/deps/v8/test/d8_default.gyp
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2018 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'd8_default_run',
- 'type': 'none',
- 'dependencies': [
- 'debugger/debugger.gyp:debugger_run',
- 'intl/intl.gyp:intl_run',
- 'message/message.gyp:message_run',
- 'mjsunit/mjsunit.gyp:mjsunit_run',
- 'preparser/preparser.gyp:preparser_run',
- 'webkit/webkit.gyp:webkit_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'd8_default.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/debugger/debug/debug-scopes-suspended-generators.js b/deps/v8/test/debugger/debug/debug-scopes-suspended-generators.js
index 2d9d253386..8d09e9c2a3 100644
--- a/deps/v8/test/debugger/debug/debug-scopes-suspended-generators.js
+++ b/deps/v8/test/debugger/debug/debug-scopes-suspended-generators.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --no-analyze-environment-liveness
+
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
diff --git a/deps/v8/test/debugger/debug/debug-setbreakpoint.js b/deps/v8/test/debugger/debug/debug-setbreakpoint.js
index bfcab7c6fd..28a12f78cc 100644
--- a/deps/v8/test/debugger/debug/debug-setbreakpoint.js
+++ b/deps/v8/test/debugger/debug/debug-setbreakpoint.js
@@ -129,8 +129,7 @@ assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by
function SetBreakpointInI1Script() {
var i_script = Debug.findScript(i1);
assertTrue(!!i_script, "invalid script for i1");
- Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- i_script.id, 4);
+ Debug.setScriptBreakPointById(i_script.id, 4);
}
// Creates the eval script and tries to set the breakpoint.
diff --git a/deps/v8/test/mjsunit/deserialize-script-id.js b/deps/v8/test/debugger/debug/deserialize-script-id.js
index 5dca9f353a..25b8c84749 100644
--- a/deps/v8/test/mjsunit/deserialize-script-id.js
+++ b/deps/v8/test/debugger/debug/deserialize-script-id.js
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax --cache=code
// Test that script ids are unique and we found the correct ones.
-var Debug = %GetDebugContext().Debug;
+var Debug = debug.Debug;
Debug.setListener(function(){});
var scripts = %DebugGetLoadedScripts();
diff --git a/deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js b/deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js
new file mode 100644
index 0000000000..3bd1fead08
--- /dev/null
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/proxy-as-promise.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Test debug events when we listen to all exceptions and
+// there is a catch handler for the exception thrown in a Promise.
+// We expect a normal Exception debug event to be triggered.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+
+class P extends Promise {
+ constructor(...args) {
+ super(...args);
+ return new Proxy(this, {
+ get(target, property, receiver) {
+ if (property in target) {
+ return Reflect.get(target, property, receiver);
+ } else {
+ return (...args) =>
+ new Promise((resolve, reject) =>
+ target.then(v => resolve(v[property](...args)))
+ .catch(reject)
+ );
+ }
+ }
+ });
+ }
+}
+
+P.resolve({doStuff(){log.push(1)}}).doStuff()
+
+function listener(event, exec_state, event_data, data) {}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+%RunMicrotasks();
diff --git a/deps/v8/test/debugger/debug/es6/debug-promises/throw-with-throw-in-reject.js b/deps/v8/test/debugger/debug/es6/debug-promises/throw-with-throw-in-reject.js
index b17054b06d..5cca2f93f0 100644
--- a/deps/v8/test/debugger/debug/es6/debug-promises/throw-with-throw-in-reject.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/throw-with-throw-in-reject.js
@@ -3,15 +3,13 @@
// found in the LICENSE file.
-// Test debug events when an exception is thrown inside a Promise, which is
-// caught by a custom promise, which throws a new exception in its reject
-// handler. We expect two Exception debug events:
-// 1) when the exception is thrown in the promise q.
-// 2) when the custom reject closure in MyPromise throws an exception.
+// Test debug events when an exception is thrown inside a Promise,
+// which is caught by a custom promise, which throws a new exception
+// in its reject handler. We expect no Exception debug events.
Debug = debug.Debug;
-var expected_events = 1;
+var expected_events = 0;
var log = [];
var p = new Promise(function(resolve, reject) {
@@ -21,11 +19,9 @@ var p = new Promise(function(resolve, reject) {
function MyPromise(resolver) {
var reject = function() {
- log.push("throw in reject");
throw new Error("reject"); // event
};
var resolve = function() { };
- log.push("construct");
resolver(resolve, reject);
};
@@ -42,16 +38,7 @@ var q = p.then(
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Exception) {
- expected_events--;
- assertTrue(expected_events >= 0);
- if (expected_events == 0) {
- assertEquals(["resolve", "construct", "end main",
- "throw caught"], log);
- assertEquals("caught", event_data.exception().message);
- } else {
- assertUnreachable();
- }
- assertTrue(exec_state.frame(0).sourceLineText().indexOf('// event') > 0);
+ assertUnreachable();
}
} catch (e) {
%AbortJS(e + "\n" + e.stack);
@@ -68,8 +55,8 @@ function testDone(iteration) {
try {
assertTrue(iteration < 10);
if (expected_events === 0) {
- assertEquals(["resolve", "construct", "end main",
- "throw caught", "throw in reject"], log);
+ assertEquals(["resolve", "end main",
+ "throw caught"], log);
} else {
testDone(iteration + 1);
}
diff --git a/deps/v8/test/debugger/debug/regress/regress-3960.js b/deps/v8/test/debugger/debug/regress/regress-3960.js
deleted file mode 100644
index 6580d4b805..0000000000
--- a/deps/v8/test/debugger/debug/regress/regress-3960.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-// Test that setting break point works correctly when the debugger is
-// activated late, which leads to duplicate shared function infos.
-
-(function() {
- var Debug = %GetDebugContext().Debug;
-
- function listener(event, exec_state, event_data, data) {
- if (event != Debug.DebugEvent.Break) return;
- try {
- assertTrue(/foo/.test(exec_state.frame(0).sourceLineText()));
- break_count++;
- } catch (e) {
- exception = e;
- }
- }
-
- for (var i = 0; i < 3; i++) {
- var foo = function() { a = 1; }
- var exception = null;
- var break_count = 0;
- Debug.setListener(listener);
- if (i < 2) Debug.setBreakPoint(foo, 0, 0);
- assertTrue(/\[B\d\]a = 1/.test(Debug.showBreakPoints(foo)));
- foo();
- assertEquals(1, break_count);
- assertNull(exception);
- }
-
- Debug.setListener(null);
-})();
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-async.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-async.js
index c5c8eeb9c6..c7f55c148c 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-async.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-async.js
@@ -19,6 +19,8 @@ async function async() {
var g = generator();
+var p = new Promise(() => {});
+
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
@@ -30,6 +32,15 @@ function listener(event, exec_state, event_data, data) {
fail("generator()");
fail("g.next()");
fail("async()");
+ fail("Promise.resolve()");
+ fail("Promise.reject()");
+ fail("p.then(() => {})");
+ fail("p.catch(() => {})");
+ fail("p.finally(() => {})");
+ fail("Promise.all([p, p])");
+ fail("Promise.race([p, p])");
+ fail("(async function() {})()");
+ fail("(async function() { await 1; })()");
} catch (e) {
exception = e;
print(e, e.stack);
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-builtins-2.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins-2.js
index 1b357ef6bb..b965c95249 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-builtins-2.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins-2.js
@@ -7,6 +7,10 @@ Debug = debug.Debug
var exception = null;
var date = new Date();
var map = new Map().set("a", "b").set("c", "d");
+var set = new Set([1, 2]);
+var weak_key = [];
+var weak_map = new WeakMap().set(weak_key, "a").set({}, "b");
+var weak_set = new WeakSet([weak_key, {}]);
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
@@ -64,12 +68,46 @@ function listener(event, exec_state, event_data, data) {
success(undefined, `map.entries()`);
success(undefined, `map.keys()`);
success(undefined, `map.values()`);
+ success(undefined, `map.forEach(()=>1)`);
+ success(true, `map.has("c")`);
success(2, `map.size`);
- fail(`map.has("c")`); // This sets a hash on the object.
- fail(`map.forEach(()=>1)`);
+ fail(`new Map([[1, 2]])`);
fail(`map.delete("a")`);
fail(`map.clear()`);
fail(`map.set("x", "y")`);
+
+ // Test Set functions.
+ success(undefined, `new Set()`);
+ success("[object Set]", `set.toString()`);
+ success(undefined, `set.entries()`);
+ success(undefined, `set.keys()`);
+ success(undefined, `set.values()`);
+ success(undefined, `set.forEach(()=>1)`);
+ success(true, `set.has(1)`);
+ success(2, `set.size`);
+ fail(`new Set([1])`);
+ fail(`set.add(2)`);
+ fail(`set.delete(1)`);
+ fail(`set.clear()`);
+
+ // Test WeakMap functions.
+ success(undefined, `new WeakMap()`);
+ success("[object WeakMap]", `weak_map.toString()`);
+ success("a", `weak_map.get(weak_key)`);
+ success(true, `weak_map.get([]) === undefined`);
+ success(true, `weak_map.has(weak_key)`);
+ fail(`new WeakMap([[[], {}]])`);
+ fail(`weak_map.delete("a")`);
+ fail(`weak_map.set("x", "y")`);
+
+ // Test WeakSet functions.
+ success(undefined, `new WeakSet()`);
+ success("[object WeakSet]", `weak_set.toString()`);
+ success(true, `weak_set.has(weak_key)`);
+ fail(`new WeakSet([[], {}])`);
+ fail(`weak_set.add([])`);
+ fail(`weak_set.delete("a")`);
+
} catch (e) {
exception = e;
print(e, e.stack);
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-builtins.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
index 73edb03a60..a3240ef4f3 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-builtins.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
@@ -8,6 +8,9 @@ var exception = null;
var object_with_symbol_key = {[Symbol("a")]: 1};
var object_with_callbacks = { toString: () => "string", valueOf: () => 3};
var symbol_for_a = Symbol.for("a");
+var typed_array = new Uint8Array([1, 2, 3]);
+var array_buffer = new ArrayBuffer(3);
+var data_view = new DataView(new ArrayBuffer(8), 0, 8);
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
@@ -57,14 +60,18 @@ function listener(event, exec_state, event_data, data) {
success(3, `(object_with_callbacks).valueOf()`);
// Test Array functions.
+ success(true, `Array.isArray([1, 2, 3])`);
success([], `new Array()`);
+ success([undefined, undefined], `new Array(2)`);
+ success([1, 2], `new Array(1, 2)`);
+ fail(`Array.from([1, 2, 3])`);
+ fail(`Array.of(1, 2, 3)`);
var function_param = [
"forEach", "every", "some", "reduce", "reduceRight", "find", "filter",
"map", "findIndex"
];
- var fails = ["toString", "join", "toLocaleString", "pop", "push",
- "reverse", "shift", "unshift", "slice", "splice", "sort", "filter",
- "map", "copyWithin", "fill", "concat"];
+ var fails = ["toString", "join", "toLocaleString", "pop", "push", "reverse",
+ "shift", "unshift", "splice", "sort", "copyWithin", "fill"];
for (f of Object.getOwnPropertyNames(Array.prototype)) {
if (typeof Array.prototype[f] === "function") {
if (fails.includes(f)) {
@@ -81,6 +88,52 @@ function listener(event, exec_state, event_data, data) {
}
}
+ // Test ArrayBuffer functions.
+ success(3, `array_buffer.byteLength`);
+ success(2, `array_buffer.slice(1, 3).byteLength`);
+ success(true, `ArrayBuffer.isView(typed_array)`);
+
+ // Test DataView functions.
+ success(undefined, `new DataView(array_buffer, 1, 2)`);
+ success(undefined, `data_view.buffer`);
+ success(undefined, `data_view.byteLength`);
+ success(undefined, `data_view.byteOffset`);
+ for (f of Object.getOwnPropertyNames(DataView.prototype)) {
+ if (typeof data_view[f] === 'function' && f.startsWith('get'))
+ success(0, `data_view.${f}()`);
+ }
+
+ // Test TypedArray functions.
+ success({}, `new Uint8Array()`);
+ success({0: 0, 1: 0}, `new Uint8Array(2)`);
+ success({0: 1, 1: 2, 2: 3}, `new Uint8Array(typed_array)`);
+ success(true, `!!typed_array.buffer`);
+ success(0, `typed_array.byteOffset`);
+ success(3, `typed_array.byteLength`);
+ fail(`Uint8Array.of(1, 2)`);
+ function_param = [
+ "forEach", "every", "some", "reduce", "reduceRight", "find", "filter",
+ "map", "findIndex"
+ ];
+ fails = ["toString", "join", "toLocaleString", "reverse", "sort",
+ "copyWithin", "fill", "set"];
+ var typed_proto_proto = Object.getPrototypeOf(Object.getPrototypeOf(new Uint8Array()));
+ for (f of Object.getOwnPropertyNames(typed_proto_proto)) {
+ if (typeof typed_array[f] === "function" && f !== "constructor") {
+ if (fails.includes(f)) {
+ if (function_param.includes(f)) {
+ fail(`typed_array.${f}(()=>{});`);
+ } else {
+ fail(`typed_array.${f}();`);
+ }
+ } else if (function_param.includes(f)) {
+ exec_state.frame(0).evaluate(`typed_array.${f}(()=>{});`, true);
+ } else {
+ exec_state.frame(0).evaluate(`typed_array.${f}();`, true);
+ }
+ }
+ }
+
// Test Math functions.
for (f of Object.getOwnPropertyNames(Math)) {
if (typeof Math[f] === "function") {
@@ -140,6 +193,12 @@ function listener(event, exec_state, event_data, data) {
fail("'abcd'.search(/a/)");
fail("'abcd'.split(/a/)");
+ // Test RegExp functions.
+ fail(`/a/.compile()`);
+ fail(`/a/.exec('abc')`);
+ fail(`/a/.test('abc')`);
+ fail(`/a/.toString()`);
+
// Test JSON functions.
success('{"abc":[1,2]}', "JSON.stringify(JSON.parse('{\"abc\":[1,2]}'))");
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-control.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-control.js
index e19a277546..e19a277546 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-control.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-control.js
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-iife.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-iife.js
index c8dc2a528d..c8dc2a528d 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-iife.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-iife.js
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-ops.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-ops.js
index b7e49dc88f..b7e49dc88f 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect-ops.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-ops.js
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect.js
index 26cf258a9d..bb6c38debd 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-no-side-effect.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect.js
@@ -14,6 +14,7 @@ var string2 = { toString() { print("x"); return "x"; } };
var array = [4, 5];
var error = new Error();
+function simple_return(x) { return x; }
function set_a() { a = 2; }
function get_a() { return a; }
var bound = get_a.bind(0);
@@ -61,6 +62,15 @@ function listener(event, exec_state, event_data, data) {
success("set_a", "set_a.name");
success(0, "bound.length");
success("bound get_a", "bound.name");
+ // Non-evaluated call.
+ success("abc", "['abc'].join('foo')");
+ // Constructed literals.
+ success([1], "[1]");
+ success({x: 1}, "({x: 1})");
+ fail("[a]");
+ fail("({x: a})");
+ // Test that template literal evaluation fails.
+ fail("simple_return`1`");
// Test that non-read-only code fails.
fail("exception = 1");
// Test that calling a non-read-only function fails.
diff --git a/deps/v8/test/debugger/debugger.gyp b/deps/v8/test/debugger/debugger.gyp
deleted file mode 100644
index c093d09717..0000000000
--- a/deps/v8/test/debugger/debugger.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'debugger_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'debugger.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/debugger/regress/regress-7421.js b/deps/v8/test/debugger/regress/regress-7421.js
new file mode 100644
index 0000000000..e82cc8a6db
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-7421.js
@@ -0,0 +1,78 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --block-concurrent-recompilation
+
+Debug = debug.Debug
+
+// Test that the side-effect check is not bypassed in optimized code.
+
+var exception = null;
+var counter = 0;
+
+function f1() {
+ counter++;
+}
+
+function wrapper1() {
+ for (var i = 0; i < 4; i++) {
+ // Get this function optimized before calling f1 to increment the counter.
+ // Check that that call performs the necessary side-effect checks.
+ %OptimizeOsr();
+ }
+ f1();
+}
+
+function f2() {
+ counter++;
+}
+
+function wrapper2(call) {
+ if (call) f2();
+}
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ function success(expectation, source) {
+ assertEquals(expectation,
+ exec_state.frame(0).evaluate(source, true).value());
+ }
+ function fail(source) {
+ assertThrows(() => exec_state.frame(0).evaluate(source, true),
+ EvalError);
+ }
+ wrapper1();
+ wrapper1();
+ fail("wrapper1()");
+
+ wrapper2(true);
+ wrapper2(false);
+ wrapper2(true);
+ %OptimizeFunctionOnNextCall(wrapper2);
+ wrapper2(false);
+ fail("wrapper2(true)");
+ fail("%OptimizeFunctionOnNextCall(wrapper2); wrapper2(true)");
+
+ %OptimizeFunctionOnNextCall(wrapper2, "concurrent");
+ wrapper2(false);
+ fail("%UnblockConcurrentRecompilation();" +
+ "%GetOptimizationStatus(wrapper2, 'sync');" +
+ "wrapper2(true);");
+ } catch (e) {
+ exception = e;
+ print(e, e.stack);
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function f() {
+ debugger;
+};
+
+f();
+
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-507070.js b/deps/v8/test/debugger/regress/regress-crbug-507070.js
index 0cb14b27e7..1ca452f580 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-507070.js
+++ b/deps/v8/test/debugger/regress/regress-crbug-507070.js
@@ -7,7 +7,7 @@
try { } catch(e) { }
try { try { } catch (e) { } } catch(e) { }
try {
- var Debug = %GetDebugContext().Debug;
+ var Debug = debug.Debug;
Debug.setListener(function(){});
} catch(e) { }
(function() {
diff --git a/deps/v8/test/debugger/regress/regress-crbug-808973.js b/deps/v8/test/debugger/regress/regress-crbug-808973.js
new file mode 100644
index 0000000000..e61cb3b562
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-crbug-808973.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --enable-inspector
+
+const Debug = debug.Debug;
+Debug.setListener(() => {});
+Debug.setBreakOnUncaughtException()
+
+function sleep() {
+ return new Promise(resolve => setTimeout(resolve, 1));
+}
+async function thrower() {
+ await sleep();
+ throw "a"; // Exception a
+}
+(async function() { thrower(); })();
diff --git a/deps/v8/test/debugger/test-api.js b/deps/v8/test/debugger/test-api.js
index 0e9670b277..e89318fc72 100644
--- a/deps/v8/test/debugger/test-api.js
+++ b/deps/v8/test/debugger/test-api.js
@@ -67,11 +67,6 @@ class DebugWrapper {
this.ExceptionBreak = { Caught : 0,
Uncaught: 1 };
- // The different script break point types.
- this.ScriptBreakPointType = { ScriptId: 0,
- ScriptName: 1,
- ScriptRegExp: 2 };
-
// Store the current script id so we can skip corresponding break events.
this.thisScriptId = %FunctionGetScriptId(receive);
@@ -139,13 +134,6 @@ class DebugWrapper {
return this.setBreakPointAtLocation(scriptid, loc, opt_condition);
}
- setScriptBreakPoint(type, scriptid, opt_line, opt_column, opt_condition) {
- // Only sets by script id are supported for now.
- assertEquals(this.ScriptBreakPointType.ScriptId, type);
- return this.setScriptBreakPointById(scriptid, opt_line, opt_column,
- opt_condition);
- }
-
setScriptBreakPointById(scriptid, opt_line, opt_column, opt_condition) {
const loc = %ScriptLocationFromLine2(scriptid, opt_line, opt_column, 0);
return this.setBreakPointAtLocation(scriptid, loc, opt_condition);
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
index e287077ec3..da923ff63b 100644
--- a/deps/v8/test/debugger/testcfg.py
+++ b/deps/v8/test/debugger/testcfg.py
@@ -12,7 +12,7 @@ FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
@@ -62,21 +62,21 @@ class TestCase(testcase.TestCase):
files.append(os.path.join(self.suite.root, self.path + self._get_suffix()))
return files
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
files = self._source_files
- if ctx.isolates:
+ if self._test_config.isolates:
files = files + ['--isolate'] + files
return files
def _get_source_flags(self):
return self._source_flags
- def _get_suite_flags(self, ctx):
+ def _get_suite_flags(self):
return ['--enable-inspector', '--allow-natives-syntax']
def _get_source_path(self):
return os.path.join(self.suite.root, self.path + self._get_suffix())
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/default.gyp b/deps/v8/test/default.gyp
deleted file mode 100644
index d1007b8a96..0000000000
--- a/deps/v8/test/default.gyp
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'default_run',
- 'type': 'none',
- 'dependencies': [
- 'cctest/cctest.gyp:cctest_run',
- 'debugger/debugger.gyp:debugger_run',
- 'fuzzer/fuzzer.gyp:fuzzer_run',
- 'inspector/inspector.gyp:inspector-test_run',
- 'intl/intl.gyp:intl_run',
- 'message/message.gyp:message_run',
- 'mjsunit/mjsunit.gyp:mjsunit_run',
- 'preparser/preparser.gyp:preparser_run',
- 'unittests/unittests.gyp:unittests_run',
- 'wasm-spec-tests/wasm-spec-tests.gyp:wasm_spec_tests_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'default.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/fuzzer/README.md b/deps/v8/test/fuzzer/README.md
index 5f10d47ec4..c4fd22a9e7 100644
--- a/deps/v8/test/fuzzer/README.md
+++ b/deps/v8/test/fuzzer/README.md
@@ -45,18 +45,10 @@ the fuzzer is called `foo`.
* Use this binary to reproduce issues found by cluster fuzz, e.g.
`out/x64.debug/v8_simple_foo_fuzzer testcase.foo`
-5. Copy the build rules of an existing fuzzer in
- [test/fuzzer/fuzzer.gyp](https://cs.chromium.org/chromium/src/v8/test/fuzzer/fuzzer.gyp),
- e.g. the build rules for the
- [wasm.cc](https://cs.chromium.org/chromium/src/v8/test/fuzzer/wasm.cc) fuzzer
- are `v8_simple_wasm_fuzzer` and `wasm_fuzzer_lib`
-
- * This build rule is needed to compile with gyp
-
-6. Copy the binary name and the test directory name in
+5. Copy the binary name and the test directory name into
[test/fuzzer/fuzzer.isolate](https://cs.chromium.org/chromium/src/v8/test/fuzzer/fuzzer.isolate)
-7. Add the fuzzer to the FuzzerTestSuite in
+6. Add the fuzzer to the FuzzerTestSuite in
[test/fuzzer/testcfg.py](https://cs.chromium.org/chromium/src/v8/test/fuzzer/testcfg.py)
* This step is needed to run the fuzzer with the files created in Step 2 on
diff --git a/deps/v8/test/fuzzer/fuzzer.isolate b/deps/v8/test/fuzzer/fuzzer.isolate
index 9391dcc7c0..adaf1f9f11 100644
--- a/deps/v8/test/fuzzer/fuzzer.isolate
+++ b/deps/v8/test/fuzzer/fuzzer.isolate
@@ -7,11 +7,11 @@
'files': [
'<(PRODUCT_DIR)/v8_simple_json_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_parser_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_regexp_builtins_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_regexp_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_multi_return_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_async_fuzzer<(EXECUTABLE_SUFFIX)',
- '<(PRODUCT_DIR)/v8_simple_wasm_call_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_code_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_compile_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_data_section_fuzzer<(EXECUTABLE_SUFFIX)',
@@ -26,10 +26,10 @@
'./json/',
'./parser/',
'./regexp/',
+ './regexp_builtins/',
'./multi_return/',
'./wasm/',
'./wasm_async/',
- './wasm_call/',
'./wasm_code/',
'./wasm_compile/',
'./wasm_data_section/',
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index 4766774005..3c372f92fd 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -17,6 +17,7 @@
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/simulator.h"
+#include "src/wasm/wasm-limits.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"
#include "test/fuzzer/fuzzer-support.h"
@@ -227,7 +228,11 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
InputProvider input(data, size);
// Create randomized descriptor.
size_t param_count = input.NumNonZeroBytes(0, kNumTypes);
+ if (param_count > Code::kMaxArguments) return 0;
+
size_t return_count = input.NumNonZeroBytes(param_count + 1, kNumTypes);
+ if (return_count > wasm::kV8MaxWasmFunctionMultiReturns) return 0;
+
CallDescriptor* desc =
CreateRandomCallDescriptor(&zone, return_count, param_count, &input);
@@ -282,7 +287,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
returns[i] = Constant(callee, type, 42);
outputs[i] = 42;
} else {
- int n = input.NextInt8(counts[index(type)]);
+ int n = input.NextInt32(counts[index(type)]);
int k = 0;
while (desc->GetParameterType(k) != desc->GetReturnType(i) || --n > 0) {
++k;
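The two guards added to multi-return.cc above reject randomized descriptors that exceed hard engine limits: Code::kMaxArguments bounds the argument count representable in generated code, and wasm::kV8MaxWasmFunctionMultiReturns bounds the multi-return count. Returning 0 from LLVMFuzzerTestOneInput is the conventional libFuzzer way to discard such inputs without reporting a failure. The pattern, condensed from the hunk:

```cpp
// Derive counts from the fuzz input, then discard call descriptors the
// compiler cannot legally build (condensed from the hunk above).
size_t param_count = input.NumNonZeroBytes(0, kNumTypes);
if (param_count > Code::kMaxArguments) return 0;

size_t return_count = input.NumNonZeroBytes(param_count + 1, kNumTypes);
if (return_count > wasm::kV8MaxWasmFunctionMultiReturns) return 0;
```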
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
new file mode 100644
index 0000000000..cc02958f2a
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -0,0 +1,448 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <functional>
+#include <string>
+
+#include "include/v8.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/regexp/jsregexp.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+// This is a hexdump of test/fuzzer/regexp_builtins/mjsunit.js generated using
+// `xxd -i mjsunit.js`. It contains the `assertEquals` JS function used below.
+#include "test/fuzzer/regexp_builtins/mjsunit.js.h"
+
+namespace v8 {
+namespace internal {
+namespace {
+
+constexpr bool kVerbose = false; // For debugging, verbose error messages.
+constexpr uint32_t kRegExpBuiltinsFuzzerHashSeed = 83;
+
+#define REGEXP_BUILTINS(V) \
+ V(Exec, exec) \
+ V(Match, Symbol.match) \
+ V(Replace, Symbol.replace) \
+ V(Search, Symbol.search) \
+ V(Split, Symbol.split) \
+ V(Test, test)
+
+struct FuzzerArgs {
+ FuzzerArgs(const uint8_t* input_data, size_t input_length,
+ v8::Local<v8::Context> context, Isolate* isolate)
+ : input_cursor(0),
+ input_data(input_data),
+ input_length(input_length),
+ context(context),
+ isolate(isolate) {}
+
+ size_t input_cursor;
+ const uint8_t* const input_data;
+ const size_t input_length;
+ v8::Local<v8::Context> context;
+ Isolate* const isolate;
+};
+
+enum RegExpBuiltin {
+#define CASE(name, ...) kRegExpPrototype##name,
+ REGEXP_BUILTINS(CASE)
+#undef CASE
+ kRegExpBuiltinCount,
+};
+
+#define CASE(name, ...) void TestRegExpPrototype##name(FuzzerArgs* args);
+REGEXP_BUILTINS(CASE)
+#undef CASE
+
+v8::Local<v8::String> v8_str(const char* s) {
+ return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), s,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+}
+
+v8::MaybeLocal<v8::Value> CompileRun(v8::Local<v8::Context> context,
+ const char* source) {
+ v8::Local<v8::Script> script;
+ v8::MaybeLocal<v8::Script> maybe_script =
+ v8::Script::Compile(context, v8_str(source));
+
+ if (!maybe_script.ToLocal(&script)) return v8::MaybeLocal<v8::Value>();
+ return script->Run(context);
+}
+
+uint8_t RandomByte(FuzzerArgs* args) {
+ // Silently wraps to the beginning of input data. Ideally, input data should
+ // be long enough to avoid that.
+ const size_t index = args->input_cursor;
+ CHECK(index < args->input_length);
+ args->input_cursor = (index + 1) % args->input_length;
+ return args->input_data[index];
+}
+
+void CompileMjsunit(const FuzzerArgs* args) {
+ std::string source(
+ reinterpret_cast<const char*>(test_fuzzer_regexp_builtins_mjsunit_js),
+ test_fuzzer_regexp_builtins_mjsunit_js_len);
+ CompileRun(args->context, source.c_str()).ToLocalChecked();
+}
+
+std::string NaiveEscape(const std::string& input, char escaped_char) {
+ std::string out;
+ for (size_t i = 0; i < input.size(); i++) {
+ // Just omit newlines and \0 chars and naively replace other escaped chars.
+ const char c = input[i];
+ if (c == '\r' || c == '\n' || c == '\0') continue;
+ out += (input[i] == escaped_char) ? '_' : c;
+ }
+ // Disallow trailing backslashes as they mess with our naive source string
+ // concatenation.
+ if (!out.empty() && out.back() == '\\') out.back() = '_';
+
+ return out;
+}
+
+std::string GenerateRandomString(FuzzerArgs* args, size_t length) {
+ // Limited to an ASCII subset for now.
+ std::string s(length, '\0');
+ for (size_t i = 0; i < length; i++) {
+ s[i] = static_cast<char>((RandomByte(args) % 0x5F) + 0x20);
+ }
+
+ return s;
+}
+
+std::string GenerateRandomPattern(FuzzerArgs* args) {
+ const int kMaxPatternLength = 16;
+ std::string s =
+ GenerateRandomString(args, (RandomByte(args) % kMaxPatternLength) + 1);
+ // A leading '*' would be a comment instead of a regexp literal.
+ if (s[0] == '*') s[0] = '.';
+ return s;
+}
+
+std::string PickRandomPresetPattern(FuzzerArgs* args) {
+ static const char* preset_patterns[] = {
+ ".", // Always matches.
+ "\\P{Any}", // Never matches.
+ "^", // Zero-width assertion, matches once.
+ "(?=.)", // Zero-width assertion, matches at every position.
+ "\\b", // Zero-width assertion, matches at each word boundary.
+ "()", // Zero-width assertion, matches at every position with groups.
+ "(?<a>)", // Likewise but with named groups.
+ "((((.).).).)", "(?<a>(?<b>(?<c>(?<d>.).).).)",
+ // Copied from
+ // https://cs.chromium.org/chromium/src/testing/libfuzzer/fuzzers/dicts/regexp.dict
+ "?", "abc", "()", "[]", "abc|def", "abc|def|ghi", "^xxx$",
+ "ab\\b\\d\\bcd", "\\w|\\d", "a*?", "abc+", "abc+?", "xyz?", "xyz??",
+ "xyz{0,1}", "xyz{0,1}?", "xyz{93}", "xyz{1,32}", "xyz{1,32}?", "xyz{1,}",
+ "xyz{1,}?", "a\\fb\\nc\\rd\\te\\vf", "a\\nb\\bc", "(?:foo)", "(?: foo )",
+ "foo|(bar|baz)|quux", "foo(?=bar)baz", "foo(?!bar)baz", "foo(?<=bar)baz",
+ "foo(?<!bar)baz", "()", "(?=)", "[]", "[x]", "[xyz]", "[a-zA-Z0-9]",
+ "[-123]", "[^123]", "]", "}", "[a-b-c]", "[x\\dz]", "[\\d-z]",
+ "[\\d-\\d]", "[z-\\d]", "\\cj\\cJ\\ci\\cI\\ck\\cK", "\\c!", "\\c_",
+ "\\c~", "[\\c!]", "[\\c_]", "[\\c~]", "[\\ca]", "[\\cz]", "[\\cA]",
+ "[\\cZ]", "[\\c1]", "\\[\\]\\{\\}\\(\\)\\%\\^\\#\\ ",
+ "[\\[\\]\\{\\}\\(\\)\\%\\^\\#\\ ]", "\\8", "\\9", "\\11", "\\11a",
+ "\\011", "\\118", "\\111", "\\1111", "(x)(x)(x)\\1", "(x)(x)(x)\\2",
+ "(x)(x)(x)\\3", "(x)(x)(x)\\4", "(x)(x)(x)\\1*", "(x)(x)(x)\\3*",
+ "(x)(x)(x)\\4*", "(x)(x)(x)(x)(x)(x)(x)(x)(x)(x)\\10",
+ "(x)(x)(x)(x)(x)(x)(x)(x)(x)(x)\\11", "(a)\\1", "(a\\1)", "(\\1a)",
+ "(\\2)(\\1)", "(?=a){0,10}a", "(?=a){1,10}a", "(?=a){9,10}a", "(?!a)?a",
+ "\\1(a)", "(?!(a))\\1", "(?!\\1(a\\1)\\1)\\1",
+ "\\1\\2(a(?:\\1(b\\1\\2))\\2)\\1", "[\\0]", "[\\11]", "[\\11a]",
+ "[\\011]", "[\\00011]", "[\\118]", "[\\111]", "[\\1111]", "\\x60",
+ "\\x3z", "\\c", "\\u0034", "\\u003z", "foo[z]*", "\\u{12345}",
+ "\\u{12345}\\u{23456}", "\\u{12345}{3}", "\\u{12345}*", "\\ud808\\udf45*",
+ "[\\ud808\\udf45-\\ud809\\udccc]", "a", "a|b", "a\\n", "a$", "a\\b!",
+ "a\\Bb", "a*?", "a?", "a??", "a{0,1}?", "a{1,2}?", "a+?", "(a)", "(a)\\1",
+ "(\\1a)", "\\1(a)", "a\\s", "a\\S", "a\\D", "a\\w", "a\\W", "a.", "a\\q",
+ "a[a]", "a[^a]", "a[a-z]", "a(?:b)", "a(?=b)", "a(?!b)", "\\x60",
+ "\\u0060", "\\cA", "\\q", "\\1112", "(a)\\1", "(?!a)?a\\1",
+ "(?:(?=a))a\\1", "a{}", "a{,}", "a{", "a{z}", "a{12z}", "a{12,",
+ "a{12,3b", "{}", "{,}", "{", "{z}", "{1z}", "{12,", "{12,3b", "a", "abc",
+ "a[bc]d", "a|bc", "ab|c", "a||bc", "(?:ab)", "(?:ab|cde)", "(?:ab)|cde",
+ "(ab)", "(ab|cde)", "(ab)\\1", "(ab|cde)\\1", "(?:ab)?", "(?:ab)+", "a?",
+ "a+", "a??", "a*?", "a+?", "(?:a?)?", "(?:a+)?", "(?:a?)+", "(?:a*)+",
+ "(?:a+)+", "(?:a?)*", "(?:a*)*", "(?:a+)*", "a{0}", "(?:a+){0,0}", "a*b",
+ "a+b", "a*b|c", "a+b|c", "(?:a{5,1000000}){3,1000000}", "(?:ab){4,7}",
+ "a\\bc", "a\\sc", "a\\Sc", "a(?=b)c", "a(?=bbb|bb)c", "a(?!bbb|bb)c",
+ "\xe2\x81\xa3", "[\xe2\x81\xa3]", "\xed\xb0\x80", "\xed\xa0\x80",
+ "(\xed\xb0\x80)\x01", "((\xed\xa0\x80))\x02", "\xf0\x9f\x92\xa9", "\x01",
+ "\x0f", "[-\xf0\x9f\x92\xa9]+", "[\xf0\x9f\x92\xa9-\xf4\x8f\xbf\xbf]",
+ "(?<=)", "(?<=a)", "(?<!)", "(?<!a)", "(?<a>)", "(?<a>.)",
+ "(?<a>.)\\k<a>", "\\p{Script=Greek}", "\\P{sc=Greek}",
+ "\\p{Script_Extensions=Greek}", "\\P{scx=Greek}",
+ "\\p{General_Category=Decimal_Number}", "\\P{gc=Decimal_Number}",
+ "\\p{gc=Nd}", "\\P{Decimal_Number}", "\\p{Nd}", "\\P{Any}",
+ "\\p{Changes_When_NFKC_Casefolded}",
+ };
+ static constexpr int preset_pattern_count = arraysize(preset_patterns);
+ STATIC_ASSERT(preset_pattern_count < 0xFF);
+
+ return std::string(preset_patterns[RandomByte(args) % preset_pattern_count]);
+}
+
+std::string PickPattern(FuzzerArgs* args) {
+ if ((RandomByte(args) & 3) == 0) {
+ return NaiveEscape(GenerateRandomPattern(args), '/');
+ } else {
+ return PickRandomPresetPattern(args);
+ }
+}
+
+std::string GenerateRandomString(FuzzerArgs* args) {
+ const int kMaxStringLength = 64;
+ return GenerateRandomString(args, RandomByte(args) % kMaxStringLength);
+}
+
+std::string PickSubjectString(FuzzerArgs* args) {
+ if ((RandomByte(args) & 0xF) == 0) {
+ // Sometimes we have a two-byte subject string.
+ return "f\\uD83D\\uDCA9ba\\u2603";
+ } else {
+ return NaiveEscape(GenerateRandomString(args), '\'');
+ }
+}
+
+std::string PickReplacementForReplace(FuzzerArgs* args) {
+ static const char* candidates[] = {
+ "'X'",
+ "'$1$2$3'",
+ "'$$$&$`$\\'$1'",
+ "() => 'X'",
+ "(arg0, arg1, arg2, arg3, arg4) => arg0 + arg1 + arg2 + arg3 + arg4",
+ "() => 42",
+ };
+ static const int candidate_count = arraysize(candidates);
+
+ if ((RandomByte(args) & 1) == 0) {
+ return candidates[RandomByte(args) % candidate_count];
+ } else {
+ return std::string("'") + NaiveEscape(GenerateRandomString(args), '\'') +
+ std::string("'");
+ }
+}
+
+std::string PickLimitForSplit(FuzzerArgs* args) {
+ // clang-format off
+ switch (RandomByte(args) & 0x3) {
+ case 0: return "undefined";
+ case 1: return "'not a number'";
+ case 2: return std::to_string(Smi::kMaxValue + RandomByte(args));
+ case 3: return std::to_string(RandomByte(args));
+ default: UNREACHABLE();
+ } // clang-format on
+}
+
+std::string GenerateRandomFlags(FuzzerArgs* args) {
+ constexpr size_t kFlagCount = JSRegExp::FlagCount();
+ CHECK_EQ(JSRegExp::kDotAll, 1 << (kFlagCount - 1));
+ STATIC_ASSERT((1 << kFlagCount) - 1 < 0xFF);
+
+ const size_t flags = RandomByte(args) & ((1 << kFlagCount) - 1);
+
+ int cursor = 0;
+ char buffer[kFlagCount] = {'\0'};
+
+ if (flags & JSRegExp::kGlobal) buffer[cursor++] = 'g';
+ if (flags & JSRegExp::kIgnoreCase) buffer[cursor++] = 'i';
+ if (flags & JSRegExp::kMultiline) buffer[cursor++] = 'm';
+ if (flags & JSRegExp::kSticky) buffer[cursor++] = 'y';
+ if (flags & JSRegExp::kUnicode) buffer[cursor++] = 'u';
+ if (flags & JSRegExp::kDotAll) buffer[cursor++] = 's';
+
+ return std::string(buffer, cursor);
+}
+
+std::string GenerateRandomLastIndex(FuzzerArgs* args) {
+ static const char* candidates[] = {
+ "undefined", "-1", "0",
+ "1", "2", "3",
+ "4", "5", "6",
+ "7", "8", "9",
+ "50", "4294967296", "2147483647",
+ "2147483648", "NaN", "Not a Number",
+ };
+ static const int candidate_count = arraysize(candidates);
+ return candidates[RandomByte(args) % candidate_count];
+}
+
+void RunTest(FuzzerArgs* args) {
+ switch (RandomByte(args) % kRegExpBuiltinCount) {
+#define CASE(name, ...) \
+ case kRegExpPrototype##name: \
+ TestRegExpPrototype##name(args); \
+ break;
+ REGEXP_BUILTINS(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ }
+}
+
+std::string GenerateSourceString(FuzzerArgs* args, const std::string& test) {
+ std::string pattern = PickPattern(args);
+ std::string flags = GenerateRandomFlags(args);
+ std::string last_index = GenerateRandomLastIndex(args);
+ std::string subject = PickSubjectString(args);
+
+ // clang-format off
+ std::stringstream ss;
+ ss << "function test() {\n"
+ << " const re = /" << pattern<< "/"
+ << flags << ";\n"
+ << " re.lastIndex = " << last_index << ";\n"
+ << " const str = '" << subject << "';\n"
+ << " let result = null;\n"
+ << " let exception = null;\n"
+ << " try {\n"
+ << " result = " << test << "\n"
+ << " } catch (e) {\n"
+ << " exception = e;\n"
+ << " }\n"
+ << " return { result: result, re: re, exception: exception };\n"
+ << "}\n"
+ << "%SetForceSlowPath(false);\n"
+ << "test(); // Run once ahead of time to compile the regexp.\n"
+ << "const fast = test();\n"
+ << "%SetForceSlowPath(true);\n"
+ << "const slow = test();\n"
+ << "%SetForceSlowPath(false);\n";
+ // clang-format on
+ return ss.str();
+}
+
+void PrintExceptionMessage(v8::TryCatch* try_catch) {
+ CHECK(try_catch->HasCaught());
+ static const int kBufferLength = 256;
+ char buffer[kBufferLength + 1];
+ try_catch->Message()->Get()->WriteOneByte(
+ reinterpret_cast<uint8_t*>(&buffer[0]), 0, kBufferLength);
+ fprintf(stderr, "%s\n", buffer);
+}
+
+bool ResultsAreIdentical(FuzzerArgs* args) {
+ std::string source =
+ "assertEquals(fast.exception, slow.exception);\n"
+ "assertEquals(fast.result, slow.result);\n"
+ "if (fast.result !== null)\n"
+ " assertEquals(fast.result.groups, slow.result.groups);\n"
+ "assertEquals(fast.re.lastIndex, slow.re.lastIndex);\n";
+
+ v8::Local<v8::Value> result;
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(args->isolate));
+ if (!CompileRun(args->context, source.c_str()).ToLocal(&result)) {
+ PrintExceptionMessage(&try_catch);
+ args->isolate->clear_pending_exception();
+ return false;
+ }
+
+ return true;
+}
+
+void CompileRunAndVerify(FuzzerArgs* args, const std::string& source) {
+ v8::Local<v8::Value> result;
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(args->isolate));
+ if (!CompileRun(args->context, source.c_str()).ToLocal(&result)) {
+ args->isolate->clear_pending_exception();
+ // No need to verify result if an exception was thrown here, since that
+ // implies a syntax error somewhere in the pattern or string. We simply
+ // ignore those.
+ if (kVerbose) {
+ PrintExceptionMessage(&try_catch);
+ fprintf(stderr, "Failed to run script:\n```\n%s\n```\n", source.c_str());
+ }
+ return;
+ }
+
+ if (!ResultsAreIdentical(args)) {
+ uint32_t hash = StringHasher::HashSequentialString(
+ args->input_data, static_cast<int>(args->input_length),
+ kRegExpBuiltinsFuzzerHashSeed);
+ V8_Fatal(__FILE__, __LINE__,
+           "!ResultsAreIdentical(args); RegExpBuiltinsFuzzerHash=%x", hash);
+ }
+}
+
+void TestRegExpPrototypeExec(FuzzerArgs* args) {
+ std::string test = "re.exec(str);";
+ std::string source = GenerateSourceString(args, test);
+ CompileRunAndVerify(args, source);
+}
+
+void TestRegExpPrototypeMatch(FuzzerArgs* args) {
+ std::string test = "re[Symbol.match](str);";
+ std::string source = GenerateSourceString(args, test);
+ CompileRunAndVerify(args, source);
+}
+
+void TestRegExpPrototypeReplace(FuzzerArgs* args) {
+ std::string replacement = PickReplacementForReplace(args);
+ std::string test = "re[Symbol.replace](str, " + replacement + ");";
+ std::string source = GenerateSourceString(args, test);
+ CompileRunAndVerify(args, source);
+}
+
+void TestRegExpPrototypeSearch(FuzzerArgs* args) {
+ std::string test = "re[Symbol.search](str);";
+ std::string source = GenerateSourceString(args, test);
+ CompileRunAndVerify(args, source);
+}
+
+void TestRegExpPrototypeSplit(FuzzerArgs* args) {
+ std::string limit = PickLimitForSplit(args);
+ std::string test = "re[Symbol.split](str, " + limit + ");";
+ std::string source = GenerateSourceString(args, test);
+ CompileRunAndVerify(args, source);
+}
+
+void TestRegExpPrototypeTest(FuzzerArgs* args) {
+ std::string test = "re.test(str);";
+ std::string source = GenerateSourceString(args, test);
+ CompileRunAndVerify(args, source);
+}
+
+#undef REGEXP_BUILTINS
+
+} // namespace
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  if (size < 64) return 0;  // Require enough randomness to build a test case.
+
+ // Flag definitions.
+
+ FLAG_allow_natives_syntax = true;
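+  // allow_natives_syntax is required so the generated scripts can call
+  // %SetForceSlowPath and exercise both the fast and the slow builtin paths.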
+
+ // V8 setup.
+
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = support->GetContext();
+ v8::Context::Scope context_scope(context);
+ v8::TryCatch try_catch(isolate);
+
+ CHECK(!i_isolate->has_pending_exception());
+
+ // And run.
+
+ FuzzerArgs args(data, size, context, i_isolate);
+ CompileMjsunit(&args);
+ RunTest(&args);
+
+ CHECK(!i_isolate->has_pending_exception());
+ return 0;
+}
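+
+// Sketch of typical usage (corpus path is a placeholder): the test
+// configuration below names the standalone libFuzzer binary for this suite
+// v8_simple_regexp_builtins_fuzzer, and the driver calls
+// LLVMFuzzerTestOneInput once per mutated input:
+//
+//   ./v8_simple_regexp_builtins_fuzzer path/to/corpus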
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/fuzzer/regexp_builtins/mjsunit.js b/deps/v8/test/fuzzer/regexp_builtins/mjsunit.js
new file mode 100644
index 0000000000..53795dc63d
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp_builtins/mjsunit.js
@@ -0,0 +1,188 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Stripped-down version of test/mjsunit/mjsunit.js that only contains
+// assertEquals.
+var assertEquals;
+
+(function () {
+ var ObjectPrototypeToString = Object.prototype.toString;
+ var NumberPrototypeValueOf = Number.prototype.valueOf;
+ var BooleanPrototypeValueOf = Boolean.prototype.valueOf;
+ var StringPrototypeValueOf = String.prototype.valueOf;
+ var DatePrototypeValueOf = Date.prototype.valueOf;
+ var RegExpPrototypeToString = RegExp.prototype.toString;
+ var ArrayPrototypeForEach = Array.prototype.forEach;
+ var ArrayPrototypeJoin = Array.prototype.join;
+ var ArrayPrototypeMap = Array.prototype.map;
+ var ArrayPrototypePush = Array.prototype.push;
+
+ var BigIntPrototypeValueOf;
+ try {
+ BigIntPrototypeValueOf = BigInt.prototype.valueOf;
+ } catch(e) {}
+
+ function classOf(object) {
+ var string = ObjectPrototypeToString.call(object);
+ return string.substring(8, string.length - 1);
+ }
+
+ function ValueOf(value) {
+ switch (classOf(value)) {
+ case "Number":
+ return NumberPrototypeValueOf.call(value);
+ case "BigInt":
+ return BigIntPrototypeValueOf.call(value);
+ case "String":
+ return StringPrototypeValueOf.call(value);
+ case "Boolean":
+ return BooleanPrototypeValueOf.call(value);
+ case "Date":
+ return DatePrototypeValueOf.call(value);
+ default:
+ return value;
+ }
+ }
+
+
+ function PrettyPrint(value) {
+ switch (typeof value) {
+ case "string":
+ return JSON.stringify(value);
+ case "bigint":
+ return String(value) + "n";
+ case "number":
+ if (value === 0 && (1 / value) < 0) return "-0";
+ // FALLTHROUGH.
+ case "boolean":
+ case "undefined":
+ case "function":
+ case "symbol":
+ return String(value);
+ case "object":
+ if (value === null) return "null";
+ var objectClass = classOf(value);
+ switch (objectClass) {
+ case "Number":
+ case "BigInt":
+ case "String":
+ case "Boolean":
+ case "Date":
+ return objectClass + "(" + PrettyPrint(ValueOf(value)) + ")";
+ case "RegExp":
+ return RegExpPrototypeToString.call(value);
+ case "Array":
+ var mapped = ArrayPrototypeMap.call(value, PrettyPrintArrayElement);
+ var joined = ArrayPrototypeJoin.call(mapped, ",");
+ return "[" + joined + "]";
+ case "Uint8Array":
+ case "Int8Array":
+ case "Int16Array":
+ case "Uint16Array":
+ case "Uint32Array":
+ case "Int32Array":
+ case "Float32Array":
+ case "Float64Array":
+ var joined = ArrayPrototypeJoin.call(value, ",");
+ return objectClass + "([" + joined + "])";
+ case "Object":
+ break;
+ default:
+ return objectClass + "()";
+ }
+ var name = value.constructor.name;
+ if (name) return name + "()";
+ return "Object()";
+ default:
+ return "-- unknown value --";
+ }
+ }
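+  // For example: PrettyPrint(-0) yields "-0", PrettyPrint(new Number(3))
+  // yields "Number(3)", and PrettyPrint([1, , 3]) yields "[1,,3]".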
+
+ function PrettyPrintArrayElement(value, index, array) {
+ if (value === undefined && !(index in array)) return "";
+ return PrettyPrint(value);
+ }
+
+ failWithMessage = function failWithMessage(message) {
+ throw new Error(message);
+ }
+
+ function formatFailureText(expectedText, found, name_opt) {
+ var message = "Fail" + "ure";
+ if (name_opt) {
+ message += " (" + name_opt + ")";
+ }
+
+ var foundText = PrettyPrint(found);
+ if (expectedText.length <= 40 && foundText.length <= 40) {
+ message += ": expected <" + expectedText + "> found <" + foundText + ">";
+ } else {
+ message += ":\nexpected:\n" + expectedText + "\nfound:\n" + foundText;
+ }
+ return message;
+ }
+
+ function fail(expectedText, found, name_opt) {
+ return failWithMessage(formatFailureText(expectedText, found, name_opt));
+ }
+
+ function deepObjectEquals(a, b) {
+ var aProps = Object.keys(a);
+ aProps.sort();
+ var bProps = Object.keys(b);
+ bProps.sort();
+ if (!deepEquals(aProps, bProps)) {
+ return false;
+ }
+ for (var i = 0; i < aProps.length; i++) {
+ if (!deepEquals(a[aProps[i]], b[aProps[i]])) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ function deepEquals(a, b) {
+ if (a === b) {
+ // Check for -0.
+ if (a === 0) return (1 / a) === (1 / b);
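+      // E.g. deepEquals(0, -0) is false: 1 / 0 is Infinity, 1 / -0 is -Infinity.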
+ return true;
+ }
+ if (typeof a !== typeof b) return false;
+ if (typeof a === "number") return isNaN(a) && isNaN(b);
+ if (typeof a !== "object" && typeof a !== "function") return false;
+ // Neither a nor b is primitive.
+ var objectClass = classOf(a);
+ if (objectClass !== classOf(b)) return false;
+ if (objectClass === "RegExp") {
+ // For RegExp, just compare pattern and flags using its toString.
+ return RegExpPrototypeToString.call(a) ===
+ RegExpPrototypeToString.call(b);
+ }
+ // Functions are only identical to themselves.
+ if (objectClass === "Function") return false;
+ if (objectClass === "Array") {
+ var elementCount = 0;
+ if (a.length !== b.length) {
+ return false;
+ }
+ for (var i = 0; i < a.length; i++) {
+ if (!deepEquals(a[i], b[i])) return false;
+ }
+ return true;
+ }
+ if (objectClass === "String" || objectClass === "Number" ||
+ objectClass === "BigInt" || objectClass === "Boolean" ||
+ objectClass === "Date") {
+ if (ValueOf(a) !== ValueOf(b)) return false;
+ }
+ return deepObjectEquals(a, b);
+ }
+
+ assertEquals = function assertEquals(expected, found, name_opt) {
+ if (!deepEquals(found, expected)) {
+ fail(PrettyPrint(expected), found, name_opt);
+ }
+ };
+})();
diff --git a/deps/v8/test/fuzzer/regexp_builtins/mjsunit.js.h b/deps/v8/test/fuzzer/regexp_builtins/mjsunit.js.h
new file mode 100644
index 0000000000..696e3b9eca
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp_builtins/mjsunit.js.h
@@ -0,0 +1,496 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+unsigned char test_fuzzer_regexp_builtins_mjsunit_js[] = {
+ 0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74,
+ 0x20, 0x32, 0x30, 0x31, 0x37, 0x20, 0x74, 0x68, 0x65, 0x20, 0x56, 0x38,
+ 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x61, 0x75, 0x74,
+ 0x68, 0x6f, 0x72, 0x73, 0x2e, 0x20, 0x41, 0x6c, 0x6c, 0x20, 0x72, 0x69,
+ 0x67, 0x68, 0x74, 0x73, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x2e, 0x0a, 0x2f, 0x2f, 0x20, 0x55, 0x73, 0x65, 0x20, 0x6f, 0x66,
+ 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x20, 0x63, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x73, 0x20, 0x67, 0x6f, 0x76,
+ 0x65, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x61, 0x20, 0x42,
+ 0x53, 0x44, 0x2d, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x20, 0x6c, 0x69, 0x63,
+ 0x65, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x63, 0x61,
+ 0x6e, 0x20, 0x62, 0x65, 0x0a, 0x2f, 0x2f, 0x20, 0x66, 0x6f, 0x75, 0x6e,
+ 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x4c, 0x49, 0x43,
+ 0x45, 0x4e, 0x53, 0x45, 0x20, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x0a, 0x0a,
+ 0x2f, 0x2f, 0x20, 0x53, 0x74, 0x72, 0x69, 0x70, 0x70, 0x65, 0x64, 0x2d,
+ 0x64, 0x6f, 0x77, 0x6e, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x20, 0x6f, 0x66, 0x20, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x6d, 0x6a, 0x73,
+ 0x75, 0x6e, 0x69, 0x74, 0x2f, 0x6d, 0x6a, 0x73, 0x75, 0x6e, 0x69, 0x74,
+ 0x2e, 0x6a, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6f, 0x6e, 0x6c,
+ 0x79, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x0a, 0x2f,
+ 0x2f, 0x20, 0x61, 0x73, 0x73, 0x65, 0x72, 0x74, 0x45, 0x71, 0x75, 0x61,
+ 0x6c, 0x73, 0x2e, 0x53, 0x0a, 0x76, 0x61, 0x72, 0x20, 0x61, 0x73, 0x73,
+ 0x65, 0x72, 0x74, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x3b, 0x0a, 0x0a,
+ 0x28, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x29,
+ 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65,
+ 0x54, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x3d, 0x20, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x3b, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x4e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66, 0x20, 0x3d, 0x20, 0x4e, 0x75, 0x6d,
+ 0x62, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66, 0x3b, 0x0a, 0x20,
+ 0x20, 0x76, 0x61, 0x72, 0x20, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x4f, 0x66, 0x20, 0x3d, 0x20, 0x42, 0x6f, 0x6f, 0x6c, 0x65,
+ 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66, 0x3b, 0x0a, 0x20, 0x20,
+ 0x76, 0x61, 0x72, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x4f, 0x66, 0x20, 0x3d, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x4f, 0x66, 0x3b, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72,
+ 0x20, 0x44, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79,
+ 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66, 0x20, 0x3d, 0x20,
+ 0x44, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66, 0x3b, 0x0a,
+ 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x52, 0x65, 0x67, 0x45, 0x78, 0x70,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x54, 0x6f, 0x53,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x3d, 0x20, 0x52, 0x65, 0x67, 0x45,
+ 0x78, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3b, 0x0a, 0x20,
+ 0x20, 0x76, 0x61, 0x72, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x46, 0x6f, 0x72, 0x45, 0x61,
+ 0x63, 0x68, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x66, 0x6f, 0x72,
+ 0x45, 0x61, 0x63, 0x68, 0x3b, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20,
+ 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79,
+ 0x70, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72,
+ 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74,
+ 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x6d, 0x61, 0x70, 0x3b, 0x0a, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20,
+ 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79,
+ 0x70, 0x65, 0x50, 0x75, 0x73, 0x68, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x70, 0x75, 0x73, 0x68, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x76, 0x61,
+ 0x72, 0x20, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66,
+ 0x3b, 0x0a, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66,
+ 0x20, 0x3d, 0x20, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x4f, 0x66, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74,
+ 0x63, 0x68, 0x28, 0x65, 0x29, 0x20, 0x7b, 0x7d, 0x0a, 0x0a, 0x20, 0x20,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x63, 0x6c, 0x61,
+ 0x73, 0x73, 0x4f, 0x66, 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x29,
+ 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x73,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x3d, 0x20, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x54,
+ 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x63, 0x61, 0x6c, 0x6c,
+ 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x73, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x28, 0x38, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x2d, 0x20, 0x31, 0x29, 0x3b,
+ 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66,
+ 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68, 0x20, 0x28, 0x63, 0x6c,
+ 0x61, 0x73, 0x73, 0x4f, 0x66, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29,
+ 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61,
+ 0x73, 0x65, 0x20, 0x22, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x20, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x4f, 0x66, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61,
+ 0x73, 0x65, 0x20, 0x22, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x22, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x20, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x4f, 0x66, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61,
+ 0x73, 0x65, 0x20, 0x22, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x4f, 0x66, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61,
+ 0x73, 0x65, 0x20, 0x22, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x22,
+ 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65,
+ 0x74, 0x75, 0x72, 0x6e, 0x20, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x4f, 0x66, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x63, 0x61, 0x73, 0x65, 0x20, 0x22, 0x44, 0x61, 0x74, 0x65, 0x22, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x20, 0x44, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66,
+ 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29,
+ 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d,
+ 0x0a, 0x0a, 0x0a, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x20, 0x50, 0x72, 0x65, 0x74, 0x74, 0x79, 0x50, 0x72, 0x69, 0x6e,
+ 0x74, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68, 0x20, 0x28, 0x74,
+ 0x79, 0x70, 0x65, 0x6f, 0x66, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29,
+ 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73,
+ 0x65, 0x20, 0x22, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x3a, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75,
+ 0x72, 0x6e, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x73, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x69, 0x66, 0x79, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29,
+ 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65,
+ 0x20, 0x22, 0x62, 0x69, 0x67, 0x69, 0x6e, 0x74, 0x22, 0x3a, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x28, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x29, 0x20, 0x2b, 0x20, 0x22, 0x6e, 0x22, 0x3b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x22, 0x6e,
+ 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x30, 0x20, 0x26, 0x26, 0x20, 0x28,
+ 0x31, 0x20, 0x2f, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x3c,
+ 0x20, 0x30, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x22,
+ 0x2d, 0x30, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x2f, 0x2f, 0x20, 0x46, 0x41, 0x4c, 0x4c, 0x54, 0x48, 0x52, 0x4f,
+ 0x55, 0x47, 0x48, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63,
+ 0x61, 0x73, 0x65, 0x20, 0x22, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e,
+ 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73,
+ 0x65, 0x20, 0x22, 0x75, 0x6e, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
+ 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73,
+ 0x65, 0x20, 0x22, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65,
+ 0x20, 0x22, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x22, 0x3a, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x28, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63,
+ 0x61, 0x73, 0x65, 0x20, 0x22, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22,
+ 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66,
+ 0x20, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20,
+ 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e,
+ 0x20, 0x22, 0x6e, 0x75, 0x6c, 0x6c, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x3d, 0x20, 0x63,
+ 0x6c, 0x61, 0x73, 0x73, 0x4f, 0x66, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73,
+ 0x77, 0x69, 0x74, 0x63, 0x68, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65,
+ 0x20, 0x22, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x3a, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73,
+ 0x65, 0x20, 0x22, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x22, 0x3a, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61,
+ 0x73, 0x65, 0x20, 0x22, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63,
+ 0x61, 0x73, 0x65, 0x20, 0x22, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e,
+ 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x22, 0x44, 0x61, 0x74, 0x65, 0x22,
+ 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x2b, 0x20, 0x22,
+ 0x28, 0x22, 0x20, 0x2b, 0x20, 0x50, 0x72, 0x65, 0x74, 0x74, 0x79, 0x50,
+ 0x72, 0x69, 0x6e, 0x74, 0x28, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66,
+ 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x29, 0x20, 0x2b, 0x20, 0x22,
+ 0x29, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x22, 0x52, 0x65, 0x67, 0x45,
+ 0x78, 0x70, 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20,
+ 0x52, 0x65, 0x67, 0x45, 0x78, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74,
+ 0x79, 0x70, 0x65, 0x54, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x3b,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63,
+ 0x61, 0x73, 0x65, 0x20, 0x22, 0x41, 0x72, 0x72, 0x61, 0x79, 0x22, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x76, 0x61, 0x72, 0x20, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x20,
+ 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, 0x2e, 0x63, 0x61, 0x6c, 0x6c,
+ 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x50, 0x72, 0x65, 0x74,
+ 0x74, 0x79, 0x50, 0x72, 0x69, 0x6e, 0x74, 0x41, 0x72, 0x72, 0x61, 0x79,
+ 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61,
+ 0x72, 0x20, 0x6a, 0x6f, 0x69, 0x6e, 0x65, 0x64, 0x20, 0x3d, 0x20, 0x41,
+ 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74, 0x79, 0x70,
+ 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x6d,
+ 0x61, 0x70, 0x70, 0x65, 0x64, 0x2c, 0x20, 0x22, 0x2c, 0x22, 0x29, 0x3b,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x22, 0x5b, 0x22, 0x20,
+ 0x2b, 0x20, 0x6a, 0x6f, 0x69, 0x6e, 0x65, 0x64, 0x20, 0x2b, 0x20, 0x22,
+ 0x5d, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x22, 0x55, 0x69, 0x6e, 0x74,
+ 0x38, 0x41, 0x72, 0x72, 0x61, 0x79, 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20,
+ 0x22, 0x49, 0x6e, 0x74, 0x38, 0x41, 0x72, 0x72, 0x61, 0x79, 0x22, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63,
+ 0x61, 0x73, 0x65, 0x20, 0x22, 0x49, 0x6e, 0x74, 0x31, 0x36, 0x41, 0x72,
+ 0x72, 0x61, 0x79, 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x22, 0x55, 0x69,
+ 0x6e, 0x74, 0x31, 0x36, 0x41, 0x72, 0x72, 0x61, 0x79, 0x22, 0x3a, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61,
+ 0x73, 0x65, 0x20, 0x22, 0x55, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x41, 0x72,
+ 0x72, 0x61, 0x79, 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x22, 0x49, 0x6e,
+ 0x74, 0x33, 0x32, 0x41, 0x72, 0x72, 0x61, 0x79, 0x22, 0x3a, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73,
+ 0x65, 0x20, 0x22, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x33, 0x32, 0x41, 0x72,
+ 0x72, 0x61, 0x79, 0x22, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x22, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x36, 0x34, 0x41, 0x72, 0x72, 0x61, 0x79, 0x22, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x76, 0x61, 0x72, 0x20, 0x6a, 0x6f, 0x69, 0x6e, 0x65, 0x64, 0x20,
+ 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x74, 0x79, 0x70, 0x65, 0x4a, 0x6f, 0x69, 0x6e, 0x2e, 0x63, 0x61, 0x6c,
+ 0x6c, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x22, 0x2c, 0x22,
+ 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x2b, 0x20,
+ 0x22, 0x28, 0x5b, 0x22, 0x20, 0x2b, 0x20, 0x6a, 0x6f, 0x69, 0x6e, 0x65,
+ 0x64, 0x20, 0x2b, 0x20, 0x22, 0x5d, 0x29, 0x22, 0x3b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x73, 0x65,
+ 0x20, 0x22, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x3a, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62,
+ 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x3a,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x2b, 0x20, 0x22, 0x28,
+ 0x29, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61,
+ 0x72, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x20, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x6f, 0x72, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6e, 0x61, 0x6d,
+ 0x65, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x61,
+ 0x6d, 0x65, 0x20, 0x2b, 0x20, 0x22, 0x28, 0x29, 0x22, 0x3b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x20, 0x22, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x29, 0x22,
+ 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x22, 0x2d, 0x2d, 0x20,
+ 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x20, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x20, 0x2d, 0x2d, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d,
+ 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x50, 0x72, 0x65, 0x74, 0x74, 0x79, 0x50,
+ 0x72, 0x69, 0x6e, 0x74, 0x41, 0x72, 0x72, 0x61, 0x79, 0x45, 0x6c, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20,
+ 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2c, 0x20, 0x61, 0x72, 0x72, 0x61, 0x79,
+ 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x75, 0x6e,
+ 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x20, 0x26, 0x26, 0x20, 0x21,
+ 0x28, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x72,
+ 0x72, 0x61, 0x79, 0x29, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e,
+ 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x20, 0x50, 0x72, 0x65, 0x74, 0x74, 0x79, 0x50, 0x72,
+ 0x69, 0x6e, 0x74, 0x28, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x3b, 0x0a,
+ 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x57,
+ 0x69, 0x74, 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d,
+ 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x61,
+ 0x69, 0x6c, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x28, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x29, 0x20, 0x7b,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x6e,
+ 0x65, 0x77, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a,
+ 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65,
+ 0x54, 0x65, 0x78, 0x74, 0x28, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65,
+ 0x64, 0x54, 0x65, 0x78, 0x74, 0x2c, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64,
+ 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x29, 0x20,
+ 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x22, 0x46, 0x61, 0x69,
+ 0x6c, 0x22, 0x20, 0x2b, 0x20, 0x22, 0x75, 0x72, 0x65, 0x22, 0x3b, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6e, 0x61, 0x6d, 0x65,
+ 0x5f, 0x6f, 0x70, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x2b, 0x3d,
+ 0x20, 0x22, 0x20, 0x28, 0x22, 0x20, 0x2b, 0x20, 0x6e, 0x61, 0x6d, 0x65,
+ 0x5f, 0x6f, 0x70, 0x74, 0x20, 0x2b, 0x20, 0x22, 0x29, 0x22, 0x3b, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76,
+ 0x61, 0x72, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x65, 0x78, 0x74,
+ 0x20, 0x3d, 0x20, 0x50, 0x72, 0x65, 0x74, 0x74, 0x79, 0x50, 0x72, 0x69,
+ 0x6e, 0x74, 0x28, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x29, 0x3b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x65, 0x78, 0x70, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x54, 0x65, 0x78, 0x74, 0x2e, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x20, 0x3c, 0x3d, 0x20, 0x34, 0x30, 0x20, 0x26, 0x26, 0x20,
+ 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x65, 0x78, 0x74, 0x2e, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3c, 0x3d, 0x20, 0x34, 0x30, 0x29, 0x20,
+ 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x20, 0x2b, 0x3d, 0x20, 0x22, 0x3a, 0x20, 0x65, 0x78,
+ 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x20, 0x3c, 0x22, 0x20, 0x2b, 0x20,
+ 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x65, 0x78, 0x74,
+ 0x20, 0x2b, 0x20, 0x22, 0x3e, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20,
+ 0x3c, 0x22, 0x20, 0x2b, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x65,
+ 0x78, 0x74, 0x20, 0x2b, 0x20, 0x22, 0x3e, 0x22, 0x3b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x20, 0x2b, 0x3d, 0x20, 0x22, 0x3a, 0x5c, 0x6e, 0x65, 0x78, 0x70, 0x65,
+ 0x63, 0x74, 0x65, 0x64, 0x3a, 0x5c, 0x6e, 0x22, 0x20, 0x2b, 0x20, 0x65,
+ 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x65, 0x78, 0x74, 0x20,
+ 0x2b, 0x20, 0x22, 0x5c, 0x6e, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x5c,
+ 0x6e, 0x22, 0x20, 0x2b, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x65,
+ 0x78, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20,
+ 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x61,
+ 0x69, 0x6c, 0x28, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54,
+ 0x65, 0x78, 0x74, 0x2c, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20,
+ 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x29, 0x20, 0x7b, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66,
+ 0x61, 0x69, 0x6c, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x28, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x46, 0x61, 0x69,
+ 0x6c, 0x75, 0x72, 0x65, 0x54, 0x65, 0x78, 0x74, 0x28, 0x65, 0x78, 0x70,
+ 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x65, 0x78, 0x74, 0x2c, 0x20, 0x66,
+ 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f,
+ 0x70, 0x74, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20,
+ 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x64, 0x65,
+ 0x65, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x71, 0x75, 0x61,
+ 0x6c, 0x73, 0x28, 0x61, 0x2c, 0x20, 0x62, 0x29, 0x20, 0x7b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x61, 0x50, 0x72, 0x6f, 0x70,
+ 0x73, 0x20, 0x3d, 0x20, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x6b,
+ 0x65, 0x79, 0x73, 0x28, 0x61, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x61, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x73, 0x6f, 0x72, 0x74, 0x28,
+ 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x62,
+ 0x50, 0x72, 0x6f, 0x70, 0x73, 0x20, 0x3d, 0x20, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x2e, 0x6b, 0x65, 0x79, 0x73, 0x28, 0x62, 0x29, 0x3b, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x62, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x73,
+ 0x6f, 0x72, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69,
+ 0x66, 0x20, 0x28, 0x21, 0x64, 0x65, 0x65, 0x70, 0x45, 0x71, 0x75, 0x61,
+ 0x6c, 0x73, 0x28, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x2c, 0x20, 0x62,
+ 0x50, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, 0x72,
+ 0x20, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20,
+ 0x61, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74,
+ 0x68, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x64, 0x65, 0x65,
+ 0x70, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x28, 0x61, 0x5b, 0x61, 0x50,
+ 0x72, 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, 0x5d, 0x2c, 0x20, 0x62, 0x5b,
+ 0x61, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, 0x5d, 0x29, 0x29,
+ 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72,
+ 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a,
+ 0x0a, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20,
+ 0x64, 0x65, 0x65, 0x70, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x28, 0x61,
+ 0x2c, 0x20, 0x62, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69,
+ 0x66, 0x20, 0x28, 0x61, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x62, 0x29, 0x20,
+ 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x2d, 0x30, 0x2e,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x61,
+ 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75,
+ 0x72, 0x6e, 0x20, 0x28, 0x31, 0x20, 0x2f, 0x20, 0x61, 0x29, 0x20, 0x3d,
+ 0x3d, 0x3d, 0x20, 0x28, 0x31, 0x20, 0x2f, 0x20, 0x62, 0x29, 0x3b, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e,
+ 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d,
+ 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x79, 0x70,
+ 0x65, 0x6f, 0x66, 0x20, 0x61, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x74, 0x79,
+ 0x70, 0x65, 0x6f, 0x66, 0x20, 0x62, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75,
+ 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x79, 0x70, 0x65, 0x6f, 0x66,
+ 0x20, 0x61, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x6e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x22, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20,
+ 0x69, 0x73, 0x4e, 0x61, 0x4e, 0x28, 0x61, 0x29, 0x20, 0x26, 0x26, 0x20,
+ 0x69, 0x73, 0x4e, 0x61, 0x4e, 0x28, 0x62, 0x29, 0x3b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x79, 0x70, 0x65, 0x6f, 0x66,
+ 0x20, 0x61, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x22, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x22, 0x20, 0x26, 0x26, 0x20, 0x74, 0x79, 0x70, 0x65, 0x6f,
+ 0x66, 0x20, 0x61, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x22, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75,
+ 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4e, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72,
+ 0x20, 0x61, 0x20, 0x6e, 0x6f, 0x72, 0x20, 0x62, 0x20, 0x69, 0x73, 0x20,
+ 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x3d, 0x20, 0x63, 0x6c, 0x61,
+ 0x73, 0x73, 0x4f, 0x66, 0x28, 0x61, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x63, 0x6c, 0x61,
+ 0x73, 0x73, 0x4f, 0x66, 0x28, 0x62, 0x29, 0x29, 0x20, 0x72, 0x65, 0x74,
+ 0x75, 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22,
+ 0x52, 0x65, 0x67, 0x45, 0x78, 0x70, 0x22, 0x29, 0x20, 0x7b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x46, 0x6f, 0x72, 0x20,
+ 0x52, 0x65, 0x67, 0x45, 0x78, 0x70, 0x2c, 0x20, 0x6a, 0x75, 0x73, 0x74,
+ 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x20, 0x70, 0x61, 0x74,
+ 0x74, 0x65, 0x72, 0x6e, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x66, 0x6c, 0x61,
+ 0x67, 0x73, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x69, 0x74, 0x73,
+ 0x20, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20,
+ 0x52, 0x65, 0x67, 0x45, 0x78, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x74,
+ 0x79, 0x70, 0x65, 0x54, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x61, 0x29, 0x20, 0x3d, 0x3d, 0x3d, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x52, 0x65, 0x67, 0x45, 0x78, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x74, 0x79, 0x70, 0x65, 0x54, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x63, 0x61, 0x6c, 0x6c, 0x28, 0x62, 0x29, 0x3b, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x61, 0x72, 0x65,
+ 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x63, 0x61, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x6d, 0x73,
+ 0x65, 0x6c, 0x76, 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69,
+ 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6c, 0x61,
+ 0x73, 0x73, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x46, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x41, 0x72,
+ 0x72, 0x61, 0x79, 0x22, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x61, 0x2e,
+ 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x62,
+ 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x29, 0x20, 0x7b, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72,
+ 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66,
+ 0x6f, 0x72, 0x20, 0x28, 0x76, 0x61, 0x72, 0x20, 0x69, 0x20, 0x3d, 0x20,
+ 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x61, 0x2e, 0x6c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28,
+ 0x21, 0x64, 0x65, 0x65, 0x70, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x28,
+ 0x61, 0x5b, 0x69, 0x5d, 0x2c, 0x20, 0x62, 0x5b, 0x69, 0x5d, 0x29, 0x29,
+ 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20,
+ 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x3d, 0x3d, 0x3d, 0x20,
+ 0x22, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x20, 0x7c, 0x7c, 0x20,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20,
+ 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22,
+ 0x20, 0x7c, 0x7c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x3d, 0x3d,
+ 0x3d, 0x20, 0x22, 0x42, 0x69, 0x67, 0x49, 0x6e, 0x74, 0x22, 0x20, 0x7c,
+ 0x7c, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73,
+ 0x73, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x42, 0x6f, 0x6f, 0x6c, 0x65,
+ 0x61, 0x6e, 0x22, 0x20, 0x7c, 0x7c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73,
+ 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x44, 0x61, 0x74, 0x65, 0x22, 0x29,
+ 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20,
+ 0x28, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66, 0x28, 0x61, 0x29, 0x20,
+ 0x21, 0x3d, 0x3d, 0x20, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x66, 0x28,
+ 0x62, 0x29, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x64,
+ 0x65, 0x65, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x71, 0x75,
+ 0x61, 0x6c, 0x73, 0x28, 0x61, 0x2c, 0x20, 0x62, 0x29, 0x3b, 0x0a, 0x20,
+ 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x61, 0x73, 0x73, 0x65, 0x72, 0x74,
+ 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x20, 0x3d, 0x20, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x73, 0x73, 0x65, 0x72, 0x74,
+ 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x28, 0x65, 0x78, 0x70, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x2c, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20,
+ 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x29, 0x20, 0x7b, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x64, 0x65, 0x65,
+ 0x70, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x28, 0x66, 0x6f, 0x75, 0x6e,
+ 0x64, 0x2c, 0x20, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x29,
+ 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x61,
+ 0x69, 0x6c, 0x28, 0x50, 0x72, 0x65, 0x74, 0x74, 0x79, 0x50, 0x72, 0x69,
+ 0x6e, 0x74, 0x28, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x29,
+ 0x2c, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x6e, 0x61, 0x6d,
+ 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x7d, 0x29, 0x28, 0x29, 0x3b,
+ 0x0a
+};
+unsigned int test_fuzzer_regexp_builtins_mjsunit_js_len = 5857;
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index b3fe174d95..8b90d5238c 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -14,13 +14,13 @@ class VariantsGenerator(testsuite.VariantsGenerator):
class TestSuite(testsuite.TestSuite):
- SUB_TESTS = ( 'json', 'parser', 'regexp', 'multi_return', 'wasm',
- 'wasm_async', 'wasm_call', 'wasm_code', 'wasm_compile',
+  SUB_TESTS = ( 'json', 'parser', 'regexp_builtins', 'regexp', 'multi_return',
+                'wasm', 'wasm_async', 'wasm_code', 'wasm_compile',
'wasm_data_section', 'wasm_function_sigs_section',
'wasm_globals_section', 'wasm_imports_section', 'wasm_memory_section',
'wasm_names_section', 'wasm_types_section' )
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for subtest in TestSuite.SUB_TESTS:
for fname in os.listdir(os.path.join(self.root, subtest)):
@@ -37,12 +37,9 @@ class TestSuite(testsuite.TestSuite):
def _variants_gen_class(self):
return VariantsGenerator
- def _LegacyVariantsGeneratorFactory(self):
- return testsuite.StandardLegacyVariantsGenerator
-
class TestCase(testcase.TestCase):
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
suite, name = self.path.split('/')
return [os.path.join(self.suite.root, suite, name)]
@@ -52,7 +49,7 @@ class TestCase(testcase.TestCase):
def _get_statusfile_flags(self):
return []
- def _get_mode_flags(self, ctx):
+ def _get_mode_flags(self):
return []
def get_shell(self):
@@ -60,5 +57,5 @@ class TestCase(testcase.TestCase):
return 'v8_simple_%s_fuzzer' % group
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index 4718601b0f..703c5cc4c8 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -13,8 +13,7 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-api.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-module-runner.h"
@@ -93,8 +92,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
Promise::Resolver::New(support->GetContext()));
Local<Promise> promise = resolver->GetPromise();
- AsyncCompile(i_isolate, Utils::OpenHandle(*promise),
- ModuleWireBytes(data, data + size), false);
+ i_isolate->wasm_engine()->AsyncCompile(i_isolate, Utils::OpenHandle(*promise),
+ ModuleWireBytes(data, data + size),
+ false);
ASSIGN(Function, instantiate_impl,
Function::New(support->GetContext(), &InstantiateCallback,
diff --git a/deps/v8/test/fuzzer/wasm-call.cc b/deps/v8/test/fuzzer/wasm-call.cc
deleted file mode 100644
index 5c85502381..0000000000
--- a/deps/v8/test/fuzzer/wasm-call.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "include/v8.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/utils.h"
-#include "src/wasm/wasm-interpreter.h"
-#include "src/wasm/wasm-module-builder.h"
-#include "src/wasm/wasm-module.h"
-#include "test/common/wasm/test-signatures.h"
-#include "test/common/wasm/wasm-module-runner.h"
-#include "test/fuzzer/fuzzer-support.h"
-#include "test/fuzzer/wasm-fuzzer-common.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-namespace fuzzer {
-
-static constexpr uint32_t kMaxNumFunctions = 3;
-static constexpr uint32_t kMaxNumParams = 3;
-
-class WasmCallFuzzer : public WasmExecutionFuzzer {
- template <typename V>
- static inline V read_value(const uint8_t** data, size_t* size, bool* ok) {
- // The status flag {ok} checks that the decoding up until now was okay, and
- // that a value of type V can be read without problems.
- *ok &= (*size > sizeof(V));
- if (!(*ok)) return 0;
- V result = ReadLittleEndianValue<V>(*data);
- *data += sizeof(V);
- *size -= sizeof(V);
- return result;
- }
-
- static void add_argument(Isolate* isolate, ValueType type,
- WasmValue* interpreter_args,
- Handle<Object>* compiler_args, int* argc,
- const uint8_t** data, size_t* size, bool* ok) {
- if (!(*ok)) return;
- switch (type) {
- case kWasmF32: {
- float value = read_value<float>(data, size, ok);
- interpreter_args[*argc] = WasmValue(value);
- compiler_args[*argc] =
- isolate->factory()->NewNumber(static_cast<double>(value));
- break;
- }
- case kWasmF64: {
- double value = read_value<double>(data, size, ok);
- interpreter_args[*argc] = WasmValue(value);
- compiler_args[*argc] = isolate->factory()->NewNumber(value);
- break;
- }
- case kWasmI32: {
- int32_t value = read_value<int32_t>(data, size, ok);
- interpreter_args[*argc] = WasmValue(value);
- compiler_args[*argc] =
- isolate->factory()->NewNumber(static_cast<double>(value));
- break;
- }
- default:
- UNREACHABLE();
- }
- (*argc)++;
- }
-
- bool GenerateModule(
- Isolate* isolate, Zone* zone, const uint8_t* data, size_t size,
- ZoneBuffer& buffer, int32_t& num_args,
- std::unique_ptr<WasmValue[]>& interpreter_args,
- std::unique_ptr<Handle<Object>[]>& compiler_args) override {
- bool ok = true;
- uint8_t num_functions =
- (read_value<uint8_t>(&data, &size, &ok) % kMaxNumFunctions) + 1;
-
- ValueType types[] = {kWasmF32, kWasmF64, kWasmI32, kWasmI64};
-
- interpreter_args.reset(new WasmValue[3]);
- compiler_args.reset(new Handle<Object>[3]);
-
- WasmModuleBuilder builder(zone);
- for (int fun = 0; fun < num_functions; fun++) {
- size_t num_params = static_cast<size_t>(
- (read_value<uint8_t>(&data, &size, &ok) % kMaxNumParams) + 1);
- FunctionSig::Builder sig_builder(zone, 1, num_params);
- sig_builder.AddReturn(kWasmI32);
- for (size_t param = 0; param < num_params; param++) {
- // The main function cannot handle int64 parameters.
- ValueType param_type = types[(read_value<uint8_t>(&data, &size, &ok) %
- (arraysize(types) - (fun == 0 ? 1 : 0)))];
- sig_builder.AddParam(param_type);
- if (fun == 0) {
- add_argument(isolate, param_type, interpreter_args.get(),
- compiler_args.get(), &num_args, &data, &size, &ok);
- }
- }
- WasmFunctionBuilder* f = builder.AddFunction(sig_builder.Build());
- uint32_t code_size = static_cast<uint32_t>(size / num_functions);
- f->EmitCode(data, code_size);
- uint8_t end_opcode = kExprEnd;
- f->EmitCode(&end_opcode, 1);
- data += code_size;
- size -= code_size;
- if (fun == 0) {
- builder.AddExport(CStrVector("main"), f);
- }
- }
-
- builder.SetMaxMemorySize(32);
- builder.WriteTo(buffer);
-
- if (!ok) {
- // The input data was too short.
- return 0;
- }
- return true;
- }
-};
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return WasmCallFuzzer().FuzzWasmModule(data, size);
-}
-
-} // namespace fuzzer
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 4192a938e8..f4b2a912c6 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -31,6 +31,7 @@ namespace fuzzer {
namespace {
constexpr int kMaxFunctions = 4;
+constexpr int kMaxGlobals = 64;
class DataRange {
const uint8_t* data_;
@@ -137,6 +138,21 @@ class WasmGenerator {
Generate<T>(data);
}
+ enum IfType { kIf, kIfElse };
+
+ template <ValueType T, IfType type>
+ void if_(DataRange& data) {
+ static_assert(T == kWasmStmt || type == kIfElse,
+ "if without else cannot produce a value");
+ Generate<kWasmI32>(data);
+ BlockScope block_scope(this, kExprIf, T, T);
+ Generate<T>(data);
+ if (type == kIfElse) {
+ builder_->Emit(kExprElse);
+ Generate<T>(data);
+ }
+ }
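+  // E.g. if_<kWasmI32, kIfElse> emits an i32 condition, kExprIf, a then-body
+  // producing an i32, kExprElse, an else-body producing an i32, and the
+  // closing kExprEnd (emitted when block_scope goes out of scope).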
+
void br(DataRange& data) {
// There is always at least the block representing the function body.
DCHECK(!blocks_.empty());
@@ -148,6 +164,20 @@ class WasmGenerator {
kExprBr, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
}
+ template <ValueType wanted_type>
+ void br_if(DataRange& data) {
+ // There is always at least the block representing the function body.
+ DCHECK(!blocks_.empty());
+ const uint32_t target_block = data.get<uint32_t>() % blocks_.size();
+ const ValueType break_type = blocks_[target_block];
+
+ Generate(break_type, data);
+ Generate(kWasmI32, data);
+ builder_->EmitWithI32V(
+ kExprBrIf, static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
+ ConvertOrGenerate(break_type, wanted_type, data);
+ }
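+  // When the branch is not taken, the break_type value generated above stays
+  // on the stack; ConvertOrGenerate reconciles it with wanted_type so the
+  // fall-through path still type-checks.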
+
// TODO(eholk): make this function constexpr once gcc supports it
static uint8_t max_alignment(WasmOpcode memop) {
switch (memop) {
@@ -234,6 +264,17 @@ class WasmGenerator {
builder_->Emit(kConvertOpcodes[arr_idx]);
}
+ void ConvertOrGenerate(ValueType src, ValueType dst, DataRange& data) {
+ if (src == dst) return;
+ if (src == kWasmStmt && dst != kWasmStmt) {
+ Generate(dst, data);
+ } else if (dst == kWasmStmt && src != kWasmStmt) {
+ builder_->Emit(kExprDrop);
+ } else {
+ Convert(src, dst);
+ }
+ }
+
void call(DataRange& data, ValueType wanted_type) {
int func_index = data.get<uint8_t>() % functions_.size();
FunctionSig* sig = functions_[func_index];
@@ -258,15 +299,15 @@ class WasmGenerator {
}
}
- struct Local {
+ struct Var {
uint32_t index;
ValueType type = kWasmStmt;
- Local() = default;
- Local(uint32_t index, ValueType type) : index(index), type(type) {}
+ Var() = default;
+ Var(uint32_t index, ValueType type) : index(index), type(type) {}
bool is_valid() const { return type != kWasmStmt; }
};
- Local GetRandomLocal(DataRange& data) {
+ Var GetRandomLocal(DataRange& data) {
uint32_t num_params =
static_cast<uint32_t>(builder_->signature()->parameter_count());
uint32_t num_locals = static_cast<uint32_t>(locals_.size());
@@ -279,7 +320,7 @@ class WasmGenerator {
template <ValueType wanted_type>
void local_op(DataRange& data, WasmOpcode opcode) {
- Local local = GetRandomLocal(data);
+ Var local = GetRandomLocal(data);
// If there are no locals and no parameters, just generate any value (if a
// value is needed), or do nothing.
if (!local.is_valid()) {
@@ -296,6 +337,7 @@ class WasmGenerator {
template <ValueType wanted_type>
void get_local(DataRange& data) {
+ static_assert(wanted_type != kWasmStmt, "illegal type");
local_op<wanted_type>(data, kExprGetLocal);
}
@@ -306,6 +348,46 @@ class WasmGenerator {
local_op<wanted_type>(data, kExprTeeLocal);
}
+ Var GetRandomGlobal(DataRange& data, bool ensure_mutable) {
+ uint32_t index;
+ if (ensure_mutable) {
+ if (mutable_globals_.empty()) return {};
+ index = mutable_globals_[data.get<uint8_t>() % mutable_globals_.size()];
+ } else {
+ if (globals_.empty()) return {};
+ index = data.get<uint8_t>() % globals_.size();
+ }
+ ValueType type = globals_[index];
+ return {index, type};
+ }
+
+ template <ValueType wanted_type>
+ void global_op(DataRange& data) {
+ constexpr bool is_set = wanted_type == kWasmStmt;
+ Var global = GetRandomGlobal(data, is_set);
+ // If there are no globals, just generate any value (if a value is needed),
+ // or do nothing.
+ if (!global.is_valid()) {
+ if (wanted_type == kWasmStmt) return;
+ return Generate<wanted_type>(data);
+ }
+
+ if (is_set) Generate(global.type, data);
+ builder_->EmitWithU32V(is_set ? kExprSetGlobal : kExprGetGlobal,
+ global.index);
+ if (!is_set && global.type != wanted_type) {
+ Convert(global.type, wanted_type);
+ }
+ }
+
+ template <ValueType wanted_type>
+ void get_global(DataRange& data) {
+ static_assert(wanted_type != kWasmStmt, "illegal type");
+ global_op<wanted_type>(data);
+ }
+
+ void set_global(DataRange& data) { global_op<kWasmStmt>(data); }
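+  // Note the encoding: instantiating global_op with kWasmStmt means "set"
+  // (consume a value, produce nothing); any concrete value type means "get".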
+
template <ValueType T1, ValueType T2>
void sequence(DataRange& data) {
Generate<T1, T2>(data);
@@ -343,8 +425,13 @@ class WasmGenerator {
public:
WasmGenerator(WasmFunctionBuilder* fn,
- const std::vector<FunctionSig*>& functions, DataRange& data)
- : builder_(fn), functions_(functions) {
+ const std::vector<FunctionSig*>& functions,
+ const std::vector<ValueType>& globals,
+ const std::vector<uint8_t>& mutable_globals, DataRange& data)
+ : builder_(fn),
+ functions_(functions),
+ globals_(globals),
+ mutable_globals_(mutable_globals) {
FunctionSig* sig = fn->signature();
DCHECK_GE(1, sig->return_count());
blocks_.push_back(sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0));
@@ -375,6 +462,8 @@ class WasmGenerator {
std::vector<ValueType> blocks_;
const std::vector<FunctionSig*>& functions_;
std::vector<ValueType> locals_;
+ std::vector<ValueType> globals_;
+ std::vector<uint8_t> mutable_globals_; // indexes into {globals_}.
uint32_t recursion_depth = 0;
static constexpr uint32_t kMaxRecursionDepth = 64;
@@ -390,9 +479,13 @@ void WasmGenerator::Generate<kWasmStmt>(DataRange& data) {
if (recursion_limit_reached() || data.size() == 0) return;
constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmStmt, kWasmStmt>,
&WasmGenerator::block<kWasmStmt>,
&WasmGenerator::loop<kWasmStmt>,
+ &WasmGenerator::if_<kWasmStmt, kIf>,
+ &WasmGenerator::if_<kWasmStmt, kIfElse>,
&WasmGenerator::br,
+ &WasmGenerator::br_if<kWasmStmt>,
&WasmGenerator::memop<kExprI32StoreMem, kWasmI32>,
&WasmGenerator::memop<kExprI32StoreMem8, kWasmI32>,
@@ -408,7 +501,8 @@ void WasmGenerator::Generate<kWasmStmt>(DataRange& data) {
&WasmGenerator::call<kWasmStmt>,
- &WasmGenerator::set_local};
+ &WasmGenerator::set_local,
+ &WasmGenerator::set_global};
GenerateOneOf(alternates, data);
}
@@ -481,6 +575,8 @@ void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
&WasmGenerator::block<kWasmI32>,
&WasmGenerator::loop<kWasmI32>,
+ &WasmGenerator::if_<kWasmI32, kIfElse>,
+ &WasmGenerator::br_if<kWasmI32>,
&WasmGenerator::memop<kExprI32LoadMem>,
&WasmGenerator::memop<kExprI32LoadMem8S>,
@@ -493,6 +589,7 @@ void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
&WasmGenerator::get_local<kWasmI32>,
&WasmGenerator::tee_local<kWasmI32>,
+ &WasmGenerator::get_global<kWasmI32>,
&WasmGenerator::call<kWasmI32>};
@@ -534,6 +631,8 @@ void WasmGenerator::Generate<kWasmI64>(DataRange& data) {
&WasmGenerator::block<kWasmI64>,
&WasmGenerator::loop<kWasmI64>,
+ &WasmGenerator::if_<kWasmI64, kIfElse>,
+ &WasmGenerator::br_if<kWasmI64>,
&WasmGenerator::memop<kExprI64LoadMem>,
&WasmGenerator::memop<kExprI64LoadMem8S>,
@@ -545,6 +644,7 @@ void WasmGenerator::Generate<kWasmI64>(DataRange& data) {
&WasmGenerator::get_local<kWasmI64>,
&WasmGenerator::tee_local<kWasmI64>,
+ &WasmGenerator::get_global<kWasmI64>,
&WasmGenerator::call<kWasmI64>};
@@ -568,11 +668,14 @@ void WasmGenerator::Generate<kWasmF32>(DataRange& data) {
&WasmGenerator::block<kWasmF32>,
&WasmGenerator::loop<kWasmF32>,
+ &WasmGenerator::if_<kWasmF32, kIfElse>,
+ &WasmGenerator::br_if<kWasmF32>,
&WasmGenerator::memop<kExprF32LoadMem>,
&WasmGenerator::get_local<kWasmF32>,
&WasmGenerator::tee_local<kWasmF32>,
+ &WasmGenerator::get_global<kWasmF32>,
&WasmGenerator::call<kWasmF32>};
@@ -596,11 +699,14 @@ void WasmGenerator::Generate<kWasmF64>(DataRange& data) {
&WasmGenerator::block<kWasmF64>,
&WasmGenerator::loop<kWasmF64>,
+ &WasmGenerator::if_<kWasmF64, kIfElse>,
+ &WasmGenerator::br_if<kWasmF64>,
&WasmGenerator::memop<kExprF64LoadMem>,
&WasmGenerator::get_local<kWasmF64>,
&WasmGenerator::tee_local<kWasmF64>,
+ &WasmGenerator::get_global<kWasmF64>,
&WasmGenerator::call<kWasmF64>};
@@ -664,6 +770,22 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
function_signatures.push_back(GenerateSig(zone, range));
}
+ int num_globals = range.get<uint8_t>() % (kMaxGlobals + 1);
+ std::vector<ValueType> globals;
+ std::vector<uint8_t> mutable_globals;
+ globals.reserve(num_globals);
+ mutable_globals.reserve(num_globals);
+
+ for (int i = 0; i < num_globals; ++i) {
+ ValueType type = GetValueType(range);
+ const bool exported = range.get<bool>();
+ // 1/8 of globals are immutable.
+ const bool mutability = (range.get<uint8_t>() % 8) != 0;
+ builder.AddGlobal(type, exported, mutability, WasmInitExpr());
+ globals.push_back(type);
+ if (mutability) mutable_globals.push_back(static_cast<uint8_t>(i));
+ }
+
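The "1/8 of globals are immutable" comment matches the byte arithmetic: a uint8_t takes 256 values, and exactly 32 of them are divisible by 8. A quick standalone check:

#include <cstdio>

int main() {
  int immutable = 0;
  for (int b = 0; b < 256; ++b) {
    if (b % 8 == 0) ++immutable;  // the branch that makes a global immutable
  }
  std::printf("%d/256\n", immutable);  // prints: 32/256, i.e. 1/8
}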
for (int i = 0; i < num_functions; ++i) {
DataRange function_range =
i == num_functions - 1 ? std::move(range) : range.split();
@@ -671,7 +793,8 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
FunctionSig* sig = function_signatures[i];
WasmFunctionBuilder* f = builder.AddFunction(sig);
- WasmGenerator gen(f, function_signatures, function_range);
+ WasmGenerator gen(f, function_signatures, globals, mutable_globals,
+ function_range);
ValueType return_type =
sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0);
gen.Generate(return_type, function_range);
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 46f5133486..6884728ff1 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -7,8 +7,6 @@
#include "include/v8.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-api.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
@@ -68,26 +66,40 @@ int FuzzWasmSection(SectionCode section, const uint8_t* data, size_t size) {
void InterpretAndExecuteModule(i::Isolate* isolate,
Handle<WasmModuleObject> module_object) {
- ScheduledErrorThrower thrower(isolate, "WebAssembly Instantiation");
- // Try to instantiate and interpret the module_object.
- MaybeHandle<WasmInstanceObject> maybe_instance =
- SyncInstantiate(isolate, &thrower, module_object,
- Handle<JSReceiver>::null(), // imports
- MaybeHandle<JSArrayBuffer>()); // memory
+ ErrorThrower thrower(isolate, "WebAssembly Instantiation");
+ MaybeHandle<WasmInstanceObject> maybe_instance;
Handle<WasmInstanceObject> instance;
- if (!maybe_instance.ToHandle(&instance)) return;
+
+ // Try to instantiate and interpret the module_object.
+ maybe_instance = isolate->wasm_engine()->SyncInstantiate(
+ isolate, &thrower, module_object,
+ Handle<JSReceiver>::null(), // imports
+ MaybeHandle<JSArrayBuffer>()); // memory
+ if (!maybe_instance.ToHandle(&instance)) {
+ isolate->clear_pending_exception();
+ thrower.Reset(); // Ignore errors.
+ return;
+ }
if (!testing::InterpretWasmModuleForTesting(isolate, instance, "main", 0,
nullptr)) {
+ isolate->clear_pending_exception();
return;
}
- // Instantiate and execute the module_object.
- maybe_instance = SyncInstantiate(isolate, &thrower, module_object,
- Handle<JSReceiver>::null(), // imports
- MaybeHandle<JSArrayBuffer>()); // memory
- if (!maybe_instance.ToHandle(&instance)) return;
-
- testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
+ // Try to instantiate and execute the module_object.
+ maybe_instance = isolate->wasm_engine()->SyncInstantiate(
+ isolate, &thrower, module_object,
+ Handle<JSReceiver>::null(), // imports
+ MaybeHandle<JSArrayBuffer>()); // memory
+ if (!maybe_instance.ToHandle(&instance)) {
+ isolate->clear_pending_exception();
+ thrower.Reset(); // Ignore errors.
+ return;
+ }
+ if (testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr) < 0) {
+ isolate->clear_pending_exception();
+ return;
+ }
}
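The rewritten InterpretAndExecuteModule clears both the isolate's pending exception and the thrower after every failing step; leaking either piece of error state would trip up later fuzzer iterations. A toy illustration of the pattern (hypothetical types, not V8's):

#include <optional>
#include <string>

struct Thrower {
  std::optional<std::string> error;
  void Reset() { error.reset(); }  // deliberately forget the error
};

bool TryInstantiate(Thrower* thrower) {
  thrower->error = "instantiation failed";  // simulate a failure
  return false;
}

int main() {
  Thrower thrower;
  if (!TryInstantiate(&thrower)) {
    thrower.Reset();  // swallow it and bail out quietly, as the fuzzer does
    return 0;
  }
}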
namespace {
@@ -123,6 +135,15 @@ std::ostream& operator<<(std::ostream& os, const PrintSig& print) {
return os << "]";
}
+struct PrintName {
+ WasmName name;
+ PrintName(ModuleWireBytes wire_bytes, WireBytesRef ref)
+ : name(wire_bytes.GetNameOrNull(ref)) {}
+};
+std::ostream& operator<<(std::ostream& os, const PrintName& name) {
+ return os.write(name.name.start(), name.name.size());
+}
+
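PrintName uses a common C++ idiom: wrap a view in a tiny struct with its own operator<< so that stream call sites stay terse. A self-contained sketch of the idiom (not V8 types):

#include <iostream>
#include <string_view>

struct Quoted {
  std::string_view s;
};
std::ostream& operator<<(std::ostream& os, const Quoted& q) {
  return os << '\'' << q.s << '\'';
}

int main() {
  std::cout << "builder.addExport(" << Quoted{"main"} << ");\n";
  // prints: builder.addExport('main');
}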
void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
bool compiles) {
constexpr bool kVerifyFunctions = false;
@@ -155,12 +176,17 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
}
+ for (WasmGlobal& glob : module->globals) {
+ os << " builder.addGlobal(" << ValueTypeToConstantName(glob.type) << ", "
+ << glob.mutability << ");\n";
+ }
+
Zone tmp_zone(isolate->allocator(), ZONE_NAME);
for (const WasmFunction& func : module->functions) {
Vector<const uint8_t> func_code = wire_bytes.GetFunctionBytes(&func);
- os << " // Generate function " << func.func_index + 1 << " of "
- << module->functions.size() << ".\n";
+ os << " // Generate function " << func.func_index << " (out of "
+ << module->functions.size() << ").\n";
// Generate signature.
os << " sig" << func.func_index << " = makeSig("
<< PrintParameters(func.sig) << ", " << PrintReturns(func.sig) << ");\n";
@@ -190,16 +216,21 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
FunctionBody func_body(func.sig, func.code.offset(), func_code.start(),
func_code.end());
PrintRawWasmCode(isolate->allocator(), func_body, module, kOmitLocals);
- os << " ])";
- if (func.func_index == 0) os << "\n .exportAs('main')";
- os << ";\n ";
+ os << " ]);\n";
+ }
+
+ for (WasmExport& exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ os << " builder.addExport('" << PrintName(wire_bytes, exp.name) << "', "
+ << exp.index << ");\n";
}
if (compiles) {
os << " var module = builder.instantiate();\n"
" module.exports.main(1, 2, 3);\n";
} else {
- os << " assertThrows(function() { builder.instantiate(); });\n";
+ os << " assertThrows(function() { builder.instantiate(); }, "
+ "WebAssembly.CompileError);\n";
}
os << "})();\n";
}
@@ -241,7 +272,8 @@ int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size,
MaybeHandle<WasmModuleObject> compiled_module;
{
FlagScope<bool> no_liftoff(&FLAG_liftoff, false);
- compiled_module = SyncCompile(i_isolate, &interpreter_thrower, wire_bytes);
+ compiled_module = i_isolate->wasm_engine()->SyncCompile(
+ i_isolate, &interpreter_thrower, wire_bytes);
}
bool compiles = !compiled_module.is_null();
@@ -260,9 +292,10 @@ int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size,
int32_t result_interpreter;
bool possible_nondeterminism = false;
{
- MaybeHandle<WasmInstanceObject> interpreter_instance = SyncInstantiate(
- i_isolate, &interpreter_thrower, compiled_module.ToHandleChecked(),
- MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
+ MaybeHandle<WasmInstanceObject> interpreter_instance =
+ i_isolate->wasm_engine()->SyncInstantiate(
+ i_isolate, &interpreter_thrower, compiled_module.ToHandleChecked(),
+ MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
// Ignore instantiation failure.
if (interpreter_thrower.error()) {
@@ -286,9 +319,10 @@ int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size,
int32_t result_turbofan;
{
ErrorThrower compiler_thrower(i_isolate, "Turbofan");
- MaybeHandle<WasmInstanceObject> compiled_instance = SyncInstantiate(
- i_isolate, &compiler_thrower, compiled_module.ToHandleChecked(),
- MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
+ MaybeHandle<WasmInstanceObject> compiled_instance =
+ i_isolate->wasm_engine()->SyncInstantiate(
+ i_isolate, &compiler_thrower, compiled_module.ToHandleChecked(),
+ MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
DCHECK(!compiler_thrower.error());
result_turbofan = testing::CallWasmFunctionForTesting(
@@ -315,9 +349,8 @@ int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size,
ErrorThrower compiler_thrower(i_isolate, "Liftoff");
// Re-compile with Liftoff.
MaybeHandle<WasmInstanceObject> compiled_instance =
- SyncCompileAndInstantiate(i_isolate, &compiler_thrower, wire_bytes,
- MaybeHandle<JSReceiver>(),
- MaybeHandle<JSArrayBuffer>());
+ testing::CompileAndInstantiateForTesting(i_isolate, &compiler_thrower,
+ wire_bytes);
DCHECK(!compiler_thrower.error());
result_liftoff = testing::CallWasmFunctionForTesting(
i_isolate, compiled_instance.ToHandleChecked(), &compiler_thrower,
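These hunks implement differential execution: one module runs under the interpreter, TurboFan, and Liftoff, and unless nondeterminism was flagged the results must agree. A minimal standalone sketch of the idea, with toy stand-ins for the engines:

#include <cstdint>
#include <cstdio>

int32_t RunA(int32_t x) { return x * 2; }  // stand-in for the interpreter
int32_t RunB(int32_t x) { return x + x; }  // stand-in for compiled code

int main() {
  for (int32_t input = -3; input <= 3; ++input) {
    if (RunA(input) != RunB(input)) {
      std::printf("mismatch at %d\n", input);
      return 1;  // a real fuzzer would report a bug here
    }
  }
  return 0;
}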
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index 567e68b40a..99642b1182 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -12,7 +12,7 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
-#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-module-runner.h"
@@ -42,8 +42,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
i::HandleScope scope(i_isolate);
i::wasm::ErrorThrower thrower(i_isolate, "wasm fuzzer");
- i::MaybeHandle<i::WasmModuleObject> maybe_object = SyncCompile(
- i_isolate, &thrower, i::wasm::ModuleWireBytes(data, data + size));
+ i::MaybeHandle<i::WasmModuleObject> maybe_object =
+ i_isolate->wasm_engine()->SyncCompile(
+ i_isolate, &thrower, i::wasm::ModuleWireBytes(data, data + size));
i::Handle<i::WasmModuleObject> module_object;
if (maybe_object.ToHandle(&module_object)) {
i::wasm::fuzzer::InterpretAndExecuteModule(i_isolate, module_object);
diff --git a/deps/v8/test/fuzzer/wasm_call/foo b/deps/v8/test/fuzzer/wasm_call/foo
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/test/fuzzer/wasm_call/foo
+++ /dev/null
diff --git a/deps/v8/test/inspector/debugger/break-locations-await-expected.txt b/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
index 1bf0317b21..15304c2073 100644
--- a/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-locations-await-expected.txt
@@ -5,11 +5,11 @@ Running test: testBreakLocations
function testFunction() {
async function f1() {
for (let x = |_|0; x |_|< 1; ++|_|x) |_|await x;
- |_|return await Promise.|C|resolve(2);
+ |_|return |_|await Promise.|C|resolve(2);
|R|}
async function f2() {
- let r = |_|await |C|f1() + await |C|f1();
+ let r = |_|await |C|f1() + |_|await |C|f1();
|_|await |C|f1();
|_|await |C|f1().|C|then(x => x |_|* 2|R|);
|_|await [1].|C|map(x => Promise.|C|resolve(x)|R|)[0];
diff --git a/deps/v8/test/inspector/debugger/cleanup-old-failed-to-parse-anonymous-scripts-expected.txt b/deps/v8/test/inspector/debugger/cleanup-old-failed-to-parse-anonymous-scripts-expected.txt
new file mode 100644
index 0000000000..8bb4fd211f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/cleanup-old-failed-to-parse-anonymous-scripts-expected.txt
@@ -0,0 +1,17 @@
+Checks that inspector collects old failed to parse anonymous scripts.
+Generate 1000 scriptFailedToParse events
+error:0
+success:1000
+Generate three scriptFailedToParse events for non-anonymous script
+error:0
+success:1003
+Generate one more scriptFailedToParse event for anonymous script
+error:100
+success:904
+Check that latest script is still available
+{
+ id : <messageId>
+ result : {
+ scriptSource : }
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/cleanup-old-failed-to-parse-anonymous-scripts.js b/deps/v8/test/inspector/debugger/cleanup-old-failed-to-parse-anonymous-scripts.js
new file mode 100644
index 0000000000..dfe0fb6493
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/cleanup-old-failed-to-parse-anonymous-scripts.js
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+    'Checks that inspector collects old failed to parse anonymous scripts.');
+
+(async function main() {
+ Protocol.Debugger.enable();
+ const scriptIds = [];
+ Protocol.Debugger.onScriptFailedToParse(
+ message => scriptIds.push(message.params.scriptId));
+ InspectorTest.log('Generate 1000 scriptFailedToParse events');
+ await Protocol.Runtime.evaluate({
+ expression: `for (var i = 0; i < 1000; ++i) {
+ try { JSON.parse('}'); } catch(e) {}
+ }`
+ });
+ await dumpScriptIdsStats(scriptIds);
+ InspectorTest.log(
+      'Generate three scriptFailedToParse events for non-anonymous script');
+ for (var i = 0; i < 3; ++i) {
+ await Protocol.Runtime.evaluate({expression: '}//# sourceURL=foo.js'});
+ }
+ await dumpScriptIdsStats(scriptIds);
+ InspectorTest.log(
+ 'Generate one more scriptFailedToParse event for anonymous script');
+ await Protocol.Runtime.evaluate(
+ {expression: `try {JSON.parse('}');} catch(e){}`});
+ await dumpScriptIdsStats(scriptIds);
+ InspectorTest.log('Check that latest script is still available');
+ InspectorTest.logMessage(await Protocol.Debugger.getScriptSource(
+ {scriptId: scriptIds[scriptIds.length - 1]}));
+ InspectorTest.completeTest();
+})();
+
+async function dumpScriptIdsStats(scriptIds) {
+ let errors = 0;
+ let success = 0;
+ for (let scriptId of scriptIds) {
+ const result =
+ await Protocol.Debugger.getScriptSource({scriptId: scriptId});
+ if (result.error)
+ ++errors;
+ else
+ ++success;
+ }
+ InspectorTest.log(`error:${errors}\nsuccess:${success}`);
+}
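The expected counts (1000 anonymous scripts kept, then 100 evicted at once after the 1001st) suggest the inspector caps retained unnamed scripts and evicts the oldest in batches. A toy container with that shape (illustrative only, not the inspector's actual data structure):

#include <cstddef>
#include <deque>

template <typename T>
class BoundedLog {
 public:
  BoundedLog(size_t cap, size_t batch) : cap_(cap), batch_(batch) {}
  void Add(T v) {
    items_.push_back(v);
    if (items_.size() > cap_) {
      // Evict a whole batch of the oldest entries at once.
      for (size_t i = 0; i < batch_ && !items_.empty(); ++i)
        items_.pop_front();
    }
  }
  size_t size() const { return items_.size(); }

 private:
  size_t cap_, batch_;
  std::deque<T> items_;
};
// BoundedLog<int> log(1000, 100): after 1001 Add() calls, 901 items remain.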
diff --git a/deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt b/deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt
index 7639ab6537..5cbc451d5e 100644
--- a/deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt
+++ b/deps/v8/test/inspector/debugger/es6-module-script-parsed-expected.txt
@@ -8,7 +8,7 @@ Running test: testLoadedModulesOnDebuggerEnable
endLine : 3
executionContextId : <executionContextId>
hasSourceURL : false
- hash : F8E59942466284E2766FD161CA6FFD024048A807
+ hash : DD672B308FB57B18CCEBFA236C372DBBC001DAD2
isLiveEdit : false
isModule : true
length : 39
@@ -28,7 +28,7 @@ Running test: testScriptEventsWhenDebuggerIsEnabled
endLine : 3
executionContextId : <executionContextId>
hasSourceURL : false
- hash : F8E59942466284E2766FD161CA6FFD024048A807
+ hash : DD672B308FB57B18CCEBFA236C372DBBC001DAD2
isLiveEdit : false
isModule : true
length : 39
diff --git a/deps/v8/test/inspector/debugger/eval-without-codegen-expected.txt b/deps/v8/test/inspector/debugger/eval-without-codegen-expected.txt
new file mode 100644
index 0000000000..04de9bb226
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/eval-without-codegen-expected.txt
@@ -0,0 +1,32 @@
+Tests that evaluation works when code generation from strings is not allowed.
+
+Running test: testEvaluateNotPaused
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : string
+ value : Global1
+ }
+ }
+}
+
+Running test: testEvaluatePaused
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : string
+ value : Global2
+ }
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : string
+ value : Local
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/eval-without-codegen.js b/deps/v8/test/inspector/debugger/eval-without-codegen.js
new file mode 100644
index 0000000000..18600d87a9
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/eval-without-codegen.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Tests that evaluation works when code generation from strings is not allowed.');
+
+Protocol.Debugger.enable();
+Protocol.Runtime.enable();
+
+InspectorTest.runAsyncTestSuite([
+ async function testEvaluateNotPaused() {
+ contextGroup.addScript(`inspector.setAllowCodeGenerationFromStrings(false);
+ var global1 = 'Global1';`);
+ await Protocol.Debugger.onceScriptParsed();
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'global1'}));
+ },
+
+ async function testEvaluatePaused() {
+ contextGroup.addScript(`inspector.setAllowCodeGenerationFromStrings(false);
+ var global2 = 'Global2';
+ function foo(x) {
+ var local = 'Local';
+ debugger;
+ return local + x;
+ }
+ foo();`);
+ let {params: {callFrames: [{callFrameId}]}} =
+ await Protocol.Debugger.oncePaused();
+
+ InspectorTest.logMessage(
+ await Protocol.Runtime.evaluate({expression: 'global2'}));
+ InspectorTest.logMessage(await Protocol.Debugger.evaluateOnCallFrame(
+ {callFrameId, expression: 'local'}));
+ await Protocol.Debugger.resume();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt
index 9c65ba2325..8e177c45f4 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt
@@ -6,7 +6,7 @@ getPossibleBreakpoints should not crash during lazy compilation (crbug.com/71533
endLine : 2
executionContextId : <executionContextId>
hasSourceURL : true
- hash : FA2A959297747012766FE9C5006E7F522D88FA72
+ hash : 720BC4212F9AA3E834D156055BEFEDC32027C816
isLiveEdit : false
isModule : false
length : 52
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
index c304fe677b..6679386784 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
@@ -152,7 +152,7 @@ function testSwitch() {
}
|R|}
-function* idMaker() {
+function* idMaker|_|() {
|_|yield 1;
|_|yield 2;
|_|yield 3;
@@ -230,8 +230,8 @@ async function testPromiseAsyncWithCode() {
|_|await p;
|R|}
|C|setTimeout(returnCall, 0);
- await |C|foo();
- await |C|foo();
+ |_|await |C|foo();
+ |_|await |C|foo();
|C|nextTest();
|R|}
|C|main();
@@ -252,7 +252,7 @@ async function testPromiseComplex() {
|R|}
var x = |_|1;
var y = |_|2;
- |C|returnFunction(|C|emptyFunction(), x++, --y, x => 2 |_|* x|R|, |C|returnCall())|C|().a = await |C|foo((a => 2 |_|*a|R|)|C|(5));
+ |C|returnFunction(|C|emptyFunction(), x++, --y, x => 2 |_|* x|R|, |C|returnCall())|C|().a = |_|await |C|foo((a => 2 |_|*a|R|)|C|(5));
|C|nextTest();
|R|}
|C|main();
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt b/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
index 53e24c0027..5c0ad23aab 100644
--- a/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
@@ -8,7 +8,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 2ACD11425BF645075D5696189A7BA7EB4048D6F7
+ hash : 662C2191D614E753232391E558B6EC90C2F0E2F1
isLiveEdit : false
isModule : false
length : 42
@@ -27,7 +27,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 0CD3C4132CE40E415D5696189A7BA7EB4048D6F7
+ hash : 362F9F016AAA39B904701571F19E8FA0B174F0B3
isLiveEdit : false
isModule : false
length : 52
@@ -46,7 +46,7 @@ scriptParsed
endLine : 2
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 0E35E860580550805D5681F7BCD47D855B794EB3
+ hash : 976FA8F0184B894A9B10AFE4AC94EC437EE4FB60
isLiveEdit : false
isModule : false
length : 43
@@ -65,7 +65,7 @@ scriptParsed
endLine : 3
executionContextId : <executionContextId>
hasSourceURL : true
- hash : AD4760425BF645075D5681F7D1786BD4DA9A86A7
+ hash : 3F4F1CC20C664E61705CD011E025DD093676ED54
isLiveEdit : false
isModule : false
length : 46
@@ -84,7 +84,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 3E60CE61913387795D5696189A7BA7EB4048B715
+ hash : 55928721655B662420FC9F66450A7C2C327C8C36
isLiveEdit : false
isModule : false
length : 50
@@ -103,7 +103,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 3E60CE617215C621E8825B319A7BA7EB4048B715
+ hash : CF0E583119A280424958512525ECCFB3B4F7A262
isLiveEdit : false
isModule : false
length : 60
@@ -122,7 +122,7 @@ scriptParsed
endLine : 2
executionContextId : <executionContextId>
hasSourceURL : false
- hash : F2A32D219EFACBF470C48663BCD47D85864D1A22
+ hash : D1FA51B286EA72144EC0E44681120344F2D2EBD5
isLiveEdit : false
isModule : false
length : 51
@@ -141,7 +141,7 @@ scriptParsed
endLine : 3
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 46927EC1BFB78C355D5681F7D1786BD4216D5124
+ hash : 7ABA6300FC0D3908C0536EE2EFF60B48E36FFA35
isLiveEdit : false
isModule : false
length : 54
@@ -160,7 +160,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 793E2411EDECF405E8825B31460F7CD9A656E096
+ hash : EB6DE473AF1788035E434F41ABC86140BD638644
isLiveEdit : false
isModule : false
length : 88
@@ -179,7 +179,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : DED761235CC24EE21D0E9277CF599B5A292D8053
+ hash : 63710BD01F77B0F0C00E0662B45B782A5976A206
isLiveEdit : false
isModule : false
length : 89
@@ -198,7 +198,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 9CF28A415BF64507692D5A84D1786BD4437C3511
+ hash : FC51669018C05471C8F46535240B9C77110E7291
isLiveEdit : false
isModule : false
length : 40
@@ -217,7 +217,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : BEE49C625BF64507692D5A84D1786BD4437C3511
+ hash : FE0F41315BEF7930C8F465359F53D504110E7291
isLiveEdit : false
isModule : false
length : 41
@@ -236,7 +236,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A8443D429DC872EBA18
+ hash : 1D33B330614150F8F7EB45184E245F542E304118
isLiveEdit : false
isModule : false
length : 18
@@ -255,7 +255,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : true
- hash : D26251F019679C792E2B4EF437A20E96134D5C26
+ hash : 474CCF101B1996849E154ED20F854F368C793767
isLiveEdit : false
isModule : false
length : 96
@@ -274,7 +274,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 814AEF635BF64507692D5A84BCD47D85C4031CD2
+ hash : DD0A3C60B97506161FA15D9419969A90DF33E2E0
isLiveEdit : false
isModule : false
length : 39
@@ -293,7 +293,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A84A8DC8126872EBA18
+ hash : 1D33B330A774BD52F7EB45184E245F54C94FB815
isLiveEdit : false
isModule : false
length : 19
@@ -312,7 +312,7 @@ scriptParsed
endLine : 2
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A8443D429DC872EBA18
+ hash : 1D33B330EA93E221F7EB45184E245F5479702083
isLiveEdit : false
isModule : false
length : 20
@@ -331,7 +331,7 @@ scriptParsed
endLine : 3
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A84A8DC8126872EBA18
+ hash : 867981735982F829F7EB45184E245F5479702083
isLiveEdit : false
isModule : false
length : 21
@@ -350,7 +350,7 @@ scriptParsed
endLine : 4
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A8443D429DC872EBA18
+ hash : 095D6881D29C0D76F7EB45184E245F5479702083
isLiveEdit : false
isModule : false
length : 22
@@ -369,7 +369,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 2ACD11425BF645075D5696189A7BA7EB4048D6F7
+ hash : 662C2191D614E753232391E558B6EC90C2F0E2F1
isLiveEdit : false
isModule : false
length : 42
@@ -388,7 +388,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 0CD3C4132CE40E415D5696189A7BA7EB4048D6F7
+ hash : 362F9F016AAA39B904701571F19E8FA0B174F0B3
isLiveEdit : false
isModule : false
length : 52
@@ -407,7 +407,7 @@ scriptParsed
endLine : 2
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 0E35E860580550805D5681F7BCD47D855B794EB3
+ hash : 976FA8F0184B894A9B10AFE4AC94EC437EE4FB60
isLiveEdit : false
isModule : false
length : 43
@@ -426,7 +426,7 @@ scriptParsed
endLine : 3
executionContextId : <executionContextId>
hasSourceURL : true
- hash : AD4760425BF645075D5681F7D1786BD4DA9A86A7
+ hash : 3F4F1CC20C664E61705CD011E025DD093676ED54
isLiveEdit : false
isModule : false
length : 46
@@ -445,7 +445,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 3E60CE61913387795D5696189A7BA7EB4048B715
+ hash : 55928721655B662420FC9F66450A7C2C327C8C36
isLiveEdit : false
isModule : false
length : 50
@@ -464,7 +464,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 3E60CE617215C621E8825B319A7BA7EB4048B715
+ hash : CF0E583119A280424958512525ECCFB3B4F7A262
isLiveEdit : false
isModule : false
length : 60
@@ -483,7 +483,7 @@ scriptParsed
endLine : 2
executionContextId : <executionContextId>
hasSourceURL : false
- hash : F2A32D219EFACBF470C48663BCD47D85864D1A22
+ hash : D1FA51B286EA72144EC0E44681120344F2D2EBD5
isLiveEdit : false
isModule : false
length : 51
@@ -502,7 +502,7 @@ scriptParsed
endLine : 3
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 46927EC1BFB78C355D5681F7D1786BD4216D5124
+ hash : 7ABA6300FC0D3908C0536EE2EFF60B48E36FFA35
isLiveEdit : false
isModule : false
length : 54
@@ -521,7 +521,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 793E2411EDECF405E8825B31460F7CD9A656E096
+ hash : EB6DE473AF1788035E434F41ABC86140BD638644
isLiveEdit : false
isModule : false
length : 88
@@ -540,7 +540,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : DED761235CC24EE21D0E9277CF599B5A292D8053
+ hash : 63710BD01F77B0F0C00E0662B45B782A5976A206
isLiveEdit : false
isModule : false
length : 89
@@ -559,7 +559,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 9CF28A415BF64507692D5A84D1786BD4437C3511
+ hash : FC51669018C05471C8F46535240B9C77110E7291
isLiveEdit : false
isModule : false
length : 40
@@ -578,7 +578,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : BEE49C625BF64507692D5A84D1786BD4437C3511
+ hash : FE0F41315BEF7930C8F465359F53D504110E7291
isLiveEdit : false
isModule : false
length : 41
@@ -597,7 +597,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A8443D429DC872EBA18
+ hash : 1D33B330614150F8F7EB45184E245F542E304118
isLiveEdit : false
isModule : false
length : 18
@@ -616,7 +616,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : true
- hash : D26251F019679C792E2B4EF437A20E96134D5C26
+ hash : 474CCF101B1996849E154ED20F854F368C793767
isLiveEdit : false
isModule : false
length : 96
@@ -635,7 +635,7 @@ scriptParsed
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 814AEF635BF64507692D5A84BCD47D85C4031CD2
+ hash : DD0A3C60B97506161FA15D9419969A90DF33E2E0
isLiveEdit : false
isModule : false
length : 39
@@ -665,7 +665,7 @@ scriptFailedToParse
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : true
- hash : C0C37C619263496792D5CFE7CA6F2911028287A6
+ hash : BB13EE1015533BA2FF7F72464D9CDD374BABDA67
isModule : false
length : 56
scriptId : <scriptId>
@@ -683,7 +683,7 @@ scriptParsed
endLine : 1
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A84A8DC8126872EBA18
+ hash : 1D33B330A774BD52F7EB45184E245F54C94FB815
isLiveEdit : false
isModule : false
length : 19
@@ -702,7 +702,7 @@ scriptParsed
endLine : 2
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A8443D429DC872EBA18
+ hash : 1D33B330EA93E221F7EB45184E245F5479702083
isLiveEdit : false
isModule : false
length : 20
@@ -721,7 +721,7 @@ scriptParsed
endLine : 3
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A84A8DC8126872EBA18
+ hash : 867981735982F829F7EB45184E245F5479702083
isLiveEdit : false
isModule : false
length : 21
@@ -740,7 +740,7 @@ scriptParsed
endLine : 4
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9CF28A415BF64507692D5A8443D429DC872EBA18
+ hash : 095D6881D29C0D76F7EB45184E245F5479702083
isLiveEdit : false
isModule : false
length : 22
diff --git a/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt b/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
index 75014e1144..4f0d26150e 100644
--- a/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate-expected.txt
@@ -7,7 +7,7 @@ Runtime.evaluate with valid expression
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 9D04F7335D1661503EAB9AF3EACAF92020803F34
+ hash : A933D880D35072516A27D817C0079F01FEE46E23
isLiveEdit : false
isModule : false
length : 29
@@ -26,7 +26,7 @@ Runtime.evaluate with syntax error
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 9BCA34A10E5386925E74C1716C857BEB02821E15
+ hash : 81077F73E5B26710D5CE891275FA68AC12493B57
isModule : false
length : 39
scriptId : <scriptId>
@@ -44,7 +44,7 @@ Runtime.callFunctionOn with valid functionDeclaration
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9D04CEA1B54DF92A01A0498543D429DC872EBA18
+ hash : 53BA9611A1E9ABE81E93AF82DEC875F94048A807
isLiveEdit : false
isModule : false
length : 18
@@ -81,7 +81,7 @@ Runtime.compileScript with valid expression
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9D04F733E4E4F1BA4CB5762843D429DC872EBA18
+ hash : 9D04F7336B76D25A4CB5762843D429DC872EBA18
isLiveEdit : false
isModule : false
length : 4
@@ -119,7 +119,7 @@ Runtime.evaluate compiled script with stack trace
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : A2185340E4E4F1BA4CB5762843D429DC872EBA18
+ hash : A2185340102B6713AB58F5467452EC0C872EBA18
isLiveEdit : false
isModule : false
length : 8
@@ -137,7 +137,7 @@ Runtime.evaluate compiled script with stack trace
endLine : 4
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 6AF4ED72C355E471C4E94D74464A25541932A242
+ hash : 31052EF18E11A850DBC812A6154A27BA713D3521
isLiveEdit : false
isModule : false
length : 86
@@ -166,7 +166,7 @@ Runtime.evaluate compiled script with stack trace
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9D04F733E4E4F1BA4CB5762843D429DC872EBA18
+ hash : 9D04F7336B76D25A4CB5762843D429DC872EBA18
isLiveEdit : false
isModule : false
length : 4
@@ -196,7 +196,7 @@ Runtime.evaluate compile script error with stack trace
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : A218534034AA14704CB5762843D429DC872EBA18
+ hash : 55C8AE40102B6713AB58F546879BAE951E6BF325
isLiveEdit : false
isModule : false
length : 12
@@ -214,7 +214,7 @@ Runtime.evaluate compile script error with stack trace
endLine : 4
executionContextId : <executionContextId>
hasSourceURL : true
- hash : 5FD5C8C38F2D44588A4078148B0BC5635152B0C0
+ hash : 578C87106744016A01ADEA516DC739379190F4E2
isLiveEdit : false
isModule : false
length : 98
diff --git a/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt b/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
index 88cb394555..a3a1b9e7bd 100644
--- a/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
@@ -34,7 +34,7 @@ Tests scripts hasing
endLine : 0
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 885818413D7FC3E2220B3E367FF57CB1D1572095
+ hash : 0449F3A28D4C32C1EC3696332EAD65644BDE59A7
isLiveEdit : false
isModule : false
length : 8106
diff --git a/deps/v8/test/inspector/debugger/script-unique-hash-expected.txt b/deps/v8/test/inspector/debugger/script-unique-hash-expected.txt
new file mode 100644
index 0000000000..4591b7faf3
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-unique-hash-expected.txt
@@ -0,0 +1,169 @@
+Checks hash in Debugger.scriptParsed event
+1
+2
+PASS
+1;
+2;
+PASS
+1 ;
+2 ;
+PASS
+ 1;
+ 2;
+PASS
+1 ;
+2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1;
+ 2;
+PASS
+1 ;
+2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1;
+ 2;
+PASS
+1 ;
+2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1;
+ 2;
+PASS
+1 ;
+2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1;
+ 2;
+PASS
+1 ;
+2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1;
+ 2;
+PASS
+1 ;
+2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1;
+ 2;
+PASS
+1 ;
+2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1;
+ 2;
+PASS
+1 ;
+2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1 ;
+ 2 ;
+PASS
+ 1;
+ 2;
+PASS
diff --git a/deps/v8/test/inspector/debugger/script-unique-hash.js b/deps/v8/test/inspector/debugger/script-unique-hash.js
new file mode 100644
index 0000000000..06eca5d4fc
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-unique-hash.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Checks hash in Debugger.scriptParsed event');
+
+(async function main() {
+ Protocol.Debugger.enable();
+
+ const tests = [{firstScript: '1', secondScript: '2'}];
+ for (let length = 1; length <= 10; ++length) {
+ for (let differentChar = 0; differentChar < length; ++differentChar) {
+ const firstScript = ' '.repeat(differentChar) + '1' +
+ ' '.repeat(length - differentChar - 1) + ';';
+ const secondScript = ' '.repeat(differentChar) + '2' +
+ ' '.repeat(length - differentChar - 1) + ';';
+ tests.push({firstScript, secondScript});
+ }
+ }
+
+ for (const {firstScript, secondScript} of tests) {
+ InspectorTest.log(firstScript);
+ const firstScriptParsed = Protocol.Debugger.onceScriptParsed();
+ Protocol.Runtime.evaluate({expression: firstScript});
+ const hash1 = (await firstScriptParsed).params.hash;
+
+ InspectorTest.log(secondScript);
+ const secondScriptParsed = Protocol.Debugger.onceScriptParsed();
+ Protocol.Runtime.evaluate({expression: secondScript});
+ const hash2 = (await secondScriptParsed).params.hash;
+
+ InspectorTest.log(hash1 === hash2 ? 'Error: the same hash!' : 'PASS');
+ }
+ InspectorTest.completeTest();
+})();
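The test pairs scripts that differ in exactly one character at each position up to length 10, guarding against a hash that ignores byte positions. Any position-sensitive hash behaves as the test demands; for illustration, FNV-1a changes its digest whenever any single byte changes (illustrative algorithm only, not V8's actual script hash):

#include <cstdint>
#include <cstdio>
#include <string_view>

uint64_t Fnv1a(std::string_view s) {
  uint64_t h = 14695981039346656037ull;  // FNV-1a 64-bit offset basis
  for (unsigned char c : s) {
    h ^= c;
    h *= 1099511628211ull;               // FNV-1a 64-bit prime
  }
  return h;
}

int main() {
  // The inputs differ in a single byte, yet the digests differ.
  std::printf("%016llx\n", (unsigned long long)Fnv1a(" 1 ;"));
  std::printf("%016llx\n", (unsigned long long)Fnv1a(" 2 ;"));
}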
diff --git a/deps/v8/test/inspector/debugger/step-into-external-async-task.js b/deps/v8/test/inspector/debugger/step-into-external-async-task.js
index 7c7c23816d..b0d55c950d 100644
--- a/deps/v8/test/inspector/debugger/step-into-external-async-task.js
+++ b/deps/v8/test/inspector/debugger/step-into-external-async-task.js
@@ -18,8 +18,11 @@ function store(description) {
}
//# sourceURL=utils.js`;
-contextGroup1.addScript(utilsScript);
+// TODO(rmcilroy): This has to be in this order since the i::Script object gets
+// reused via the CompilationCache, and we want OnAfterCompile to be called
+// for contextGroup1 last on this script.
contextGroup2.addScript(utilsScript);
+contextGroup1.addScript(utilsScript);
let frameworkScript = `
function call(id, f) {
diff --git a/deps/v8/test/inspector/debugger/step-into-optimized-blackbox-expected.txt b/deps/v8/test/inspector/debugger/step-into-optimized-blackbox-expected.txt
new file mode 100644
index 0000000000..5f8f657a37
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-into-optimized-blackbox-expected.txt
@@ -0,0 +1,9 @@
+Tests stepping with blackboxing and inlining
+Paused in
+(...):1
+Paused in
+(...):1
+Paused in
+foo:2
+bar:2
+(...):1
diff --git a/deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js b/deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js
new file mode 100644
index 0000000000..75505891bb
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js
@@ -0,0 +1,46 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests stepping with blackboxing and inlining');
+
+contextGroup.addScript(
+`function bar() {
+ return 1 + foo();
+}
+//# sourceURL=bar.js`);
+
+contextGroup.addScript(
+`function foo() {
+ return "foo";
+}
+//# sourceURL=foo.js`);
+
+Protocol.Debugger.enable();
+Protocol.Debugger.setBlackboxPatterns({ patterns: [ "bar.js" ] });
+
+Protocol.Debugger.onPaused(PerformSteps);
+Protocol.Runtime.evaluate({
+ "expression": "bar(); bar(); %OptimizeFunctionOnNextCall(bar); bar()"
+});
+Protocol.Runtime.evaluate({ "expression": "debugger; bar();" });
+
+var commands = [ "stepInto", "stepInto" ];
+
+function PerformSteps(message) {
+ InspectorTest.log("Paused in");
+ var callFrames = message.params.callFrames;
+ for (var callFrame of callFrames) {
+ InspectorTest.log(
+ (callFrame.functionName || "(...)") + ":" + (callFrame.location.lineNumber + 1));
+ }
+ var command = commands.shift();
+ if (!command) {
+ InspectorTest.completeTest();
+ return;
+ }
+ Protocol.Debugger[command]();
+}
diff --git a/deps/v8/test/inspector/heap-profiler/console-retaining-path-expected.txt b/deps/v8/test/inspector/heap-profiler/console-retaining-path-expected.txt
new file mode 100644
index 0000000000..49e0985081
--- /dev/null
+++ b/deps/v8/test/inspector/heap-profiler/console-retaining-path-expected.txt
@@ -0,0 +1,5 @@
+Tests edge labels of objects retained by DevTools.
+
+Running test: testConsoleRetainingPath
+Edge from (Global handles) to MyClass1: DevTools console
+Edge from (Global handles) to MyClass2: DevTools console
diff --git a/deps/v8/test/inspector/heap-profiler/console-retaining-path.js b/deps/v8/test/inspector/heap-profiler/console-retaining-path.js
new file mode 100644
index 0000000000..7021e7f6e6
--- /dev/null
+++ b/deps/v8/test/inspector/heap-profiler/console-retaining-path.js
@@ -0,0 +1,97 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Tests edge labels of objects retained by DevTools.');
+
+const kNodeName = 1;
+const kNodeEdgeCount = 4;
+const kNodeSize = 6;
+const kEdgeName = 1;
+const kEdgeTarget = 2;
+const kEdgeSize = 3;
+
+function EdgeName(snapshot, edgeIndex) {
+ return snapshot['strings'][snapshot['edges'][edgeIndex + kEdgeName]];
+}
+
+function EdgeTarget(snapshot, edgeIndex) {
+ return snapshot['edges'][edgeIndex + kEdgeTarget];
+}
+
+function EdgeCount(snapshot, nodeIndex) {
+ return snapshot['nodes'][nodeIndex + kNodeEdgeCount];
+}
+
+function NodeName(snapshot, nodeIndex) {
+ return snapshot['strings'][snapshot['nodes'][nodeIndex + kNodeName]];
+}
+
+function NodeEdges(snapshot, nodeIndex) {
+ let startEdgeIndex = 0;
+ for (let i = 0; i < nodeIndex; i += kNodeSize) {
+ startEdgeIndex += EdgeCount(snapshot, i);
+ }
+ let endEdgeIndex = startEdgeIndex + EdgeCount(snapshot, nodeIndex);
+ let result = [];
+ for (let i = startEdgeIndex; i < endEdgeIndex; ++i) {
+ result.push(i * kEdgeSize);
+ }
+ return result;
+}
+
+function NodeByName(snapshot, name) {
+ let count = snapshot['nodes'].length / kNodeSize;
+ for (let i = 0; i < count; i++) {
+ if (NodeName(snapshot, i * kNodeSize) == name) return i * kNodeSize;
+ }
+  InspectorTest.log(`Cannot find node ${name}`);
+ return 0;
+}
+
+function FindEdge(snapshot, sourceName, targetName) {
+ let sourceIndex = NodeByName(snapshot, sourceName);
+ let targetIndex = NodeByName(snapshot, targetName);
+ let edges = NodeEdges(snapshot, sourceIndex);
+ for (let edge of edges) {
+ if (EdgeTarget(snapshot, edge) == targetIndex) return edge;
+ }
+ InspectorTest.log(`Cannot find edge between ${sourceName} and ${targetName}`);
+ return 0;
+}
+
+function GlobalHandleEdgeName(snapshot, targetName) {
+ let edge = FindEdge(snapshot, '(Global handles)', targetName);
+ let edgeName = EdgeName(snapshot, edge);
+ // Make the test more robust by skipping the edge index prefix and
+ // a single space.
+ return edgeName.substring(edgeName.indexOf('/') + 2);
+}
+
+contextGroup.addScript(`
+class MyClass1 {};
+class MyClass2 {};
+//# sourceURL=test.js`);
+
+Protocol.Debugger.enable();
+Protocol.HeapProfiler.enable();
+
+InspectorTest.runAsyncTestSuite([
+ async function testConsoleRetainingPath() {
+ let snapshot_string = '';
+ function onChunk(message) {
+ snapshot_string += message['params']['chunk'];
+ }
+    Protocol.HeapProfiler.onAddHeapSnapshotChunk(onChunk);
+ await Protocol.Runtime.evaluate({ expression: 'new MyClass1();' });
+ await Protocol.Runtime.evaluate(
+ { expression: 'console.log(new MyClass2());' });
+    await Protocol.HeapProfiler.takeHeapSnapshot({ reportProgress: false });
+ let snapshot = JSON.parse(snapshot_string);
+ let edge1 = GlobalHandleEdgeName(snapshot, 'MyClass1');
+ let edge2 = GlobalHandleEdgeName(snapshot, 'MyClass2');
+ InspectorTest.log(`Edge from (Global handles) to MyClass1: ${edge1}`);
+ InspectorTest.log(`Edge from (Global handles) to MyClass2: ${edge2}`);
+ }
+]);
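The helpers above decode the snapshot's flat layout: each node occupies kNodeSize slots, each edge kEdgeSize slots, and a node's edges are stored after the edges of all earlier nodes, which is why NodeEdges sums edge counts first. A compact standalone sketch of the index arithmetic (same field offsets as the test, toy arrays):

#include <cstddef>
#include <vector>

constexpr size_t kNodeSize = 6, kNodeEdgeCount = 4, kEdgeSize = 3;

// Slot of the first edge belonging to the node starting at node_index.
size_t FirstEdgeSlot(const std::vector<int>& nodes, size_t node_index) {
  size_t edges_before = 0;
  for (size_t i = 0; i < node_index; i += kNodeSize)
    edges_before += nodes[i + kNodeEdgeCount];
  return edges_before * kEdgeSize;
}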
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 04a23df528..513699fa6f 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -707,6 +707,10 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
inspector->Set(ToV8String(isolate, "scheduleWithAsyncStack"),
v8::FunctionTemplate::New(
isolate, &InspectorExtension::ScheduleWithAsyncStack));
+ inspector->Set(
+ ToV8String(isolate, "setAllowCodeGenerationFromStrings"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::SetAllowCodeGenerationFromStrings));
global->Set(ToV8String(isolate, "inspector"), inspector);
}
@@ -957,6 +961,17 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
v8::Local<v8::Function>::Cast(args[0])));
if (with_empty_stack) context->Enter();
}
+
+ static void SetAllowCodeGenerationFromStrings(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsBoolean()) {
+ fprintf(stderr,
+ "Internal error: setAllowCodeGenerationFromStrings(allow).");
+ Exit();
+ }
+ args.GetIsolate()->GetCurrentContext()->AllowCodeGenerationFromStrings(
+ args[0].As<v8::Boolean>()->Value());
+ }
};
} // namespace
diff --git a/deps/v8/test/inspector/inspector.gyp b/deps/v8/test/inspector/inspector.gyp
deleted file mode 100644
index b465f9e552..0000000000
--- a/deps/v8/test/inspector/inspector.gyp
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'v8_code': 1,
- },
- 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
- 'targets': [
- {
- 'target_name': 'inspector-test',
- 'type': 'executable',
- 'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
- '../../src/v8.gyp:v8_libbase',
- '../../src/v8.gyp:v8',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- 'inspector-test.cc',
- 'isolate-data.cc',
- 'isolate-data.h',
- 'task-runner.cc',
- 'task-runner.h',
- ],
- 'conditions': [
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ],
- }],
- ],
- },
- ],
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'inspector-test_run',
- 'type': 'none',
- 'dependencies': [
- 'inspector-test',
- ],
- 'includes': [
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'inspector.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/inspector/runtime/es6-module-expected.txt b/deps/v8/test/inspector/runtime/es6-module-expected.txt
index 646fd018ea..2ce561c7d0 100644
--- a/deps/v8/test/inspector/runtime/es6-module-expected.txt
+++ b/deps/v8/test/inspector/runtime/es6-module-expected.txt
@@ -6,7 +6,7 @@ Checks basic ES6 modules support.
endLine : 5
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 9C014F7249BAFA12B91017817AD15091D01A9155
+ hash : E885A593519C4CD31BC243D5D06DCB961A1C69B5
isLiveEdit : false
isModule : true
length : 83
@@ -24,7 +24,7 @@ Checks basic ES6 modules support.
endLine : 5
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 443A2FA24A6112E6B9101781E6A19B56BDC396D4
+ hash : EFD8445134EBF926917EAE6072964574AB6D6C60
isLiveEdit : false
isModule : true
length : 84
@@ -42,7 +42,7 @@ Checks basic ES6 modules support.
endLine : 8
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 54D834614FBF9B389082DAE06CD3EFC499BEBF13
+ hash : B454ADE26B54DA4057C498A205C459099654AC47
isLiveEdit : false
isModule : true
length : 191
diff --git a/deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt b/deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt
new file mode 100644
index 0000000000..080db69d7e
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-run-microtasks-expected.txt
@@ -0,0 +1,39 @@
+Tests that microtasks run before the Runtime.evaluate response is sent
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ description : 42
+ type : number
+ value : 42
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 37
+ functionName : Promise.resolve.then
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ timestamp : <timestamp>
+ type : log
+ }
+}
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Promise
+ description : Promise
+ objectId : <objectId>
+ subtype : promise
+ type : object
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/evaluate-run-microtasks.js b/deps/v8/test/inspector/runtime/evaluate-run-microtasks.js
new file mode 100644
index 0000000000..f2b6f28210
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-run-microtasks.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {Protocol} = InspectorTest.start(
+ 'Tests that microtasks run before the Runtime.evaluate response is sent');
+
+Protocol.Runtime.enable();
+Protocol.Runtime.onConsoleAPICalled(InspectorTest.logMessage);
+Protocol.Runtime
+ .evaluate({expression: 'Promise.resolve().then(() => console.log(42))'})
+ .then(InspectorTest.logMessage)
+ .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index 64fa163a5e..66db34b562 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -14,7 +14,7 @@ EXPECTED_SUFFIX = "-expected.txt"
RESOURCES_FOLDER = "resources"
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(
os.path.join(self.root), followlinks=True):
@@ -43,7 +43,7 @@ class TestCase(testcase.TestCase):
self._source_flags = self._parse_source_flags()
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [
os.path.join(self.suite.root, PROTOCOL_TEST_JS),
os.path.join(self.suite.root, self.path + self._get_suffix()),
@@ -65,5 +65,5 @@ class TestCase(testcase.TestCase):
os.path.join(self.suite.root, self.path) + EXPECTED_SUFFIX)
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/intl/date-format/month-far-future.js b/deps/v8/test/intl/date-format/month-far-future.js
new file mode 100644
index 0000000000..930ac388b3
--- /dev/null
+++ b/deps/v8/test/intl/date-format/month-far-future.js
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test for crbug.com/801602.
+
+var locales = [
+ "en-u-ca-gregori",
+ "fa-u-ca-persian",
+ "ar-u-ca-islamic-civil",
+ "ar-u-ca-islamic-umalqura",
+ "ar-u-ca-islamic-tbla",
+ "ar-u-ca-islamic-rgsa",
+ "he-u-ca-hebrew",
+ "zh-u-ca-chinese",
+ "ko-u-ca-dangi",
+ "ja-u-ca-japanese",
+ "am-u-ca-ethiopic",
+ "am-u-ca-ethioaa",
+ "hi-u-ca-indian",
+ "th-u-ca-buddhist",
+];
+
+var hugeNum = 1.7976931348623157e+308;
+
+locales.forEach(function(loc) {
+ var df = new Intl.DateTimeFormat(loc, {month: "long"});
+  assertFalse(df.format(hugeNum) == '');
+});
diff --git a/deps/v8/test/intl/intl.gyp b/deps/v8/test/intl/intl.gyp
deleted file mode 100644
index f2e107f523..0000000000
--- a/deps/v8/test/intl/intl.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'intl_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'intl.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 87aece3da6..aa81c93434 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -31,7 +31,7 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
@@ -59,7 +59,7 @@ class TestCase(testcase.TestCase):
self._source_flags = self._parse_source_flags()
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
files = map(lambda f: os.path.join(self.suite.root, f), [
'assert.js',
'utils.js',
@@ -68,19 +68,19 @@ class TestCase(testcase.TestCase):
'regexp-assert.js',
])
- if ctx.isolates:
+ if self._test_config.isolates:
files += ['--isolate'] + files
return files
def _get_source_flags(self):
return self._source_flags
- def _get_suite_flags(self, ctx):
+ def _get_suite_flags(self):
return ['--allow-natives-syntax']
def _get_source_path(self):
return os.path.join(self.suite.root, self.path + self._get_suffix())
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/js-perf-test/Array/from.js b/deps/v8/test/js-perf-test/Array/from.js
new file mode 100644
index 0000000000..2c6bd265bd
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Array/from.js
@@ -0,0 +1,153 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(() => {
+
+benchy('MixedFrom', MixedFrom, MixedFromSetup);
+benchy('SmiFrom', SmiFrom, SmiFromSetup);
+benchy('SmallSmiFrom', SmallSmiFrom, SmallSmiFromSetup);
+benchy('SmiNoMapFrom', SmiNoMapFrom, SmiNoMapFromSetup);
+benchy('SmiNoIteratorFrom', SmiNoIteratorFrom, SmiNoIteratorFromSetup);
+benchy('TransplantedFrom', TransplantedFrom, TransplantedFromSetup);
+benchy('DoubleFrom', DoubleFrom, DoubleFromSetup);
+benchy('StringFrom', StringFrom, StringFromSetup);
+benchy('StringNoMapFrom', StringNoMapFrom, StringNoMapFromSetup);
+
+function ArrayLike() {}
+ArrayLike.from = Array.from;
+
+var arg;
+var result;
+var func;
+
+var smi_array = [
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
+];
+
+var double_array = [
+ 1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,
+ 11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,
+ 1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,
+ 11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,
+ 1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,
+ 11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,
+ 1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,
+ 11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,
+ 1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,
+ 11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,
+];
+
+var string_array = [
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+];
+
+var mixed_array = [
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
+ 1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,
+ 11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,
+ 1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,
+ 11.5,12.5,13.5,14.5,15.5,16.5,17.5,18.5,19.5,20.5,
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+ "a", "b", "c", "a", "b", "c", "a", "b", "c", "a",
+];
+
+// Although these functions have the same code, they are separated for
+// clean IC feedback.
+function SmallSmiFrom() {
+ result = Array.from(arg, func);
+}
+
+function SmiNoMapFrom() {
+ result = Array.from(arg);
+}
+
+function SmiFrom() {
+ result = Array.from(arg, func);
+}
+
+function SmiNoIteratorFrom() {
+ result = Array.from(arg, func);
+}
+
+function TransplantedFrom() {
+ result = ArrayLike.from(arg, func);
+}
+
+function DoubleFrom() {
+ result = Array.from(arg, func);
+}
+
+function StringFrom() {
+ result = Array.from(arg, func);
+}
+
+function StringNoMapFrom() {
+ result = Array.from(arg);
+}
+
+function MixedFrom() {
+ result = Array.from(arg, func);
+}
+
+function SmallSmiFromSetup() {
+ func = (v,i) => v + i;
+ arg = [1,2,3];
+}
+
+function SmiNoMapFromSetup() {
+ func = undefined;
+ arg = smi_array;
+}
+
+function SmiFromSetup() {
+ func = (v,i) => v + i;
+ arg = smi_array;
+}
+
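+// Builds an array-like object (indexed properties and a length, but no
+// Symbol.iterator) so Array.from takes its array-like path.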
+function SmiNoIteratorFromSetup() {
+  func = (v,i) => v + i;
+  var array = smi_array;
+  arg = {length: array.length};
+  Object.assign(arg, array);
+}
+
+function TransplantedFromSetup() {
+ func = (v,i) => v + i;
+ arg = smi_array;
+}
+
+function DoubleFromSetup() {
+ func = (v,i) => v + i;
+ arg = double_array;
+}
+
+function StringFromSetup() {
+ func = (v,i) => v + i;
+ arg = string_array;
+}
+
+function StringNoMapFromSetup() {
+ func = undefined;
+ arg = string_array;
+}
+
+function MixedFromSetup() {
+ func = (v,i) => v + i;
+ arg = mixed_array;
+}
+
+})();
diff --git a/deps/v8/test/js-perf-test/Array/run.js b/deps/v8/test/js-perf-test/Array/run.js
index 15b4da94c2..623047678a 100644
--- a/deps/v8/test/js-perf-test/Array/run.js
+++ b/deps/v8/test/js-perf-test/Array/run.js
@@ -77,9 +77,10 @@ load('reduce.js');
load('reduce-right.js');
load('find.js');
load('find-index.js');
-load('of.js');
// Other Array builtins.
+load('from.js');
+load('of.js');
load('join.js');
load('to-string.js');
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralSpread/run.js b/deps/v8/test/js-perf-test/ArrayLiteralSpread/run.js
new file mode 100644
index 0000000000..605df86ef9
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralSpread/run.js
@@ -0,0 +1,97 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+const input = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: Babel
+// ----------------------------------------------------------------------------
+
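+// Babel's spread helper: copies real arrays element by element and falls back
+// to Array.from for other iterables.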
+function _toConsumableArray(arr) {
+ if (Array.isArray(arr)) {
+ for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) {
+ arr2[i] = arr[i];
+ }
+ return arr2;
+ } else {
+ return Array.from(arr);
+ }
+}
+
+function Babel() {
+ const result = [0].concat(_toConsumableArray(input));
+ if (result.length != 11) throw 666;
+}
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+
+function ForOfPush() {
+ const result = [0];
+ for (const x of input) {
+ result.push(x);
+ }
+ if (result.length != 11) throw 666;
+}
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfSet
+// ----------------------------------------------------------------------------
+
+
+function ForOfSet() {
+ const result = [0];
+ for (const x of input) {
+ result[result.length] = x;
+ }
+ if (result.length != 11) throw 666;
+}
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+
+function Spread() {
+ const result = [0, ...input];
+ if (result.length != 11) throw 666;
+}
+
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralSpread(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [100], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('Babel', Babel);
+CreateBenchmark('ForOfPush', ForOfPush);
+CreateBenchmark('ForOfSet', ForOfSet);
+CreateBenchmark('Spread', Spread);
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/Generators/generators.js b/deps/v8/test/js-perf-test/Generators/generators.js
index d49dee3069..b891f2a5fb 100644
--- a/deps/v8/test/js-perf-test/Generators/generators.js
+++ b/deps/v8/test/js-perf-test/Generators/generators.js
@@ -67,7 +67,7 @@ function Loop() {
// ----------------------------------------------------------------------------
function* multiples(x) {
- let skip = function.sent || 0;
+ let skip = 2;
let next = 0;
while (true) {
if (skip === 0) {
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index a1a19f5988..b4484444ef 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -56,10 +56,22 @@
"path": ["Generators"],
"main": "run.js",
"resources": ["generators.js"],
- "flags": ["--harmony-function-sent"],
"results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
},
{
+ "name": "ArrayLiteralSpread",
+ "path": ["ArrayLiteralSpread"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralSpread\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Babel"},
+ {"name": "ForOfPush"},
+ {"name": "ForOfSet"},
+ {"name": "Spread"}
+ ]
+ },
+ {
"name": "RestParameters",
"path": ["RestParameters"],
"main": "run.js",
@@ -190,7 +202,14 @@
{"name": "StringIndexOfConstant"},
{"name": "StringIndexOfNonConstant"},
{"name": "StringCharCodeAtConstant"},
- {"name": "StringCharCodeAtNonConstant"}
+ {"name": "StringCharCodeAtNonConstant"},
+ {"name": "StringCharCodeAtConstantInbounds"},
+ {"name": "StringCharCodeAtNonConstantInbounds"},
+ {"name": "StringCodePointAtConstant"},
+ {"name": "StringCodePointAtNonConstant"},
+ {"name": "StringCodePointAtConstantInbounds"},
+ {"name": "StringCodePointAtNonConstantInbounds"}
]
},
{
@@ -397,7 +416,7 @@
"resources": [
"filter.js", "map.js", "every.js", "join.js", "some.js",
"reduce.js", "reduce-right.js", "to-string.js", "find.js",
- "find-index.js", "of.js"
+ "find-index.js", "from.js", "of.js", "for-each.js"
],
"flags": [
"--allow-natives-syntax"
@@ -473,7 +492,16 @@
{"name": "SmallTransplantedArrayOf"},
{"name": "SmallDoubleArrayOf"},
{"name": "SmallStringArrayOf"},
- {"name": "SmallMixedArrayOf"}
+ {"name": "SmallMixedArrayOf"},
+ {"name": "SmiFrom"},
+ {"name": "SmallSmiFrom"},
+ {"name": "SmiNoMapFrom"},
+ {"name": "SmiNoIteratorFrom"},
+ {"name": "TransplantedFrom"},
+ {"name": "DoubleFrom"},
+ {"name": "StringFrom"},
+ {"name": "StringNoMapFrom"},
+ {"name": "MixedFrom"}
]
},
{
diff --git a/deps/v8/test/js-perf-test/OWNERS b/deps/v8/test/js-perf-test/OWNERS
new file mode 100644
index 0000000000..e46cedb98b
--- /dev/null
+++ b/deps/v8/test/js-perf-test/OWNERS
@@ -0,0 +1 @@
+per-file JSTests.json=petermarshall@chromium.org
diff --git a/deps/v8/test/js-perf-test/Strings/string-indexof.js b/deps/v8/test/js-perf-test/Strings/string-indexof.js
index a2049e0fe6..d110ce6c0e 100644
--- a/deps/v8/test/js-perf-test/Strings/string-indexof.js
+++ b/deps/v8/test/js-perf-test/Strings/string-indexof.js
@@ -36,17 +36,28 @@ function StringIndexOfNonConstant() {
}
new BenchmarkSuite('StringCharCodeAtConstant', [3], [
- new Benchmark('StringIndexOfConstant', true, false, 0,
+ new Benchmark('StringCharCodeAtConstant', true, false, 0,
StringCharCodeAtConstant),
]);
new BenchmarkSuite('StringCharCodeAtNonConstant', [3], [
- new Benchmark('StringIndexOfNonConstant', true, false, 0,
+ new Benchmark('StringCharCodeAtNonConstant', true, false, 0,
StringCharCodeAtNonConstant),
]);
+new BenchmarkSuite('StringCharCodeAtConstantInbounds', [3], [
+ new Benchmark('StringCharCodeAtConstantInbounds', true, false, 0,
+ StringCharCodeAtConstantInbounds),
+]);
+
+new BenchmarkSuite('StringCharCodeAtNonConstantInbounds', [3], [
+ new Benchmark('StringCharCodeAtNonConstantInbounds', true, false, 0,
+ StringCharCodeAtNonConstantInbounds),
+]);
+
const string = "qweruiplkjhgfdsazxccvbnm";
const indices = [1, 13, 32, 100, "xx"];
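+// The "Inbounds" variants use only indices within the string, avoiding the
+// out-of-bounds paths of charCodeAt/codePointAt.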
+const indicesInbounds = [1, 7, 13, 17, "xx"];
function StringCharCodeAtConstant() {
var sum = 0;
@@ -67,3 +78,85 @@ function StringCharCodeAtNonConstant() {
return sum;
}
+
+function StringCharCodeAtConstantInbounds() {
+ var sum = 0;
+
+ for (var j = 0; j < indicesInbounds.length - 1; ++j) {
+ sum += string.charCodeAt(indicesInbounds[j] | 0);
+ }
+
+ return sum;
+}
+
+function StringCharCodeAtNonConstantInbounds() {
+ var sum = 0;
+
+ for (var j = 0; j < indicesInbounds.length - 1; ++j) {
+ sum += string.charCodeAt(indicesInbounds[j]);
+ }
+
+ return sum;
+}
+
+new BenchmarkSuite('StringCodePointAtConstant', [3], [
+ new Benchmark('StringCodePointAtConstant', true, false, 0,
+ StringCodePointAtConstant),
+]);
+
+new BenchmarkSuite('StringCodePointAtNonConstant', [3], [
+ new Benchmark('StringCodePointAtNonConstant', true, false, 0,
+ StringCodePointAtNonConstant),
+]);
+
+new BenchmarkSuite('StringCodePointAtConstantInbounds', [3], [
+ new Benchmark('StringCodePointAtConstantInbounds', true, false, 0,
+ StringCodePointAtConstantInbounds),
+]);
+
+new BenchmarkSuite('StringCodePointAtNonConstantInbounds', [3], [
+ new Benchmark('StringCodePointAtNonConstantInbounds', true, false, 0,
+ StringCodePointAtNonConstantInbounds),
+]);
+
+const unicode_string = "qweräϠ�𝌆krefdäϠ�𝌆ccäϠ�𝌆";
+
+function StringCodePointAtConstant() {
+ var sum = 0;
+
+ for (var j = 0; j < indices.length - 1; ++j) {
+ sum += unicode_string.codePointAt(indices[j] | 0);
+ }
+
+ return sum;
+}
+
+function StringCodePointAtNonConstant() {
+ var sum = 0;
+
+ for (var j = 0; j < indices.length - 1; ++j) {
+ sum += unicode_string.codePointAt(indices[j]);
+ }
+
+ return sum;
+}
+
+function StringCodePointAtConstantInbounds() {
+ var sum = 0;
+
+ for (var j = 0; j < indicesInbounds.length - 1; ++j) {
+ sum += unicode_string.codePointAt(indicesInbounds[j] | 0);
+ }
+
+ return sum;
+}
+
+function StringCodePointAtNonConstantInbounds() {
+ var sum = 0;
+
+ for (var j = 0; j < indicesInbounds.length - 1; ++j) {
+ sum += unicode_string.codePointAt(indicesInbounds[j]);
+ }
+
+ return sum;
+}
diff --git a/deps/v8/test/message/fail/function-sent-escaped.js b/deps/v8/test/message/fail/function-sent-escaped.js
deleted file mode 100644
index aa17258f85..0000000000
--- a/deps/v8/test/message/fail/function-sent-escaped.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-function-sent
-
-function* f() {
- return function.s\u0065nt;
-}
-for (var i of f()) print(i);
diff --git a/deps/v8/test/message/fail/function-sent-escaped.out b/deps/v8/test/message/fail/function-sent-escaped.out
deleted file mode 100644
index d9613d8ef4..0000000000
--- a/deps/v8/test/message/fail/function-sent-escaped.out
+++ /dev/null
@@ -1,4 +0,0 @@
-*%(basename)s:8: SyntaxError: 'function.sent' must not contain escaped characters
- return function.s\u0065nt;
- ^^^^^^^^^^^^^^^^^^
-SyntaxError: 'function.sent' must not contain escaped characters
diff --git a/deps/v8/test/message/fail/paren_in_arg_string.out b/deps/v8/test/message/fail/paren_in_arg_string.out
index 9784712ab8..d71e952122 100644
--- a/deps/v8/test/message/fail/paren_in_arg_string.out
+++ b/deps/v8/test/message/fail/paren_in_arg_string.out
@@ -1,6 +1,6 @@
-*%(basename)s:29: SyntaxError: Function arg string contains parenthesis
-var paren_in_arg_string_bad = new Function(')', 'return;');
- ^
-SyntaxError: Function arg string contains parenthesis
+undefined:1: SyntaxError: Arg string terminates parameters early
+(function anonymous()
+ ^
+SyntaxError: Arg string terminates parameters early
at new Function (<anonymous>)
at *%(basename)s:29:31
diff --git a/deps/v8/test/message/fail/redeclaration4.js b/deps/v8/test/message/fail/redeclaration4.js
new file mode 100644
index 0000000000..3cef563f30
--- /dev/null
+++ b/deps/v8/test/message/fail/redeclaration4.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
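+// In strict mode, function declarations are block-scoped lexical bindings, so
+// redeclaring 'foo' within one block is a SyntaxError.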
+{
+ function foo() {}
+ function foo() {}
+}
diff --git a/deps/v8/test/message/fail/redeclaration4.out b/deps/v8/test/message/fail/redeclaration4.out
new file mode 100644
index 0000000000..9e1a7cbce9
--- /dev/null
+++ b/deps/v8/test/message/fail/redeclaration4.out
@@ -0,0 +1,5 @@
+*%(basename)s:9: SyntaxError: Identifier 'foo' has already been declared
+ function foo() {}
+ ^
+SyntaxError: Identifier 'foo' has already been declared
+
diff --git a/deps/v8/test/message/fail/redeclaration5.js b/deps/v8/test/message/fail/redeclaration5.js
new file mode 100644
index 0000000000..d47593ef08
--- /dev/null
+++ b/deps/v8/test/message/fail/redeclaration5.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+
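+// At module top level, function declarations behave like lexical bindings, so
+// redeclaring 'foo' is a SyntaxError.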
+function foo() {}
+function foo() {}
diff --git a/deps/v8/test/message/fail/redeclaration5.out b/deps/v8/test/message/fail/redeclaration5.out
new file mode 100644
index 0000000000..241582e0ee
--- /dev/null
+++ b/deps/v8/test/message/fail/redeclaration5.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: SyntaxError: Identifier 'foo' has already been declared
+function foo() {}
+^
+SyntaxError: Identifier 'foo' has already been declared
+
diff --git a/deps/v8/test/message/message.gyp b/deps/v8/test/message/message.gyp
deleted file mode 100644
index fc1ae32f4e..0000000000
--- a/deps/v8/test/message/message.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'message_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'message.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/message/mjsunit/fail/assert_true.js b/deps/v8/test/message/mjsunit/fail/assert_true.js
new file mode 100644
index 0000000000..363f41a781
--- /dev/null
+++ b/deps/v8/test/message/mjsunit/fail/assert_true.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/mjsunit.js");
+
+assertTrue(false);
diff --git a/deps/v8/test/message/mjsunit/fail/assert_true.out b/deps/v8/test/message/mjsunit/fail/assert_true.out
new file mode 100644
index 0000000000..055cac4cde
--- /dev/null
+++ b/deps/v8/test/message/mjsunit/fail/assert_true.out
@@ -0,0 +1,10 @@
+test/mjsunit/mjsunit.js:{NUMBER}: Failure: expected <true> found <false>
+
+Stack: MjsUnitAssertionError
+ at assertTrue *mjsunit.js {NUMBER}:{NUMBER}
+ at *%(basename)s 7:1
+ throw new MjsUnitAssertionError(message);
+ ^
+MjsUnitAssertionError
+ at assertTrue *mjsunit.js {NUMBER}:{NUMBER}
+ at *%(basename)s 7:1
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index cd1495f390..b0f821f62f 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -39,7 +39,7 @@ MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
@@ -58,13 +58,6 @@ class TestSuite(testsuite.TestSuite):
def _test_class(self):
return TestCase
- def CreateLegacyVariantsGenerator(self, variants):
- return super(TestSuite, self).CreateLegacyVariantsGenerator(
- variants + ["preparser"])
-
- def create_variant_proc(self, variants):
- return super(TestSuite, self).create_variant_proc(variants + ['preparser'])
-
class TestCase(testcase.TestCase):
def __init__(self, *args, **kwargs):
@@ -90,11 +83,11 @@ class TestCase(testcase.TestCase):
path = head
return False
- def _get_cmd_params(self, ctx):
- params = super(TestCase, self)._get_cmd_params(ctx)
+ def _get_cmd_params(self):
+ params = super(TestCase, self)._get_cmd_params()
return [p for p in params if p not in INVALID_FLAGS]
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return self._source_files
def _get_source_flags(self):
@@ -110,5 +103,5 @@ class TestCase(testcase.TestCase):
self._expected_fail())
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js
index 171a40f092..691cd54098 100644
--- a/deps/v8/test/mjsunit/array-reduce.js
+++ b/deps/v8/test/mjsunit/array-reduce.js
@@ -641,6 +641,54 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
assertEquals(total, g());
})();
+(function OptimizedReduceEagerDeoptMiddleOfIterationHoley() {
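+  // Storing a double into the holey Smi array mid-iteration transitions its
+  // elements kind and eagerly deopts the optimized reduce.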
+ let deopt = false;
+ let array = [, ,11,22,,33,45,56,,6,77,84,93,101,];
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[0] = 1.5; }
+ return a + current;
+ };
+ let g = function() {
+ return array.reduce(f);
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ g();
+ deopt = false;
+ array = [11,22,33,45,56,6,77,84,93,101];
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ assertEquals(total, g());
+})();
+
+(function OptimizedReduceRightEagerDeoptMiddleOfIterationHoley() {
+ let deopt = false;
+ let array = [, ,11,22,,33,45,56,,6,77,84,93,101,];
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[array.length-1] = 1.5; }
+ return a + current;
+ };
+ let g = function() {
+ return array.reduceRight(f);
+ }
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ g();
+ deopt = false;
+ array = [11,22,33,45,56,6,77,84,93,101];
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ assertEquals(total, g());
+})();
+
(function ReduceCatch() {
let f = (a,current) => {
return a + current;
@@ -1241,3 +1289,14 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
%OptimizeFunctionOnNextCall(__f_3253);
assertEquals(18, __f_3253(__v_12258));
})();
+
+(function ReduceMixedHoleyArrays() {
+ function r(a) {
+ return a.reduce((acc, i) => {acc[0]});
+ }
+ r([[0]]);
+ r([[0]]);
+ r([0,,]);
+ %OptimizeFunctionOnNextCall(r);
+ r([,0,0]);
+})();
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index acc96117be..0454da8cae 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -503,6 +503,19 @@ function TestSortOnNonExtensible() {
}
TestSortOnNonExtensible();
+function TestSortOnTypedArray() {
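+  // An own "length" property shadows the TypedArray's real length; the generic
+  // Array.prototype.sort honors it and compares elements as strings by default.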
+ var array = new Int8Array([10,9,8,7,6,5,4,3,2,1]);
+ Object.defineProperty(array, "length", {value: 5});
+ Array.prototype.sort.call(array);
+ assertEquals(array, new Int8Array([10,6,7,8,9,5,4,3,2,1]));
+
+ var array = new Int8Array([10,9,8,7,6,5,4,3,2,1]);
+ Object.defineProperty(array, "length", {value: 15});
+ Array.prototype.sort.call(array);
+ assertEquals(array, new Int8Array([1,10,2,3,4,5,6,7,8,9]));
+}
+TestSortOnTypedArray();
+
// Test special prototypes
(function testSortSpecialPrototypes() {
diff --git a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
index 2ef0cc3a01..f615032d97 100644
--- a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
+++ b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
@@ -29,21 +29,23 @@ function runTest(f, message, mkICTraining, deoptArg) {
// Make sure the optimized function can handle
// all trained maps without deopt.
for (let a of t3) {
+ message += " for args " + JSON.stringify(a) + " should have been optimized";
f(a.arr, () => a.el);
- message += " for args " + JSON.stringify(a);
- assertOptimized(f, undefined, message + " should have been optimized");
+ assertOptimized(f, undefined, message);
}
} else {
// Trigger deopt, causing no-speculation bit to be set.
let a1 = deoptArg;
let a2 = deoptArg;
message += " for args " + JSON.stringify(a1);
+    let message_unoptimized = message + " should have been unoptimized";
+    let message_optimized = message + " should have been optimized";
f(a1.arr, () => a1.el);
- assertUnoptimized(f, undefined, message + " should have been unoptimized");
+ assertUnoptimized(f, undefined, message_unoptimized);
%OptimizeFunctionOnNextCall(f);
// No speculation should protect against further deopts.
f(a2.arr, () => a2.el);
- assertOptimized(f, undefined, message + " should have been optimized");
+ assertOptimized(f, undefined, message_optimized);
}
}
diff --git a/deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js b/deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js
new file mode 100644
index 0000000000..f4d8cd4e5d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-capability-default-closures.js
@@ -0,0 +1,47 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
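+// Exercise optimized code that invokes the default resolve/reject closures
+// created for promise capabilities.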
+(function() {
+ var resolve, value;
+ (new Promise(r => resolve = r)).then(v => value = v);
+ function foo() { resolve(1); }
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+ setTimeout(_ => assertEquals(1, value));
+})();
+
+(function() {
+ var reject, value;
+ (new Promise((_, r) => reject = r)).catch(v => value = v);
+ function foo() { reject(1); }
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+ setTimeout(_ => assertEquals(1, value));
+})();
+
+(function() {
+ var value;
+ function foo(x) { return new Promise((resolve, reject) => resolve(x)); }
+ foo(1);
+ foo(1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(1).then(v => value = v);
+ setTimeout(_ => assertEquals(1, value));
+})();
+
+(function() {
+ var value;
+ function foo(x) { return new Promise((resolve, reject) => reject(x)); }
+ foo(1);
+ foo(1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(1).catch(v => value = v);
+ setTimeout(_ => assertEquals(1, value));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/promise-constructor.js b/deps/v8/test/mjsunit/compiler/promise-constructor.js
new file mode 100644
index 0000000000..67677fad4b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-constructor.js
@@ -0,0 +1,198 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --experimental-inline-promise-constructor
+
+// We have to patch mjsunit because normal assertion failures just throw
+// exceptions which are swallowed in a then clause.
+failWithMessage = (msg) => %AbortJS(msg);
+
+// Don't crash.
+(function() {
+ function foo() {
+ let resolve, reject, promise;
+ promise = new Promise((a, b) => { resolve = a; reject = b; });
+
+ return {resolve, reject, promise};
+ }
+
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
+
+// Check that when executor is non-callable, the constructor throws.
+(function() {
+ function foo() {
+ return new Promise(1);
+ }
+
+ assertThrows(foo, TypeError);
+ assertThrows(foo, TypeError);
+ %OptimizeFunctionOnNextCall(foo);
+ assertThrows(foo, TypeError);
+})();
+
+// Check that when the promise constructor throws because the executor is
+// non-callable, the stack contains 'new Promise'.
+(function() {
+ function foo() {
+ return new Promise(1);
+ }
+
+ let threw;
+ try {
+ threw = false;
+ foo();
+ } catch (e) {
+ threw = true;
+ assertContains('new Promise', e.stack);
+ } finally {
+ assertTrue(threw);
+ }
+ try {
+ threw = false;
+ foo();
+ } catch (e) {
+ threw = true;
+ assertContains('new Promise', e.stack);
+ } finally {
+ assertTrue(threw);
+ }
+
+ %OptimizeFunctionOnNextCall(foo);
+ try {
+ threw = false;
+ foo();
+ } catch (e) {
+ threw = true;
+ assertContains('new Promise', e.stack);
+ } finally {
+ assertTrue(threw);
+ }
+})();
+
+// Check that when executor throws, the promise is rejected.
+(function() {
+ function foo() {
+ return new Promise((a, b) => { throw new Error(); });
+ }
+
+ function bar(i) {
+ let error = null;
+ foo().then(_ => error = 1, e => error = e);
+ setTimeout(_ => assertInstanceof(error, Error));
+ if (i == 1) %OptimizeFunctionOnNextCall(foo);
+ if (i > 0) setTimeout(bar.bind(null, i - 1));
+ }
+ bar(3);
+})();
+
+// Check that when executor causes lazy deoptimization of the inlined
+// constructor, we return the promise value and not the return value of the
+// executor function itself.
+(function() {
+ function foo() {
+ let p;
+ try {
+ p = new Promise((a, b) => { %DeoptimizeFunction(foo); });
+ } catch (e) {
+ // Nothing should throw
+ assertUnreachable();
+ }
+ assertInstanceof(p, Promise);
+ }
+
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
+
+// The same as above, except that the executor function also creates a promise
+// and both executor functions cause a lazy deopt of the calling function.
+(function() {
+ function executor(a, b) {
+ %DeoptimizeFunction(foo);
+ let p = new Promise((a, b) => { %DeoptimizeFunction(executor); });
+ }
+ function foo() {
+ let p;
+ try {
+ p = new Promise(executor);
+ } catch (e) {
+ // Nothing should throw
+ assertUnreachable();
+ }
+ assertInstanceof(p, Promise);
+ }
+
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
+
+// Check that when the executor causes lazy deoptimization of the inlined
+// constructor, and then throws, the deopt continuation catches and then calls
+// the reject function instead of propagating the exception.
+(function() {
+ function foo() {
+ let p;
+ try {
+ p = new Promise((a, b) => {
+ %DeoptimizeFunction(foo);
+ throw new Error();
+ });
+ } catch (e) {
+ // The promise constructor should catch the exception and reject the
+ // promise instead.
+ // TODO(petermarshall): This fails but should not. We need to fix deopts.
+ // assertUnreachable();
+ }
+ // TODO(petermarshall): This fails but should not.
+ // assertInstanceof(p, Promise);
+ }
+
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
+
+// Test when the executor is not inlined.
+(function() {
+ let resolve, reject, promise;
+ function bar(a, b) {
+ resolve = a; reject = b;
+ throw new Error();
+ }
+ function foo() {
+ promise = new Promise(bar);
+ }
+ foo();
+ foo();
+ %NeverOptimizeFunction(bar);
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
+
+// Test that the stack trace contains 'new Promise'
+(function() {
+ let resolve, reject, promise;
+ function bar(a, b) {
+ resolve = a; reject = b;
+ let stack = new Error().stack;
+ assertContains("new Promise", stack);
+ throw new Error();
+ }
+ function foo() {
+ promise = new Promise(bar);
+ }
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js
new file mode 100644
index 0000000000..d3bd0b8543
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-1.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
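+// Promise.prototype.catch(f) behaves like this.then(undefined, f); once a
+// custom 'then' is installed on the prototype, optimized code must call it.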
+function foo(p) { return p.catch(x => x); }
+
+const a = Promise.resolve(1);
+
+foo(a);
+foo(a);
+%OptimizeFunctionOnNextCall(foo);
+foo(a);
+
+let custom_then_called = false;
+a.__proto__.then = function() { custom_then_called = true; }
+foo(a);
+assertTrue(custom_then_called);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js
new file mode 100644
index 0000000000..0d3f34db28
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-custom-then-2.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(p) { return p.catch(x => x); }
+
+const a = Promise.resolve(1);
+
+foo(a);
+foo(a);
+%OptimizeFunctionOnNextCall(foo);
+foo(a);
+
+let custom_then_called = false;
+a.then = function() { custom_then_called = true; }
+foo(a);
+assertTrue(custom_then_called);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js
new file mode 100644
index 0000000000..5aadaada81
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-catch-subclass.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let custom_then_called = false;
+
+function foo(p) {
+ custom_then_called = false;
+ p.catch(x => x);
+ return custom_then_called;
+}
+
+class MyPromise extends Promise {
+ then(onFulfilled, onRejected) {
+ custom_then_called = true;
+ return super.then(onFulfilled, onRejected);
+ }
+}
+
+const a = MyPromise.resolve(1);
+
+assertTrue(foo(a));
+assertTrue(foo(a));
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo(a));
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-catch.js b/deps/v8/test/mjsunit/compiler/promise-prototype-catch.js
new file mode 100644
index 0000000000..eae343fd2e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-catch.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(p) { return p.catch(); }
+ foo(Promise.resolve(1));
+ foo(Promise.resolve(1));
+ %OptimizeFunctionOnNextCall(foo);
+ foo(Promise.resolve(1));
+})();
+
+(function() {
+ function foo(p) { return p.catch(foo); }
+ foo(Promise.resolve(1));
+ foo(Promise.resolve(1));
+ %OptimizeFunctionOnNextCall(foo);
+ foo(Promise.resolve(1));
+})();
+
+(function() {
+ function foo(p) { return p.catch(foo, undefined); }
+ foo(Promise.resolve(1));
+ foo(Promise.resolve(1));
+ %OptimizeFunctionOnNextCall(foo);
+ foo(Promise.resolve(1));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js
new file mode 100644
index 0000000000..a6987d446f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-1.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(p) { return p.finally(x => x); }
+
+const a = Promise.resolve(1);
+
+foo(a);
+foo(a);
+%OptimizeFunctionOnNextCall(foo);
+foo(a);
+
+let custom_then_called = false;
+a.__proto__.then = function() { custom_then_called = true; }
+foo(a);
+assertTrue(custom_then_called);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js
new file mode 100644
index 0000000000..5bad54a61d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-custom-then-2.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(p) { return p.finally(x => x); }
+
+const a = Promise.resolve(1);
+
+foo(a);
+foo(a);
+%OptimizeFunctionOnNextCall(foo);
+foo(a);
+
+let custom_then_called = false;
+a.then = function() { custom_then_called = true; }
+foo(a);
+assertTrue(custom_then_called);
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js
new file mode 100644
index 0000000000..ff5657f6cb
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-finally-subclass.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let custom_then_called = false;
+
+function foo(p) {
+ custom_then_called = false;
+ p.finally(x => x);
+ return custom_then_called;
+}
+
+class MyPromise extends Promise {
+ then(onFulfilled, onRejected) {
+ custom_then_called = true;
+ return super.then(onFulfilled, onRejected);
+ }
+}
+
+const a = MyPromise.resolve(1);
+
+assertTrue(foo(a));
+assertTrue(foo(a));
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo(a));
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-finally.js b/deps/v8/test/mjsunit/compiler/promise-prototype-finally.js
new file mode 100644
index 0000000000..6060f7b857
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-finally.js
@@ -0,0 +1,41 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p) { return p.finally(); }
+ foo(p);
+ foo(p);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p);
+})();
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p) { return p.finally(x => x); }
+ foo(p);
+ foo(p);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p);
+})();
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p, f) { return p.finally(f); }
+ foo(p, x => x);
+ foo(p, x => x);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p, x => x);
+})();
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p, f) { return p.finally(f).finally(f); }
+ foo(p, x => x);
+ foo(p, x => x);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p, x => x);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/promise-prototype-then.js b/deps/v8/test/mjsunit/compiler/promise-prototype-then.js
new file mode 100644
index 0000000000..caf77708b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-prototype-then.js
@@ -0,0 +1,50 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p) { return p.then(); }
+ foo(p);
+ foo(p);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p);
+})();
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p) { return p.then(x => x); }
+ foo(p);
+ foo(p);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p);
+})();
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p) { return p.then(x => x, y => y); }
+ foo(p);
+ foo(p);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p);
+})();
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p, f) { return p.then(f, f); }
+ foo(p, x => x);
+ foo(p, x => x);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p, x => x);
+})();
+
+(function() {
+ const p = Promise.resolve(1);
+ function foo(p, f) { return p.then(f, f).then(f, f); }
+ foo(p, x => x);
+ foo(p, x => x);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(p, x => x);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/promise-resolve.js b/deps/v8/test/mjsunit/compiler/promise-resolve.js
new file mode 100644
index 0000000000..13cb0fa0a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/promise-resolve.js
@@ -0,0 +1,119 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
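+// Asserts that the given thenable eventually fulfills with the expected value.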
+function assertFulfilledWith(expected, thenable) {
+ assertPromiseResult(thenable, v => assertEquals(expected, v));
+}
+
+(function() {
+ function foo() { return Promise.resolve(); }
+ assertFulfilledWith(undefined, foo());
+ assertFulfilledWith(undefined, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(undefined, foo());
+})();
+
+(function() {
+ function foo(x) { return Promise.resolve(x); }
+ assertFulfilledWith(3, foo(3));
+ assertFulfilledWith(3, foo(3));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(3, foo(3));
+})();
+
+(function() {
+ function foo(x, y) { return Promise.resolve(x, y); }
+ assertFulfilledWith(1, foo(1, 0));
+ assertFulfilledWith(2, foo(2, 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(3, foo(3, 2));
+})();
+
+(function() {
+ function foo(x) { return Promise.resolve({x}); }
+ assertFulfilledWith({x:1}, foo(1));
+ assertFulfilledWith({x:2}, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith({x:3}, foo(3));
+})();
+
+(function() {
+ function foo(x) { return Promise.resolve(Promise.resolve(x)); }
+ assertFulfilledWith(null, foo(null));
+ assertFulfilledWith('a', foo('a'));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(42, foo(42));
+})();
+
+(function() {
+ const thenable = new class Thenable {
+ then(fulfill, reject) {
+ fulfill(1);
+ }
+ };
+ function foo() { return Promise.resolve(thenable); }
+ assertFulfilledWith(1, foo());
+ assertFulfilledWith(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(1, foo());
+})();
+
+(function() {
+ const MyPromise = class MyPromise extends Promise {};
+
+ (function() {
+ function foo() { return MyPromise.resolve(); }
+ assertFulfilledWith(undefined, foo());
+ assertFulfilledWith(undefined, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(undefined, foo());
+ })();
+
+ (function() {
+ function foo(x) { return MyPromise.resolve(x); }
+ assertFulfilledWith(3, foo(3));
+ assertFulfilledWith(3, foo(3));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(3, foo(3));
+ })();
+
+ (function() {
+ function foo(x, y) { return MyPromise.resolve(x, y); }
+ assertFulfilledWith(1, foo(1, 0));
+ assertFulfilledWith(2, foo(2, 1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(3, foo(3, 2));
+ })();
+
+ (function() {
+ function foo(x) { return MyPromise.resolve({x}); }
+ assertFulfilledWith({x:1}, foo(1));
+ assertFulfilledWith({x:2}, foo(2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith({x:3}, foo(3));
+ })();
+
+ (function() {
+ function foo(x) { return MyPromise.resolve(Promise.resolve(x)); }
+ assertFulfilledWith(null, foo(null));
+ assertFulfilledWith('a', foo('a'));
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(42, foo(42));
+ })();
+
+ (function() {
+ const thenable = new class Thenable {
+ then(fulfill, reject) {
+ fulfill(1);
+ }
+ };
+ function foo() { return MyPromise.resolve(thenable); }
+ assertFulfilledWith(1, foo());
+ assertFulfilledWith(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertFulfilledWith(1, foo());
+ })();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-803022.js b/deps/v8/test/mjsunit/compiler/regress-803022.js
new file mode 100644
index 0000000000..30e13cf032
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-803022.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ for (var a = 0; a < 2; a++) {
+ if (a === 1) %OptimizeOsr();
+ while (0 && 1) {
+ for (var j = 1; j < 2; j++) { }
+ }
+ }
+}
+
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-808472.js b/deps/v8/test/mjsunit/compiler/regress-808472.js
new file mode 100644
index 0000000000..8c8bdf1d0d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-808472.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function opt() {
+ let opt, arr = [...[...[...[...new Uint8Array(0x10000)]]]];
+ while (arr--) {
+ opt = ((typeof opt) === 'undefined') ? /a/ : arr;
+ }
+}
+opt();
+opt();
diff --git a/deps/v8/test/mjsunit/compiler/regress-815392.js b/deps/v8/test/mjsunit/compiler/regress-815392.js
new file mode 100644
index 0000000000..367c5e824d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-815392.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const __f_1 = eval(`(function __f_1() {
+ class Derived extends Object {
+ constructor() {
+ ${"this.a=1;".repeat(0x3fffe-8)}
+ }
+ }
+ return Derived;
+})`);
+assertThrows(() => new (__f_1())());
diff --git a/deps/v8/test/mjsunit/compiler/regress-817225.js b/deps/v8/test/mjsunit/compiler/regress-817225.js
new file mode 100644
index 0000000000..22f0375605
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-817225.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+function inlined(abort, n, a, b) {
+ if (abort) return;
+ var x = a ? true : "" + a;
+ if (!a) {
+ var y = n + y + 10;
+ if(!b) {
+ x = y;
+ }
+ if (x) {
+ x = false;
+ }
+ }
+ return x + 1;
+}
+inlined();
+function optimized(abort, a, b) {
+ return inlined(abort, "abc", a, b);
+}
+optimized(true);
+%OptimizeFunctionOnNextCall(optimized);
+optimized();
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index 7586261c92..654dd23439 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -28,22 +28,28 @@
// Flags: --allow-natives-syntax --nostress-opt --opt
-function test(f) {
+function test(f, iterations) {
f();
f();
- %OptimizeFunctionOnNextCall(f);
- f();
+  // Some tests need several optimize/deopt rounds before their feedback
+  // stabilizes.
+ let n = iterations ? iterations : 1;
+ for (let i = 0; i < n; i++) {
+ %OptimizeFunctionOnNextCall(f);
+ f();
+ }
+ // Assert that the function finally stabilized.
+ assertOptimized(f);
}
test(function add() {
assertEquals(2, 1 + 1);
assertEquals(2.5, 1.25 + 1.25);
- assertEquals("Infinity", String(Infinity + Infinity));
- assertEquals("Infinity", String(Infinity + 3));
- assertEquals("NaN", String(Infinity + (-Infinity)));
- assertEquals("NaN", String(NaN + 2));
- assertEquals("-Infinity", String(1 / (-0.0 + (-0.0))));
- assertEquals("Infinity", String(1 / (-0.0 + 0.0)));
+ assertSame(Infinity, Infinity + Infinity);
+ assertSame(Infinity, Infinity + 3);
+ assertSame(NaN, Infinity + (-Infinity));
+ assertSame(NaN, NaN + 2);
+ assertSame(-Infinity, 1 / (-0.0 + (-0.0)));
+ assertSame(Infinity, 1 / (-0.0 + 0.0));
});
test(function inc() {
@@ -52,9 +58,9 @@ test(function inc() {
var c = -Infinity;
var d = NaN;
assertEquals(2, ++a);
- assertEquals("Infinity", String(++b));
- assertEquals("-Infinity", String(++c));
- assertEquals("NaN", String(++d));
+ assertSame(Infinity, ++b);
+ assertSame(-Infinity, ++c);
+ assertSame(NaN, ++d);
});
test(function dec() {
@@ -63,155 +69,155 @@ test(function dec() {
var c = -Infinity;
var d = NaN;
assertEquals(0, --a);
- assertEquals("Infinity", String(--b));
- assertEquals("-Infinity", String(--c));
- assertEquals("NaN", String(--d));
+ assertSame(Infinity, --b);
+ assertSame(-Infinity, --c);
+ assertSame(NaN, --d);
});
test(function sub() {
assertEquals(0, 1 - 1);
assertEquals(0.5, 1.5 - 1);
- assertEquals("Infinity", String(Infinity - (-Infinity)));
- assertEquals("Infinity", String(Infinity - 3));
- assertEquals("NaN", String(Infinity - Infinity));
- assertEquals("NaN", String(NaN - 2));
- assertEquals("-Infinity", String(1 / (-0.0 - 0.0)));
- assertEquals("Infinity", String(1 / (0.0 - 0.0)));
+ assertSame(Infinity, Infinity - (-Infinity));
+ assertSame(Infinity, Infinity - 3);
+ assertSame(NaN, Infinity - Infinity);
+ assertSame(NaN, NaN - 2);
+ assertSame(-Infinity, 1 / (-0.0 - 0.0));
+ assertSame(Infinity, 1 / (0.0 - 0.0));
});
test(function mul() {
assertEquals(1, 1 * 1);
assertEquals(2.25, 1.5 * 1.5);
- assertEquals("Infinity", String(Infinity * Infinity));
- assertEquals("-Infinity", String(Infinity * (-Infinity)));
- assertEquals("Infinity", String(Infinity * 3));
- assertEquals("-Infinity", String(Infinity * (-3)));
- assertEquals("NaN", String(NaN * 3));
- assertEquals("-Infinity", String(1 / (-0.0 * 0.0)));
- assertEquals("Infinity", String(1 / (0.0 * 0.0)));
+ assertSame(Infinity, Infinity * Infinity);
+ assertSame(-Infinity, Infinity * (-Infinity));
+ assertSame(Infinity, Infinity * 3);
+ assertSame(-Infinity, Infinity * (-3));
+ assertSame(NaN, NaN * 3);
+ assertSame(-Infinity, 1 / (-0.0 * 0.0));
+ assertSame(Infinity, 1 / (0.0 * 0.0));
});
test(function div() {
assertEquals(1, 1 / 1);
assertEquals(1.5, 2.25 / 1.5);
- assertEquals("NaN", String(Infinity / Infinity));
- assertEquals("Infinity", String(Infinity / 3));
- assertEquals("-Infinity", String(Infinity / (-3)));
- assertEquals("NaN", String(NaN / 3));
- assertEquals("-Infinity", String(1 / (-0.0)));
- assertEquals("Infinity", String(Infinity/0.0));
+ assertSame(NaN, Infinity / Infinity);
+ assertSame(Infinity, Infinity / 3);
+ assertSame(-Infinity, Infinity / (-3));
+ assertSame(NaN, NaN / 3);
+ assertSame(-Infinity, 1 / (-0.0));
+ assertSame(Infinity, Infinity/0.0);
});
test(function mathMin() {
assertEquals(1, Math.min(1, 10));
assertEquals(1.5, Math.min(1.5, 2.5));
assertEquals(0, Math.min(Infinity, 0));
- assertEquals("Infinity", String(Math.min(Infinity, Infinity)));
- assertEquals("-Infinity", String(Math.min(Infinity, -Infinity)));
- assertEquals("NaN", String(Math.min(NaN, 1)));
- assertEquals("Infinity", String(1 / Math.min(0.0, 0.0)));
- assertEquals("-Infinity", String(1 / Math.min(-0.0, -0.0)));
- assertEquals("-Infinity", String(1 / Math.min(0.0, -0.0)));
+ assertSame(Infinity, Math.min(Infinity, Infinity));
+ assertSame(-Infinity, Math.min(Infinity, -Infinity));
+ assertSame(NaN, Math.min(NaN, 1));
+ assertSame(Infinity, 1 / Math.min(0.0, 0.0));
+ assertSame(-Infinity, 1 / Math.min(-0.0, -0.0));
+ assertSame(-Infinity, 1 / Math.min(0.0, -0.0));
});
test(function mathMax() {
assertEquals(10, Math.max(1, 10));
assertEquals(2.5, Math.max(1.5, 2.5));
assertEquals(Infinity, Math.max(Infinity, 0));
- assertEquals("-Infinity", String(Math.max(-Infinity, -Infinity)));
- assertEquals("Infinity", String(Math.max(Infinity, -Infinity)));
- assertEquals("NaN", String(Math.max(NaN, 1)));
- assertEquals("Infinity", String(1 / Math.max(0.0, 0.0)));
- assertEquals("-Infinity", String(1 / Math.max(-0.0, -0.0)));
- assertEquals("Infinity", String(1 / Math.max(0.0, -0.0)));
+ assertSame(-Infinity, Math.max(-Infinity, -Infinity));
+ assertSame(Infinity, Math.max(Infinity, -Infinity));
+ assertSame(NaN, Math.max(NaN, 1));
+ assertSame(Infinity, 1 / Math.max(0.0, 0.0));
+ assertSame(-Infinity, 1 / Math.max(-0.0, -0.0));
+ assertSame(Infinity, 1 / Math.max(0.0, -0.0));
});
test(function mathExp() {
assertEquals(1.0, Math.exp(0.0));
assertTrue(2.7 < Math.exp(1) && Math.exp(1) < 2.8);
- assertEquals("Infinity", String(Math.exp(Infinity)));
+ assertSame(Infinity, Math.exp(Infinity));
assertEquals("0", String(Math.exp(-Infinity)));
- assertEquals("NaN", String(Math.exp(NaN)));
+ assertSame(NaN, Math.exp(NaN));
});
test(function mathLog() {
assertEquals(0.0, Math.log(1.0));
assertTrue(1 < Math.log(3) && Math.log(3) < 1.5);
- assertEquals("Infinity", String(Math.log(Infinity)));
- assertEquals("NaN", String(Math.log(-Infinity)));
- assertEquals("NaN", String(Math.exp(NaN)));
+ assertSame(Infinity, Math.log(Infinity));
+ assertSame(NaN, Math.log(-Infinity));
+ assertSame(NaN, Math.exp(NaN));
});
test(function mathSqrt() {
assertEquals(1.0, Math.sqrt(1.0));
- assertEquals("NaN", String(Math.sqrt(-1.0)));
- assertEquals("Infinity", String(Math.sqrt(Infinity)));
- assertEquals("NaN", String(Math.sqrt(-Infinity)));
- assertEquals("NaN", String(Math.sqrt(NaN)));
+ assertSame(NaN, Math.sqrt(-1.0));
+ assertSame(Infinity, Math.sqrt(Infinity));
+ assertSame(NaN, Math.sqrt(-Infinity));
+ assertSame(NaN, Math.sqrt(NaN));
});
test(function mathPowHalf() {
assertEquals(1.0, Math.pow(1.0, 0.5));
- assertEquals("NaN", String(Math.sqrt(-1.0)));
- assertEquals("Infinity", String(Math.pow(Infinity, 0.5)));
- assertEquals("NaN", String(Math.sqrt(-Infinity, 0.5)));
+ assertSame(NaN, Math.sqrt(-1.0));
+ assertSame(Infinity, Math.pow(Infinity, 0.5));
+ assertSame(NaN, Math.sqrt(-Infinity, 0.5));
assertEquals(0, Math.pow(Infinity, -0.5));
- assertEquals("NaN", String(Math.sqrt(-Infinity, -0.5)));
- assertEquals("NaN", String(Math.sqrt(NaN, 0.5)));
+ assertSame(NaN, Math.sqrt(-Infinity, -0.5));
+ assertSame(NaN, Math.sqrt(NaN, 0.5));
});
test(function mathAbs() {
assertEquals(1.5, Math.abs(1.5));
assertEquals(1.5, Math.abs(-1.5));
- assertEquals("Infinity", String(Math.abs(Infinity)));
- assertEquals("Infinity", String(Math.abs(-Infinity)));
- assertEquals("NaN", String(Math.abs(NaN)));
+ assertSame(Infinity, Math.abs(Infinity));
+ assertSame(Infinity, Math.abs(-Infinity));
+ assertSame(NaN, Math.abs(NaN));
});
test(function mathRound() {
assertEquals(2, Math.round(1.5));
assertEquals(-1, Math.round(-1.5));
- assertEquals("Infinity", String(Math.round(Infinity)));
- assertEquals("-Infinity", String(Math.round(-Infinity)));
- assertEquals("Infinity", String(1 / Math.round(0.0)));
- assertEquals("-Infinity", String(1 / Math.round(-0.0)));
- assertEquals("NaN", String(Math.round(NaN)));
+ assertSame(Infinity, Math.round(Infinity));
+ assertSame(-Infinity, Math.round(-Infinity));
+ assertSame(Infinity, 1 / Math.round(0.0));
+ assertSame(-Infinity, 1 / Math.round(-0.0));
+ assertSame(NaN, Math.round(NaN));
assertEquals(Math.pow(2, 52) + 1, Math.round(Math.pow(2, 52) + 1));
});
test(function mathFround() {
assertTrue(isNaN(Math.fround(NaN)));
- assertEquals("Infinity", String(1/Math.fround(0)));
- assertEquals("-Infinity", String(1/Math.fround(-0)));
- assertEquals("Infinity", String(Math.fround(Infinity)));
- assertEquals("-Infinity", String(Math.fround(-Infinity)));
- assertEquals("Infinity", String(Math.fround(1E200)));
- assertEquals("-Infinity", String(Math.fround(-1E200)));
+ assertSame(Infinity, 1/Math.fround(0));
+ assertSame(-Infinity, 1/Math.fround(-0));
+ assertSame(Infinity, Math.fround(Infinity));
+ assertSame(-Infinity, Math.fround(-Infinity));
+ assertSame(Infinity, Math.fround(1E200));
+ assertSame(-Infinity, Math.fround(-1E200));
assertEquals(3.1415927410125732, Math.fround(Math.PI));
});
test(function mathFloor() {
assertEquals(1, Math.floor(1.5));
assertEquals(-2, Math.floor(-1.5));
- assertEquals("Infinity", String(Math.floor(Infinity)));
- assertEquals("-Infinity", String(Math.floor(-Infinity)));
- assertEquals("Infinity", String(1 / Math.floor(0.0)));
- assertEquals("-Infinity", String(1 / Math.floor(-0.0)));
- assertEquals("NaN", String(Math.floor(NaN)));
+ assertSame(Infinity, Math.floor(Infinity));
+ assertSame(-Infinity, Math.floor(-Infinity));
+ assertSame(Infinity, 1 / Math.floor(0.0));
+ assertSame(-Infinity, 1 / Math.floor(-0.0));
+ assertSame(NaN, Math.floor(NaN));
assertEquals(Math.pow(2, 52) + 1, Math.floor(Math.pow(2, 52) + 1));
});
test(function mathPow() {
assertEquals(2.25, Math.pow(1.5, 2));
assertTrue(1.8 < Math.pow(1.5, 1.5) && Math.pow(1.5, 1.5) < 1.9);
- assertEquals("Infinity", String(Math.pow(Infinity, 0.5)));
- assertEquals("Infinity", String(Math.pow(-Infinity, 0.5)));
+ assertSame(Infinity, Math.pow(Infinity, 0.5));
+ assertSame(Infinity, Math.pow(-Infinity, 0.5));
assertEquals(0, Math.pow(Infinity, -0.5));
assertEquals(0, Math.pow(Infinity, -0.5));
- assertEquals("Infinity", String(Math.pow(Infinity, Infinity)));
+ assertSame(Infinity, Math.pow(Infinity, Infinity));
assertEquals(0, Math.pow(Infinity, -Infinity));
- assertEquals("NaN", String(Math.pow(Infinity, NaN)));
- assertEquals("NaN", String(Math.pow(NaN, 2)));
+ assertSame(NaN, Math.pow(Infinity, NaN));
+ assertSame(NaN, Math.pow(NaN, 2));
});
test(function stringAdd() {
@@ -226,24 +232,48 @@ test(function stringLength() {
assertEquals(-5, { length: -5 }.length);
});
-test(function stringCharCodeAt() {
- assertEquals(99, "abc".charCodeAt(2));
- assertEquals("NaN", String("abc".charCodeAt(-1)));
- assertEquals("NaN", String("abc".charCodeAt(4)));
- assertEquals(98, "abc".charCodeAt(1.1));
- assertEquals("NaN", String("abc".charCodeAt(4.1)));
- assertEquals("NaN", String("abc".charCodeAt(1 + 4294967295)));
-});
-
test(function stringCharAt() {
assertEquals("c", "abc".charAt(2));
assertEquals("", "abc".charAt(-1));
assertEquals("", "abc".charAt(4));
assertEquals("b", "abc".charAt(1.1));
assertEquals("", "abc".charAt(4.1));
- assertEquals("", String("abc".charAt(1 + 4294967295)));
-});
+ assertEquals("", "abc".charAt(Infinity));
+ assertEquals("", "abc".charAt(-Infinity));
+ assertEquals("a", "abc".charAt(-0));
+ assertEquals("a", "abc".charAt(+0));
+ assertEquals("", "".charAt());
+ assertEquals("", "abc".charAt(1 + 4294967295));
+}, 10);
+
+test(function stringCharCodeAt() {
+ assertSame(99, "abc".charCodeAt(2));
+ assertSame(NaN, "abc".charCodeAt(-1));
+ assertSame(NaN, "abc".charCodeAt(4));
+ assertSame(98, "abc".charCodeAt(1.1));
+ assertSame(NaN, "abc".charCodeAt(4.1));
+ assertSame(NaN, "abc".charCodeAt(Infinity));
+ assertSame(NaN, "abc".charCodeAt(-Infinity));
+ assertSame(97, "abc".charCodeAt(-0));
+ assertSame(97, "abc".charCodeAt(+0));
+ assertSame(NaN, "".charCodeAt());
+ assertSame(NaN, "abc".charCodeAt(1 + 4294967295));
+}, 10);
+test(function stringCodePointAt() {
+ assertSame(65533, "äϠ�𝌆".codePointAt(2));
+ assertSame(119558, "äϠ�𝌆".codePointAt(3));
+ assertSame(undefined, "äϠ�".codePointAt(-1));
+ assertSame(undefined, "äϠ�".codePointAt(4));
+ assertSame(992, "äϠ�".codePointAt(1.1));
+ assertSame(undefined, "äϠ�".codePointAt(4.1));
+ assertSame(undefined, "äϠ�".codePointAt(Infinity));
+ assertSame(undefined, "äϠ�".codePointAt(-Infinity));
+ assertSame(228, "äϠ�".codePointAt(-0));
+ assertSame(97, "aϠ�".codePointAt(+0));
+ assertSame(undefined, "".codePointAt());
+ assertSame(undefined, "äϠ�".codePointAt(1 + 4294967295));
+}, 10);
test(function int32Mod() {
assertEquals(-0, -2147483648 % (-1));
diff --git a/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js b/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
index 09586c3a11..0a15413ea3 100644
--- a/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/d8/d8-worker-sharedarraybuffer.js
@@ -28,17 +28,17 @@
// Flags: --harmony-sharedarraybuffer
if (this.Worker) {
- (function TestTransfer() {
+ (function TestClone() {
var workerScript =
`onmessage = function(m) {
var sab = m;
var ta = new Uint32Array(sab);
if (sab.byteLength !== 16) {
- throw new Error('SharedArrayBuffer transfer byteLength');
+ throw new Error('SharedArrayBuffer clone byteLength');
}
for (var i = 0; i < 4; ++i) {
if (ta[i] !== i) {
- throw new Error('SharedArrayBuffer transfer value ' + i);
+ throw new Error('SharedArrayBuffer clone value ' + i);
}
}
// Atomically update ta[0]
@@ -53,7 +53,7 @@ if (this.Worker) {
ta[i] = i;
}
- // Transfer SharedArrayBuffer
+ // Clone SharedArrayBuffer
w.postMessage(sab);
assertEquals(16, sab.byteLength); // ArrayBuffer should not be neutered.
@@ -68,7 +68,7 @@ if (this.Worker) {
assertEquals(16, sab.byteLength); // Still not neutered.
})();
- (function TestTransferMulti() {
+ (function TestCloneMulti() {
var workerScript =
`onmessage = function(msg) {
var sab = msg.sab;
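The Transfer-to-Clone renaming reflects that postMessage shares a SharedArrayBuffer with the receiver instead of detaching it, unlike an ArrayBuffer placed on a transfer list. A rough sketch of the difference, assuming w is a Worker in a host that supports both (illustrative only):

  const sab = new SharedArrayBuffer(16);
  w.postMessage(sab);        // shared: sender still sees all 16 bytes
  sab.byteLength;            // 16, the buffer is not neutered

  const ab = new ArrayBuffer(16);
  w.postMessage(ab, [ab]);   // transferred: sender's copy is detached
  ab.byteLength;             // 0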
diff --git a/deps/v8/test/mjsunit/default-nospec.js b/deps/v8/test/mjsunit/default-nospec.js
new file mode 100644
index 0000000000..0e3b6c1f55
--- /dev/null
+++ b/deps/v8/test/mjsunit/default-nospec.js
@@ -0,0 +1,84 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+(()=> {
+ function f(a, b, c) {
+ return String.prototype.indexOf.call(a, b, c);
+ }
+ f("abc", "de", 1);
+ f("abc", "de", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", "de", {});
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", "de", {});
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a, b, c) {
+ return String.prototype.indexOf.apply(a, [b, c]);
+ }
+ f("abc", "de", 1);
+ f("abc", "de", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {}, 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {}, 1);
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a, b, c) {
+ return Reflect.apply(String.prototype.indexOf, a, [b, c]);
+ }
+ f("abc", "de", 1);
+ f("abc", "de", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f({}, "de", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f({}, "de", 1);
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a, b) {
+ return String.fromCharCode.call(a, b);
+ }
+ f("abc", 1);
+ f("abc", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {});
+ %OptimizeFunctionOnNextCall(f);
+ f({}, {});
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a, b) {
+ return String.fromCharCode.apply(undefined, [b, {}]);
+ }
+ f("abc", 1);
+ f("abc", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {});
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {});
+ assertOptimized(f);
+})();
+
+
+(()=> {
+ function f(a, b) {
+ return Reflect.apply(String.fromCharCode, a, [b, {}]);
+ }
+ f("abc", 1);
+ f("abc", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {});
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {});
+ assertOptimized(f);
+})();
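These tests lean on the builtins coercing mismatched arguments rather than throwing, so a call site that briefly sees a bad type can still be re-optimized without entering a deopt loop. A small sketch of the coercions involved (illustrative, not from the patch):

  // String.prototype.indexOf applies ToInteger to the position argument:
  "abc".indexOf("b", {});                // {} -> NaN -> 0, so the result is 1
  // String.fromCharCode applies ToUint16 to each code unit:
  String.fromCharCode({}).charCodeAt(0); // ToUint16(NaN) -> 0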
diff --git a/deps/v8/test/mjsunit/es6/array-from.js b/deps/v8/test/mjsunit/es6/array-from.js
index c483d3deb6..02a599d4ca 100644
--- a/deps/v8/test/mjsunit/es6/array-from.js
+++ b/deps/v8/test/mjsunit/es6/array-from.js
@@ -161,6 +161,14 @@ assertThrows(function () { Array.from.call(exotic, [1]); }, TypeError);
// The setter wasn't called
assertEquals(0, setterCalled);
+// Non-callable iterators should cause a TypeError before calling the target
+// constructor.
+items = {};
+items[Symbol.iterator] = 7;
+function TestError() {}
+function ArrayLike() { throw new TestError() }
+assertThrows(function() { Array.from.call(ArrayLike, items); }, TypeError);
+
// Check that array properties defined are writable, enumerable, configurable
function ordinary() { }
var x = Array.from.call(ordinary, [2]);
diff --git a/deps/v8/test/mjsunit/es6/array-iterator.js b/deps/v8/test/mjsunit/es6/array-iterator.js
index d2d19b059d..b143c8c034 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator.js
@@ -152,10 +152,6 @@ function TestArrayIteratorPrototype() {
assertEquals(Object.prototype, ArrayIteratorPrototype.__proto__);
- assertEquals('Array Iterator', %_ClassOf(array[Symbol.iterator]()));
- assertEquals('Array Iterator', %_ClassOf(array.keys()));
- assertEquals('Array Iterator', %_ClassOf(array.entries()));
-
assertFalse(ArrayIteratorPrototype.hasOwnProperty('constructor'));
assertArrayEquals(['next'],
Object.getOwnPropertyNames(ArrayIteratorPrototype));
diff --git a/deps/v8/test/mjsunit/es6/classof-proxy.js b/deps/v8/test/mjsunit/es6/classof-proxy.js
deleted file mode 100644
index 02043614ba..0000000000
--- a/deps/v8/test/mjsunit/es6/classof-proxy.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-function test_function(o) {
- if (%_ClassOf(o) === "Function") {
- return true;
- } else {
- return false;
- }
-}
-
-var non_callable = new Proxy({}, {});
-var callable = new Proxy(function(){}.__proto__, {});
-var constructable = new Proxy(function(){}, {});
-
-assertFalse(test_function(non_callable));
-assertTrue(test_function(callable));
-assertTrue(test_function(constructable));
-
-%OptimizeFunctionOnNextCall(test_function);
-
-assertFalse(test_function(non_callable));
-assertTrue(test_function(callable));
-assertTrue(test_function(constructable));
diff --git a/deps/v8/test/mjsunit/es6/collection-iterator.js b/deps/v8/test/mjsunit/es6/collection-iterator.js
index 5a9b2f54e6..8257d96664 100644
--- a/deps/v8/test/mjsunit/es6/collection-iterator.js
+++ b/deps/v8/test/mjsunit/es6/collection-iterator.js
@@ -14,7 +14,6 @@ function test(f) {
test(function TestSetIterator() {
var s = new Set;
var iter = s.values();
- assertEquals('Set Iterator', %_ClassOf(iter));
var SetIteratorPrototype = iter.__proto__;
assertFalse(SetIteratorPrototype.hasOwnProperty('constructor'));
@@ -160,7 +159,6 @@ test(function TestSetIteratorSymbol() {
test(function TestMapIterator() {
var m = new Map;
var iter = m.values();
- assertEquals('Map Iterator', %_ClassOf(iter));
var MapIteratorPrototype = iter.__proto__;
assertFalse(MapIteratorPrototype.hasOwnProperty('constructor'));
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
new file mode 100644
index 0000000000..e4b52bc5c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
@@ -0,0 +1,65 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestSetWithCustomIterator(ctor) {
+ const k1 = {};
+ const k2 = {};
+ const entries = [k1];
+ let callCount = 0;
+ entries[Symbol.iterator] = () => ({
+ next: () =>
+ callCount++ === 0
+ ? { value: k2, done: false }
+ : { done: true }
+ });
+ const set = new ctor(entries);
+ assertFalse(set.has(k1));
+ assertTrue(set.has(k2));
+ assertEquals(2, callCount);
+}
+TestSetWithCustomIterator(Set);
+TestSetWithCustomIterator(Set);
+TestSetWithCustomIterator(Set);
+%OptimizeFunctionOnNextCall(TestSetWithCustomIterator);
+TestSetWithCustomIterator(Set);
+assertOptimized(TestSetWithCustomIterator);
+
+TestSetWithCustomIterator(WeakSet);
+TestSetWithCustomIterator(WeakSet);
+TestSetWithCustomIterator(WeakSet);
+%OptimizeFunctionOnNextCall(TestSetWithCustomIterator);
+TestSetWithCustomIterator(WeakSet);
+assertOptimized(TestSetWithCustomIterator);
+
+function TestMapWithCustomIterator(ctor) {
+ const k1 = {};
+ const k2 = {};
+ const entries = [[k1, 1]];
+ let callCount = 0;
+ entries[Symbol.iterator] = () => ({
+ next: () =>
+ callCount++ === 0
+ ? { value: [k2, 2], done: false }
+ : { done: true }
+ });
+ const map = new ctor(entries);
+ assertFalse(map.has(k1));
+ assertEquals(2, map.get(k2));
+ assertEquals(2, callCount);
+}
+TestMapWithCustomIterator(Map);
+TestMapWithCustomIterator(Map);
+TestMapWithCustomIterator(Map);
+%OptimizeFunctionOnNextCall(TestMapWithCustomIterator);
+TestMapWithCustomIterator(Map);
+assertOptimized(TestMapWithCustomIterator);
+
+TestMapWithCustomIterator(WeakMap);
+TestMapWithCustomIterator(WeakMap);
+TestMapWithCustomIterator(WeakMap);
+%OptimizeFunctionOnNextCall(TestMapWithCustomIterator);
+TestMapWithCustomIterator(WeakMap);
+assertOptimized(TestMapWithCustomIterator);
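The new tests depend on new Set(iterable) and new Map(iterable) honoring whatever Symbol.iterator the argument carries, even when it is a plain array. A minimal sketch of that behavior (illustrative):

  const arr = ["ignored"];
  arr[Symbol.iterator] = function* () { yield "used"; };
  new Set(arr).has("used");     // true: the custom iterator is consumed
  new Set(arr).has("ignored");  // false: the array contents are never read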
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js b/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js
new file mode 100644
index 0000000000..50308fdde3
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js
@@ -0,0 +1,80 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestSetWithModifiedIterator(ctor) {
+ const k1 = {};
+ const k2 = {};
+ const entries = [k1, k2];
+ const arrayIteratorProto = Object.getPrototypeOf(entries[Symbol.iterator]());
+ const originalNext = arrayIteratorProto.next;
+ let callCount = 0;
+ arrayIteratorProto.next = function() {
+ callCount++;
+ return originalNext.call(this);
+ };
+
+ const set = new ctor(entries);
+ assertEquals(3, callCount); // +1 for iterator done
+
+  if ('size' in set) assertEquals(2, set.size);
+ assertTrue(set.has(k1));
+ assertTrue(set.has(k2));
+
+ arrayIteratorProto.next = originalNext;
+}
+TestSetWithModifiedIterator(Set);
+TestSetWithModifiedIterator(Set);
+TestSetWithModifiedIterator(Set);
+%OptimizeFunctionOnNextCall(TestSetWithModifiedIterator);
+TestSetWithModifiedIterator(Set);
+assertOptimized(TestSetWithModifiedIterator);
+%DeoptimizeFunction(TestSetWithModifiedIterator);
+
+TestSetWithModifiedIterator(WeakSet);
+TestSetWithModifiedIterator(WeakSet);
+TestSetWithModifiedIterator(WeakSet);
+%OptimizeFunctionOnNextCall(TestSetWithModifiedIterator);
+TestSetWithModifiedIterator(WeakSet);
+assertOptimized(TestSetWithModifiedIterator);
+%DeoptimizeFunction(TestSetWithModifiedIterator);
+
+
+function TestMapWithModifiedIterator(ctor) {
+ const k1 = {};
+ const k2 = {};
+ const entries = [[k1, 1], [k2, 2]];
+ const arrayIteratorProto = Object.getPrototypeOf(entries[Symbol.iterator]());
+ const originalNext = arrayIteratorProto.next;
+ let callCount = 0;
+ arrayIteratorProto.next = function() {
+ callCount++;
+ return originalNext.call(this);
+ };
+
+ const set = new ctor(entries);
+ assertEquals(3, callCount); // +1 for iterator done
+
+  if ('size' in set) assertEquals(2, set.size);
+ assertEquals(1, set.get(k1));
+ assertEquals(2, set.get(k2));
+
+ arrayIteratorProto.next = originalNext;
+}
+TestMapWithModifiedIterator(Map);
+TestMapWithModifiedIterator(Map);
+TestMapWithModifiedIterator(Map);
+%OptimizeFunctionOnNextCall(TestMapWithModifiedIterator);
+TestMapWithModifiedIterator(Map);
+assertOptimized(TestMapWithModifiedIterator);
+%DeoptimizeFunction(TestMapWithModifiedIterator);
+
+TestMapWithModifiedIterator(WeakMap);
+TestMapWithModifiedIterator(WeakMap);
+TestMapWithModifiedIterator(WeakMap);
+%OptimizeFunctionOnNextCall(TestMapWithModifiedIterator);
+TestMapWithModifiedIterator(WeakMap);
+assertOptimized(TestMapWithModifiedIterator);
+%DeoptimizeFunction(TestMapWithModifiedIterator);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
new file mode 100644
index 0000000000..cc441b1ad4
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
@@ -0,0 +1,65 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestSetWithCustomIterator(ctor) {
+ const k1 = {};
+ const k2 = {};
+ let callCount = 0;
+ Array.prototype[Symbol.iterator] = () => ({
+ next: () =>
+ callCount++ === 0
+ ? { value: k2, done: false }
+ : { done: true }
+ });
+ const entries = [k1];
+ const set = new ctor(entries);
+ assertFalse(set.has(k1));
+ assertTrue(set.has(k2));
+ assertEquals(2, callCount);
+}
+TestSetWithCustomIterator(Set);
+TestSetWithCustomIterator(Set);
+TestSetWithCustomIterator(Set);
+%OptimizeFunctionOnNextCall(TestSetWithCustomIterator);
+TestSetWithCustomIterator(Set);
+assertOptimized(TestSetWithCustomIterator);
+
+TestSetWithCustomIterator(WeakSet);
+TestSetWithCustomIterator(WeakSet);
+TestSetWithCustomIterator(WeakSet);
+%OptimizeFunctionOnNextCall(TestSetWithCustomIterator);
+TestSetWithCustomIterator(WeakSet);
+assertOptimized(TestSetWithCustomIterator);
+
+function TestMapWithCustomIterator(ctor) {
+ const k1 = {};
+ const k2 = {};
+ let callCount = 0;
+ Array.prototype[Symbol.iterator] = () => ({
+ next: () =>
+ callCount++ === 0
+ ? { value: [k2, 2], done: false }
+ : { done: true }
+ });
+ const entries = [[k1, 1]];
+ const map = new ctor(entries);
+ assertFalse(map.has(k1));
+ assertEquals(2, map.get(k2));
+ assertEquals(2, callCount);
+}
+TestMapWithCustomIterator(Map);
+TestMapWithCustomIterator(Map);
+TestMapWithCustomIterator(Map);
+%OptimizeFunctionOnNextCall(TestMapWithCustomIterator);
+TestMapWithCustomIterator(Map);
+assertOptimized(TestMapWithCustomIterator);
+
+TestMapWithCustomIterator(WeakMap);
+TestMapWithCustomIterator(WeakMap);
+TestMapWithCustomIterator(WeakMap);
+%OptimizeFunctionOnNextCall(TestMapWithCustomIterator);
+TestMapWithCustomIterator(WeakMap);
+assertOptimized(TestMapWithCustomIterator);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
new file mode 100644
index 0000000000..a427895243
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
@@ -0,0 +1,76 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestSetPrototypeModified(ctor) {
+ const originalPrototypeAdd = ctor.prototype.add;
+ const k1 = {};
+ const k2 = {};
+ const entries = [k1, k2];
+ let addCount = 0;
+
+ ctor.prototype.add = function(value) {
+ addCount++;
+ originalPrototypeAdd.call(this, value);
+ entries.length = 1;
+ };
+ const set = new ctor(entries);
+
+ assertEquals(1, addCount);
+ assertTrue(set.has(k1));
+ assertFalse(set.has(k2));
+
+ ctor.prototype.add = originalPrototypeAdd;
+}
+TestSetPrototypeModified(Set);
+TestSetPrototypeModified(Set);
+TestSetPrototypeModified(Set);
+%OptimizeFunctionOnNextCall(TestSetPrototypeModified);
+TestSetPrototypeModified(Set);
+assertOptimized(TestSetPrototypeModified);
+%DeoptimizeFunction(TestSetPrototypeModified);
+
+TestSetPrototypeModified(WeakSet);
+TestSetPrototypeModified(WeakSet);
+TestSetPrototypeModified(WeakSet);
+%OptimizeFunctionOnNextCall(TestSetPrototypeModified);
+TestSetPrototypeModified(WeakSet);
+assertOptimized(TestSetPrototypeModified);
+%DeoptimizeFunction(TestSetPrototypeModified);
+
+function TestMapPrototypeModified(ctor) {
+ const originalPrototypeSet = ctor.prototype.set;
+ const k1 = {};
+ const k2 = {};
+ const entries = [[k1, 1], [k2, 2]];
+ let setCount = 0;
+
+ ctor.prototype.set = function(key, value) {
+ setCount++;
+ originalPrototypeSet.call(this, key, value);
+ entries.length = 1;
+ };
+ const map = new ctor(entries);
+
+ assertEquals(1, setCount);
+ assertTrue(map.has(k1));
+ assertFalse(map.has(k2));
+
+ ctor.prototype.set = originalPrototypeSet;
+}
+TestMapPrototypeModified(Map);
+TestMapPrototypeModified(Map);
+TestMapPrototypeModified(Map);
+%OptimizeFunctionOnNextCall(TestMapPrototypeModified);
+TestMapPrototypeModified(Map);
+assertOptimized(TestMapPrototypeModified);
+%DeoptimizeFunction(TestMapPrototypeModified);
+
+TestMapPrototypeModified(WeakMap);
+TestMapPrototypeModified(WeakMap);
+TestMapPrototypeModified(WeakMap);
+%OptimizeFunctionOnNextCall(TestMapPrototypeModified);
+TestMapPrototypeModified(WeakMap);
+assertOptimized(TestMapPrototypeModified);
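Per the spec's AddEntriesFromIterable, the collection constructors fetch the adder (add or set) from the instance once up front and then invoke it for every entry, which is what makes the monkey-patching above observable. A condensed sketch (illustrative):

  const calls = [];
  const origSet = Map.prototype.set;
  Map.prototype.set = function(k, v) {
    calls.push(k);                // record which keys the adder sees
    return origSet.call(this, k, v);
  };
  new Map([["a", 1], ["b", 2]]);
  calls;                          // ["a", "b"]: the patched adder ran per entry
  Map.prototype.set = origSet;    // restore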
diff --git a/deps/v8/test/mjsunit/es6/collections.js b/deps/v8/test/mjsunit/es6/collections.js
index 1664a93bde..feae629439 100644
--- a/deps/v8/test/mjsunit/es6/collections.js
+++ b/deps/v8/test/mjsunit/es6/collections.js
@@ -307,17 +307,6 @@ assertTrue(WeakSet.prototype.has instanceof Function)
assertTrue(WeakSet.prototype.delete instanceof Function)
-// Test class of instance and prototype.
-assertEquals("Set", %_ClassOf(new Set))
-assertEquals("Object", %_ClassOf(Set.prototype))
-assertEquals("Map", %_ClassOf(new Map))
-assertEquals("Object", %_ClassOf(Map.prototype))
-assertEquals("WeakMap", %_ClassOf(new WeakMap))
-assertEquals("Object", %_ClassOf(WeakMap.prototype))
-assertEquals("WeakSet", %_ClassOf(new WeakSet))
-assertEquals("Object", %_ClassOf(WeakMap.prototype))
-
-
// Test name of constructor.
assertEquals("Set", Set.name);
assertEquals("Map", Map.name);
diff --git a/deps/v8/test/mjsunit/es6/generators-objects.js b/deps/v8/test/mjsunit/es6/generators-objects.js
index 2cc359f911..ff216d43e4 100644
--- a/deps/v8/test/mjsunit/es6/generators-objects.js
+++ b/deps/v8/test/mjsunit/es6/generators-objects.js
@@ -55,7 +55,6 @@ function TestGeneratorObject() {
var iter = g();
assertSame(g.prototype, Object.getPrototypeOf(iter));
assertTrue(iter instanceof g);
- assertEquals("Generator", %_ClassOf(iter));
assertEquals("[object Generator]", String(iter));
assertEquals([], Object.getOwnPropertyNames(iter));
assertTrue(iter !== g());
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js
new file mode 100644
index 0000000000..813fffccf7
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestMapConstructorEntrySideEffect(ctor) {
+ const k1 = {};
+ const k2 = {};
+ const k3 = {};
+ let callCount = 0;
+ const input = [
+ Object.defineProperty([, 1], "0", {
+ get() {
+ input.length = 2;
+ return k1;
+ }
+ }),
+ [k2, 2],
+ Object.defineProperty([, 3], "0", {
+ get() {
+ callCount++;
+ return k3;
+ }
+ })
+ ];
+ const col = new ctor(input);
+
+ assertEquals(0, callCount);
+ if ('size' in col) assertEquals(2, col.size);
+ assertEquals(col.get(k1), 1);
+ assertEquals(col.get(k2), 2);
+ assertFalse(col.has(k3));
+}
+
+TestMapConstructorEntrySideEffect(Map);
+TestMapConstructorEntrySideEffect(Map);
+TestMapConstructorEntrySideEffect(Map);
+%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
+TestMapConstructorEntrySideEffect(Map);
+assertOptimized(TestMapConstructorEntrySideEffect);
+
+TestMapConstructorEntrySideEffect(WeakMap);
+TestMapConstructorEntrySideEffect(WeakMap);
+TestMapConstructorEntrySideEffect(WeakMap);
+%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
+TestMapConstructorEntrySideEffect(WeakMap);
+assertOptimized(TestMapConstructorEntrySideEffect);
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js
new file mode 100644
index 0000000000..0c167c1bfa
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js
@@ -0,0 +1,53 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestMapConstructorEntrySideEffect(ctor) {
+ const originalPrototypeSet = ctor.prototype.set;
+ const k1 = {};
+ const k2 = {};
+ let callCount = 0;
+ const input = [
+ Object.defineProperty([, 1], "0", {
+ get() {
+        // Verify the continuation retains the original set function
+ ctor.prototype.set = () => {
+ callCount++;
+ };
+ return k1;
+ }
+ }),
+ [k2, 2]
+ ];
+ const col = new ctor(input);
+
+ assertEquals(0, callCount);
+ if ('size' in col) assertEquals(2, col.size);
+ assertTrue(col.has(k1));
+ assertTrue(col.has(k2));
+
+ const col2 = new ctor(input);
+
+ assertEquals(2, callCount);
+  if ('size' in col2) assertEquals(0, col2.size);
+ assertFalse(col2.has(k1));
+ assertFalse(col2.has(k2));
+
+ ctor.prototype.set = originalPrototypeSet;
+}
+
+TestMapConstructorEntrySideEffect(Map);
+TestMapConstructorEntrySideEffect(Map);
+TestMapConstructorEntrySideEffect(Map);
+%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
+TestMapConstructorEntrySideEffect(Map);
+assertOptimized(TestMapConstructorEntrySideEffect);
+
+TestMapConstructorEntrySideEffect(WeakMap);
+TestMapConstructorEntrySideEffect(WeakMap);
+TestMapConstructorEntrySideEffect(WeakMap);
+%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
+TestMapConstructorEntrySideEffect(WeakMap);
+assertOptimized(TestMapConstructorEntrySideEffect);
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js
new file mode 100644
index 0000000000..7dd7aa7852
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js
@@ -0,0 +1,43 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestMapConstructorEntrySideEffect(ctor) {
+ const k1 = {};
+ const k2 = {};
+ const k3 = {};
+ const input = [
+ Object.defineProperty([, 1], "0", {
+ get() {
+        // Verify the continuation properly accesses subsequent entries
+ Object.defineProperty(input, "1", {
+ get: () => [k3, 3]
+ });
+ return k1;
+ }
+ }),
+ [k2, 2]
+ ];
+ const col = new ctor(input);
+
+ if ('size' in col) assertEquals(2, col.size);
+ assertTrue(col.has(k1));
+ assertFalse(col.has(k2));
+ assertTrue(col.has(k3));
+}
+
+TestMapConstructorEntrySideEffect(Map);
+TestMapConstructorEntrySideEffect(Map);
+TestMapConstructorEntrySideEffect(Map);
+%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
+TestMapConstructorEntrySideEffect(Map);
+assertOptimized(TestMapConstructorEntrySideEffect);
+
+TestMapConstructorEntrySideEffect(WeakMap);
+TestMapConstructorEntrySideEffect(WeakMap);
+TestMapConstructorEntrySideEffect(WeakMap);
+%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
+TestMapConstructorEntrySideEffect(WeakMap);
+assertOptimized(TestMapConstructorEntrySideEffect);
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js
new file mode 100644
index 0000000000..ebf8c790ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js
@@ -0,0 +1,53 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestMapConstructorEntrySideEffect(ctor) {
+ const k1 = {};
+ const k2 = {};
+ const k3 = {};
+ let firstEntryCallCount = 0;
+ let lastEntryCallCount = 0;
+ const input = [
+ Object.defineProperty([, 1], "0", {
+ get() {
+ // Verify handling of a non-Smi array length
+ input.length = 2 ** 32 - 2;
+ firstEntryCallCount++;
+ return k1;
+ }
+ }),
+ [k2, 2],
+ Object.defineProperty([k3, ], "1", {
+ get() {
+ input.length = 1;
+ lastEntryCallCount++;
+ return 3;
+ }
+ })
+ ];
+ const col = new ctor(input);
+
+  assertEquals(1, firstEntryCallCount);
+ assertEquals(1, lastEntryCallCount);
+ if ('size' in col) assertEquals(3, col.size);
+ assertEquals(1, col.get(k1));
+ assertEquals(2, col.get(k2));
+ assertEquals(3, col.get(k3));
+}
+
+TestMapConstructorEntrySideEffect(Map);
+TestMapConstructorEntrySideEffect(Map);
+TestMapConstructorEntrySideEffect(Map);
+%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
+TestMapConstructorEntrySideEffect(Map);
+assertOptimized(TestMapConstructorEntrySideEffect);
+
+TestMapConstructorEntrySideEffect(WeakMap);
+TestMapConstructorEntrySideEffect(WeakMap);
+TestMapConstructorEntrySideEffect(WeakMap);
+%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
+TestMapConstructorEntrySideEffect(WeakMap);
+assertOptimized(TestMapConstructorEntrySideEffect);
diff --git a/deps/v8/test/mjsunit/es6/promise-resolve-thenable-job.js b/deps/v8/test/mjsunit/es6/promise-resolve-thenable-job.js
new file mode 100644
index 0000000000..70ab6cda96
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/promise-resolve-thenable-job.js
@@ -0,0 +1,127 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ let resolve;
+ let onFulfilledValue;
+ const p = new Promise(r => resolve = r);
+ resolve(Promise.resolve(1));
+ p.then(
+ v => {
+ onFulfilledValue = v;
+ },
+ e => {
+ assertUnreachable();
+ });
+ setTimeout(_ => assertEquals(1, onFulfilledValue));
+})();
+
+(function() {
+ let resolve;
+ let onRejectedReason;
+ const p = new Promise(r => resolve = r);
+ resolve(Promise.reject(1));
+ p.then(
+ v => {
+ assertUnreachable();
+ },
+ e => {
+ onRejectedReason = e;
+ });
+ setTimeout(_ => assertEquals(1, onRejectedReason));
+})();
+
+(function() {
+ let onFulfilledValue;
+ (async () => Promise.resolve(1))().then(
+ v => {
+ onFulfilledValue = v;
+ },
+ e => {
+ assertUnreachable();
+ });
+ setTimeout(_ => assertEquals(1, onFulfilledValue));
+})();
+
+(function() {
+ let onRejectedReason;
+ (async () => Promise.reject(1))().then(
+ v => {
+ assertUnreachable();
+ },
+ e => {
+ onRejectedReason = e;
+ });
+ setTimeout(_ => assertEquals(1, onRejectedReason));
+})();
+
+(function() {
+ let resolve;
+ let onFulfilledValue;
+ const p = new Promise(r => resolve = r);
+ resolve({
+ then(onFulfilled, onRejected) {
+ onFulfilled(1);
+ }
+ });
+ p.then(
+ v => {
+ onFulfilledValue = v;
+ },
+ e => {
+ assertUnreachable();
+ });
+ setTimeout(_ => assertEquals(1, onFulfilledValue));
+})();
+
+(function() {
+ let resolve;
+ let onRejectedReason;
+ const p = new Promise(r => resolve = r);
+ resolve({
+ then(onFulfilled, onRejected) {
+ onRejected(1);
+ }
+ });
+ p.then(
+ v => {
+ assertUnreachable();
+ },
+ e => {
+ onRejectedReason = e;
+ });
+ setTimeout(_ => assertEquals(1, onRejectedReason));
+})();
+
+(function() {
+ let onFulfilledValue;
+ (async () => ({
+ then(onFulfilled, onRejected) {
+ onFulfilled(1);
+ }
+ }))().then(
+ v => {
+ onFulfilledValue = v;
+ },
+ e => {
+ assertUnreachable();
+ });
+ setTimeout(_ => assertEquals(1, onFulfilledValue));
+})();
+
+(function() {
+ let onRejectedReason;
+ (async () => ({
+ then(onFulfilled, onRejected) {
+ onRejected(1);
+ }
+ }))().then(
+ v => {
+ assertUnreachable();
+ },
+ e => {
+ onRejectedReason = e;
+ });
+ setTimeout(_ => assertEquals(1, onRejectedReason));
+})();
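The file exercises PromiseResolveThenableJob: resolving a promise with a thenable (including another promise) defers to that thenable's then method on the microtask queue, so the outer promise settles at least one tick later than a plain value would. A minimal sketch (illustrative):

  const p = new Promise(resolve => resolve({
    then(onFulfilled) { onFulfilled(42); }   // a plain thenable
  }));
  p.then(v => { /* v === 42, delivered after the thenable job runs */ });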
diff --git a/deps/v8/test/mjsunit/es6/proxies.js b/deps/v8/test/mjsunit/es6/proxies.js
index 75a80a15bd..f67f9df41e 100644
--- a/deps/v8/test/mjsunit/es6/proxies.js
+++ b/deps/v8/test/mjsunit/es6/proxies.js
@@ -1287,8 +1287,7 @@ TestKeysThrow({
// ---------------------------------------------------------------------------
// String conversion (Object.prototype.toString,
-// Object.prototype.toLocaleString,
-// Function.prototype.toString)
+// Object.prototype.toLocaleString)
var key
@@ -1306,7 +1305,6 @@ function TestToString(handler) {
assertEquals(Symbol.toStringTag, key)
assertEquals("my_proxy", Object.prototype.toLocaleString.call(f))
assertEquals("toString", key)
- assertThrows(function(){ Function.prototype.toString.call(f) })
var o = Object.create(p)
key = ""
diff --git a/deps/v8/test/mjsunit/es6/proxy-function-tostring.js b/deps/v8/test/mjsunit/es6/proxy-function-tostring.js
new file mode 100644
index 0000000000..d859822df0
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/proxy-function-tostring.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --noharmony-function-tostring
+
+assertThrows(() => new Proxy(function() {}, {}).toString(), TypeError);
diff --git a/deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js b/deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js
new file mode 100644
index 0000000000..5d29e7a8f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js
@@ -0,0 +1,236 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// NOTE:
+// Tests in this file are meant to run in the presence of an invalidated
+// NoElementsProtector, as effected by the following line.
+Array.prototype[0] = 42;
+delete Array.prototype[0];
+
+
+(function TestBasics() {
+ var a = [1, 2];
+ var b = [...a];
+ assertArrayEquals([1, 2], b)
+
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e', 'f'],
+ ['a', ...'bc', 'd', ...'ef'])
+})();
+
+
+var log = [];
+
+function* gen(n) {
+ log.push(n, 1);
+ yield 1;
+ log.push(n, 2);
+ yield 2;
+ log.push(n, 3);
+ yield 3;
+ log.push(n, 'done');
+}
+
+function id(v) {
+ log.push(v);
+ return v;
+}
+
+
+(function TestGenerator() {
+ assertArrayEquals([1, 2, 3], [...gen('a')]);
+ assertArrayEquals(['x', 1, 2, 3, 'y', 1, 2, 3, 'z'],
+ ['x', ...gen('a'), 'y', ...gen('b'), 'z']);
+})();
+
+
+(function TestOrderOfExecution() {
+ log = [];
+ assertArrayEquals(['x', 1, 2, 3, 'y', 1, 2, 3, 'z'],
+ [id('x'), ...gen('a'), id('y'), ...gen('b'), id('z')]);
+ assertArrayEquals([
+ 'x', 'a', 1, 'a', 2, 'a', 3, 'a', 'done',
+ 'y', 'b', 1, 'b', 2, 'b', 3, 'b', 'done',
+ 'z'
+ ], log);
+})();
+
+
+(function TestNotIterable() {
+ var a;
+ assertThrows(function() {
+ a = [...42];
+ }, TypeError);
+ assertSame(undefined, a);
+
+
+})();
+
+
+(function TestInvalidIterator() {
+ var iter = {
+ [Symbol.iterator]: 42
+ };
+ var a;
+ assertThrows(function() {
+ a = [...iter];
+ }, TypeError);
+ assertSame(undefined, a);
+})();
+
+
+(function TestIteratorNotAnObject() {
+ var iter = {
+ [Symbol.iterator]() {
+ return 42;
+ }
+ };
+ var a;
+ assertThrows(function() {
+ a = [...iter];
+ }, TypeError);
+ assertSame(undefined, a);
+})();
+
+
+(function TestIteratorNoNext() {
+ var iter = {
+ [Symbol.iterator]() {
+ return {};
+ }
+ };
+ var a;
+ assertThrows(function() {
+ a = [...iter];
+ }, TypeError);
+ assertSame(undefined, a);
+})();
+
+
+(function TestIteratorResultDoneThrows() {
+ function MyError() {}
+ var iter = {
+ [Symbol.iterator]() {
+ return {
+ next() {
+ return {
+ get done() {
+ throw new MyError();
+ }
+ }
+ }
+ };
+ }
+ };
+ var a;
+ assertThrows(function() {
+ a = [...iter];
+ }, MyError);
+ assertSame(undefined, a);
+})();
+
+
+(function TestIteratorResultValueThrows() {
+ function MyError() {}
+ var iter = {
+ [Symbol.iterator]() {
+ return {
+ next() {
+ return {
+ done: false,
+ get value() {
+ throw new MyError();
+ }
+ }
+ }
+ };
+ }
+ };
+ var a;
+ assertThrows(function() {
+ a = [...iter];
+ }, MyError);
+ assertSame(undefined, a);
+})();
+
+
+(function TestOptimize() {
+ function f() {
+ return [...'abc'];
+ }
+ assertArrayEquals(['a', 'b', 'c'], f());
+ %OptimizeFunctionOnNextCall(f);
+ assertArrayEquals(['a', 'b', 'c'], f());
+})();
+
+
+(function TestDeoptimize() {
+ var iter = {
+ [Symbol.iterator]() {
+ var i = 0;
+ return {
+ next() {
+ %DeoptimizeFunction(f);
+ return {value: ++i, done: i === 3};
+ }
+ };
+ }
+ };
+ function f() {
+ return [0, ...iter];
+ }
+
+ assertArrayEquals([0, 1, 2], f());
+})();
+
+
+(function TestPrototypeSetter1() {
+ Object.defineProperty(Array.prototype, 3, {set() {throw 666}})
+ Object.defineProperty(Array.prototype, 4, {set() {throw 666}})
+
+ function f() {
+ return ['a', ...['b', 'c', 'd'], 'e']
+ }
+
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+ %OptimizeFunctionOnNextCall(f);
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+
+ delete Array.prototype[3];
+ delete Array.prototype[4];
+})();
+
+
+(function TestPrototypeSetter2() {
+ Object.defineProperty(Array.prototype.__proto__, 3, {set() {throw 666}})
+ Object.defineProperty(Array.prototype.__proto__, 4, {set() {throw 666}})
+
+ function f() {
+ return ['a', ...['b', 'c', 'd'], 'e']
+ }
+
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+ %OptimizeFunctionOnNextCall(f);
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+
+ delete Array.prototype.__proto__[3];
+ delete Array.prototype.__proto__[4];
+})();
+
+
+(function TestPrototypeProxy() {
+ const backup = Array.prototype.__proto__;
+ Array.prototype.__proto__ = new Proxy({}, {set() {throw 666}});
+
+ function f() {
+ return ['a', ...['b', 'c', 'd'], 'e']
+ }
+
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+ %OptimizeFunctionOnNextCall(f);
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+
+ Object.setPrototypeOf(Array.prototype, backup);
+})();
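Splitting the spread tests by protector state matters because an indexed property on Array.prototype forces holes to read through the prototype chain, which is exactly what V8's NoElementsProtector guards against. The observable part in plain JavaScript (illustrative):

  Array.prototype[0] = 42;
  const a = [, "x"];       // hole at index 0
  a[0];                    // 42: the hole reads through to the prototype
  [...a];                  // [42, "x"]
  delete Array.prototype[0];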
diff --git a/deps/v8/test/mjsunit/es6/spread-array.js b/deps/v8/test/mjsunit/es6/spread-array-pristine-prototype.js
index d112422b78..ea4d133703 100644
--- a/deps/v8/test/mjsunit/es6/spread-array.js
+++ b/deps/v8/test/mjsunit/es6/spread-array-pristine-prototype.js
@@ -4,6 +4,10 @@
// Flags: --allow-natives-syntax
+// NOTE:
+// Tests in this file are meant to run in the presence of a valid
+// NoElementsProtector. Do not touch Array.prototype here.
+
(function TestBasics() {
var a = [1, 2];
var b = [...a];
@@ -165,7 +169,7 @@ function id(v) {
var i = 0;
return {
next() {
- $DeoptimizeFunction(f);
+ %DeoptimizeFunction(f);
return {value: ++i, done: i === 3};
}
};
@@ -176,4 +180,4 @@ function id(v) {
}
assertArrayEquals([0, 1, 2], f());
-});
+})();
diff --git a/deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js b/deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js
new file mode 100644
index 0000000000..ed38228c28
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+(function TestPrototypeProxy() {
+ const backup = Array.prototype.__proto__;
+ Array.prototype.__proto__ = new Proxy({}, {set() {throw 666}});
+
+ function f() {
+ return ['a', ...['b', 'c', 'd'], 'e']
+ }
+
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+ %OptimizeFunctionOnNextCall(f);
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+
+ Object.setPrototypeOf(Array.prototype, backup);
+})();
diff --git a/deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js b/deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js
new file mode 100644
index 0000000000..2ca9e21787
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+(function TestPrototypeSetter1() {
+ Object.defineProperty(Array.prototype, 3, {set() {throw 666}})
+ Object.defineProperty(Array.prototype, 4, {set() {throw 666}})
+
+ function f() {
+ return ['a', ...['b', 'c', 'd'], 'e']
+ }
+
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+ %OptimizeFunctionOnNextCall(f);
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+
+ delete Array.prototype[3];
+ delete Array.prototype[4];
+})();
diff --git a/deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js b/deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js
new file mode 100644
index 0000000000..736d50b46b
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+(function TestPrototypeSetter2() {
+ Object.defineProperty(Array.prototype.__proto__, 3, {set() {throw 666}})
+ Object.defineProperty(Array.prototype.__proto__, 4, {set() {throw 666}})
+
+ function f() {
+ return ['a', ...['b', 'c', 'd'], 'e']
+ }
+
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+ %OptimizeFunctionOnNextCall(f);
+ assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
+
+ delete Array.prototype.__proto__[3];
+ delete Array.prototype.__proto__[4];
+})();
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index d83e2174ec..a6c12909b4 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -67,8 +67,6 @@ function TestType() {
assertEquals("symbol", typeof symbols[i])
assertTrue(typeof symbols[i] === "symbol")
assertFalse(%SymbolIsPrivate(symbols[i]))
- assertEquals(null, %_ClassOf(symbols[i]))
- assertEquals("Symbol", %_ClassOf(Object(symbols[i])))
}
}
TestType()
diff --git a/deps/v8/test/mjsunit/es6/templates.js b/deps/v8/test/mjsunit/es6/templates.js
index 3eb73e4d16..3e113cb829 100644
--- a/deps/v8/test/mjsunit/es6/templates.js
+++ b/deps/v8/test/mjsunit/es6/templates.js
@@ -342,27 +342,30 @@ var obj = {
var a = 1;
var b = 2;
- tag`head${a}tail`;
- tag`head${b}tail`;
-
+  // Call sites are cached per ParseNode, so the same tag call in a loop
+  // yields the same template object.
+ for (var i = 0; i < 2; ++i) {
+ tag`head${i == 0 ? a : b}tail`;
+ }
assertEquals(2, callSites.length);
assertSame(callSites[0], callSites[1]);
- eval("tag`head${a}tail`");
- assertEquals(3, callSites.length);
- assertSame(callSites[1], callSites[2]);
-
- eval("tag`head${b}tail`");
+ // Tag calls within eval() never have the same ParseNode as the same tag
+ // call from a different eval() invocation.
+ for (var i = 0; i < 2; ++i) {
+ eval("tag`head${i == 0 ? a : b}tail`");
+ }
assertEquals(4, callSites.length);
- assertSame(callSites[2], callSites[3]);
+ assertTrue(callSites[1] !== callSites[2]);
+ assertTrue(callSites[2] !== callSites[3]);
(new Function("tag", "a", "b", "return tag`head${a}tail`;"))(tag, 1, 2);
assertEquals(5, callSites.length);
- assertSame(callSites[3], callSites[4]);
+ assertTrue(callSites[3] !== callSites[4]);
(new Function("tag", "a", "b", "return tag`head${b}tail`;"))(tag, 1, 2);
assertEquals(6, callSites.length);
- assertSame(callSites[4], callSites[5]);
+ assertTrue(callSites[4] !== callSites[5]);
callSites = [];
@@ -374,17 +377,19 @@ var obj = {
callSites = [];
- eval("tag`\\\r\n\\\n\\\r`");
- eval("tag`\\\r\n\\\n\\\r`");
+ for (var i = 0; i < 2; ++i) {
+ eval("tag`\\\r\n\\\n\\\r`");
+ }
assertEquals(2, callSites.length);
- assertSame(callSites[0], callSites[1]);
+ assertTrue(callSites[0] !== callSites[1]);
assertEquals("", callSites[0][0]);
assertEquals("\\\n\\\n\\\n", callSites[0].raw[0]);
callSites = [];
- tag`\uc548\ub155`;
- tag`\uc548\ub155`;
+ for (var i = 0; i < 2; ++i) {
+ tag`\uc548\ub155`;
+ }
assertEquals(2, callSites.length);
assertSame(callSites[0], callSites[1]);
assertEquals("안녕", callSites[0][0]);
@@ -735,3 +740,128 @@ var global = this;
assertThrows(() => Function("f`foo`--"), ReferenceError);
assertThrows(() => Function("f`foo` = 1"), ReferenceError);
})();
+
+// Disable eval caching if a tagged template occurs in a nested function
+var v = 0;
+var templates = [];
+function tag(callSite) { templates.push(callSite); }
+for (v = 0; v < 6; v += 2) {
+ eval("(function() { for (var i = 0; i < 2; ++i) tag`Hello${v}world` })()");
+ assertSame(templates[v], templates[v + 1]);
+}
+assertNotSame(templates[0], templates[2]);
+assertNotSame(templates[0], templates[4]);
+assertNotSame(templates[1], templates[3]);
+assertNotSame(templates[1], templates[5]);
+assertNotSame(templates[2], templates[4]);
+assertNotSame(templates[3], templates[5]);
+
+function makeSource1(id) {
+ return `function f() {
+ for (var i = 0; i < 2; ++i) tag\`Hello${id}world\`;
+ }
+ f();`;
+}
+templates = [];
+for (v = 0; v < 6; v += 2) {
+ eval(makeSource1(v));
+ assertSame(templates[v], templates[v + 1]);
+}
+assertNotSame(templates[0], templates[2]);
+assertNotSame(templates[0], templates[4]);
+assertNotSame(templates[1], templates[3]);
+assertNotSame(templates[1], templates[5]);
+assertNotSame(templates[2], templates[4]);
+assertNotSame(templates[3], templates[5]);
+
+templates = [];
+eval("(function() { for (var i = 0; i < 2; ++i) tag`Hello${1}world` })()");
+eval("(function() { for (var i = 0; i < 2; ++i) tag`Hello${2}world` })()");
+eval("(function() { for (var i = 0; i < 2; ++i) tag`Hello${2}world` })()");
+assertSame(templates[0], templates[1]);
+assertNotSame(templates[0], templates[2]);
+assertNotSame(templates[0], templates[4]);
+assertNotSame(templates[1], templates[3]);
+assertNotSame(templates[1], templates[5]);
+assertSame(templates[2], templates[3]);
+assertNotSame(templates[2], templates[4]);
+assertNotSame(templates[3], templates[5]);
+assertSame(templates[4], templates[5]);
+
+templates = [];
+eval(makeSource1(1));
+eval(makeSource1(2));
+eval(makeSource1(3));
+assertSame(templates[0], templates[1]);
+assertNotSame(templates[0], templates[2]);
+assertNotSame(templates[0], templates[4]);
+assertNotSame(templates[1], templates[3]);
+assertNotSame(templates[1], templates[5]);
+assertSame(templates[2], templates[3]);
+assertNotSame(templates[2], templates[4]);
+assertNotSame(templates[3], templates[5]);
+assertSame(templates[4], templates[5]);
+
+// Disable eval caching if a tagged template occurs in an even deeper nested function
+var v = 0;
+templates = [];
+for (v = 0; v < 6; v += 2) {
+ eval("(function() { (function() { for (var i = 0; i < 2; ++i) tag`Hello${v}world` })() })()");
+ if (!v) continue;
+ assertNotSame(templates[v], templates[v - 1]);
+}
+assertNotSame(templates[0], templates[2]);
+assertNotSame(templates[0], templates[4]);
+assertNotSame(templates[1], templates[3]);
+assertNotSame(templates[1], templates[5]);
+assertNotSame(templates[2], templates[4]);
+assertNotSame(templates[3], templates[5]);
+
+function makeSource2(id) {
+ return `function f() {
+ function innerF() {
+ for (var i = 0; i < 2; ++i) tag\`Hello${id}world\`;
+ }
+ return innerF();
+ }
+ f();`;
+}
+templates = [];
+for (v = 0; v < 6; v += 2) {
+ eval(makeSource2(v));
+ assertSame(templates[v], templates[v + 1]);
+}
+assertNotSame(templates[0], templates[2]);
+assertNotSame(templates[0], templates[4]);
+assertNotSame(templates[1], templates[3]);
+assertNotSame(templates[1], templates[5]);
+assertNotSame(templates[2], templates[4]);
+assertNotSame(templates[3], templates[5]);
+
+templates = [];
+eval("(function() { (function() { for (var i = 0; i < 2; ++i) tag`Hello${1}world` })() })()");
+eval("(function() { (function() { for (var i = 0; i < 2; ++i) tag`Hello${2}world` })() })()");
+eval("(function() { (function() { for (var i = 0; i < 2; ++i) tag`Hello${2}world` })() })()");
+assertSame(templates[0], templates[1]);
+assertNotSame(templates[0], templates[2]);
+assertNotSame(templates[0], templates[4]);
+assertNotSame(templates[1], templates[3]);
+assertNotSame(templates[1], templates[5]);
+assertSame(templates[2], templates[3]);
+assertNotSame(templates[2], templates[4]);
+assertNotSame(templates[3], templates[5]);
+assertSame(templates[4], templates[5]);
+
+templates = [];
+eval(makeSource2(1));
+eval(makeSource2(2));
+eval(makeSource2(3));
+assertSame(templates[0], templates[1]);
+assertNotSame(templates[0], templates[2]);
+assertNotSame(templates[0], templates[4]);
+assertNotSame(templates[1], templates[3]);
+assertNotSame(templates[1], templates[5]);
+assertSame(templates[2], templates[3]);
+assertNotSame(templates[2], templates[4]);
+assertNotSame(templates[3], templates[5]);
+assertSame(templates[4], templates[5]);
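The reworked assertions encode the ES2018 call-site caching rule: each tagged-template Parse Node owns one template object, and every eval of a source string parses fresh nodes. A compact sketch (illustrative):

  function tag(strings) { return strings; }
  function f() { return tag`x`; }
  f() === f();                        // true: one call site, one object
  eval("tag`x`") === eval("tag`x`");  // false: each eval is a new call site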
diff --git a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
index 66316f300e..6573f2635d 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
@@ -120,6 +120,31 @@ tests.push(function TestConstructFromArrayNoIteratorWithGetter(constr) {
assertArrayEquals([1, 2, 22], ta);
});
+tests.push(function TestConstructFromArrayNullIterator(constr) {
+ var arr = [1, 2, 3];
+ arr[Symbol.iterator] = null;
+
+ var ta = new Uint8Array(arr);
+
+ assertArrayEquals([1, 2, 3], ta);
+});
+
+tests.push(function TestConstructFromArrayUndefinedIterator(constr) {
+ var arr = [1, 2, 3];
+ arr[Symbol.iterator] = undefined;
+
+ var ta = new Uint8Array(arr);
+
+ assertArrayEquals([1, 2, 3], ta);
+});
+
+tests.push(function TestConstructFromArrayNonCallableIterator(constr) {
+ var arr = [1, 2, 3];
+ arr[Symbol.iterator] = 1;
+
+ assertThrows(() => new Uint8Array(arr), TypeError);
+});
+
tests.push(function TestConstructFromArray(constr) {
var n = 64;
var jsArray = [];
@@ -150,6 +175,44 @@ tests.push(function TestConstructFromTypedArray(constr) {
}
});
+tests.push(function TestFromTypedArraySpecies(constr) {
+ var b = new ArrayBuffer(16);
+ var a1 = new constr(b);
+
+ var constructor_read = 0;
+ var cons = b.constructor;
+
+ Object.defineProperty(b, 'constructor', {
+ get: function() {
+ constructor_read++;
+ return cons;
+ }
+ });
+
+ var a2 = new constr(a1);
+
+ assertEquals(1, constructor_read);
+});
+
+tests.push(function TestFromTypedArraySpeciesNeutersBuffer(constr) {
+ var b = new ArrayBuffer(16);
+ var a1 = new constr(b);
+
+ var constructor_read = 0;
+ var cons = b.constructor;
+
+ Object.defineProperty(b, 'constructor', {
+ get: function() {
+ %ArrayBufferNeuter(b);
+ return cons;
+ }
+ });
+
+ var a2 = new constr(a1);
+
+ assertArrayEquals([], a2);
+});
+
tests.push(function TestLengthIsMaxSmi(constr) {
var myObject = { 0: 5, 1: 6, length: %_MaxSmi() + 1 };
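The null/undefined iterator cases pin down GetMethod semantics in the TypedArray constructor: a null or undefined @@iterator means "no iterator, take the array-like path", while any other non-callable value is a TypeError. A minimal sketch (illustrative):

  const arr = [1, 2, 3];
  arr[Symbol.iterator] = null;
  new Uint8Array(arr);    // Uint8Array [1, 2, 3] via the array-like path
  arr[Symbol.iterator] = 1;
  new Uint8Array(arr);    // throws TypeError: 1 is not callable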
diff --git a/deps/v8/test/mjsunit/es6/typedarray-every.js b/deps/v8/test/mjsunit/es6/typedarray-every.js
index a3498f5786..968078988f 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-every.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-every.js
@@ -132,7 +132,6 @@ function TestTypedArrayForEach(constructor) {
});
assertEquals(2, count);
assertTrue(!!buffer);
- assertEquals("ArrayBuffer", %_ClassOf(buffer));
assertSame(buffer, a.buffer);
// The %TypedArray%.every() method should not work when
diff --git a/deps/v8/test/mjsunit/es6/typedarray-filter.js b/deps/v8/test/mjsunit/es6/typedarray-filter.js
new file mode 100644
index 0000000000..0f25c362ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-filter.js
@@ -0,0 +1,111 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array];
+
+function TestTypedArrayFilter(constructor) {
+ assertEquals(1, constructor.prototype.filter.length);
+
+ // Throw type error if source array is detached while executing a callback
+ let ta1 = new constructor(10);
+ assertThrows(() =>
+ ta1.filter(() => %ArrayBufferNeuter(ta1.buffer))
+ , TypeError);
+
+ // A new typed array should be created after finishing callbacks
+ var speciesCreated = 0;
+
+ class MyTypedArray extends constructor {
+ static get [Symbol.species]() {
+ return function() {
+ speciesCreated++;
+ return new constructor(10);
+ };
+ }
+ }
+
+ new MyTypedArray(10).filter(() => {
+ assertEquals(0, speciesCreated);
+ return true;
+ });
+ assertEquals(1, speciesCreated);
+
+ // A new typed array should be initialized to 0s
+ class LongTypedArray extends constructor {
+ static get [Symbol.species]() {
+ return function(len) {
+ return new constructor(len * 2);
+ }
+ }
+ }
+ let ta2 = new LongTypedArray(3).fill(1);
+ let ta3 = ta2.filter((val, index, array) => val > 0);
+ assertArrayEquals(ta3, [1, 1, 1, 0, 0, 0]);
+ assertEquals(ta3.constructor, constructor);
+
+ // Throw if a new typed array is too small
+ class ShortTypedArray extends constructor {
+ static get [Symbol.species]() {
+ return function(len) {
+ return new constructor(len/2);
+ }
+ }
+ }
+ assertThrows(() => new ShortTypedArray(10).filter(() => true));
+
+ // Throw if callback is not callable
+ assertThrows(() => new constructor(10).filter(123));
+ assertThrows(() => new constructor(10).filter({}));
+
+ // Empty result
+ assertEquals(new constructor(10).filter(() => false), new constructor(0));
+
+ // If a new typed array shares a buffer with a source array
+ let ab = new ArrayBuffer(100);
+ class SharedBufferTypedArray extends constructor {
+ static get [Symbol.species]() {
+ return function(len) {
+ return new constructor(ab, 0, 5);
+ }
+ }
+ }
+ let ta4 = new SharedBufferTypedArray(ab, 0, 5).fill(1);
+ let ta5 = ta4.filter(() => {
+ ta4[0] = 123;
+ ta4[2] = 123;
+ return true;
+ });
+ assertEquals(ta4.buffer, ta5.buffer);
+ assertArrayEquals(ta4, [1, 1, 123, 1, 1]);
+ assertArrayEquals(ta5, [1, 1, 123, 1, 1]);
+
+ // If a new typed array has a different type with source array
+ for (let j = 0; j < typedArrayConstructors.length; j++) {
+ let otherConstructor = typedArrayConstructors[j];
+ class OtherTypedArray extends constructor {
+ static get [Symbol.species]() {
+ return function(len) {
+ return new otherConstructor(len);
+ }
+ }
+ }
+ let ta6 = new OtherTypedArray(10).fill(123);
+ assertEquals(ta6.filter(() => true), new otherConstructor(10).fill(123));
+ }
+}
+
+for (var i = 0; i < typedArrayConstructors.length; i++) {
+ TestTypedArrayFilter(typedArrayConstructors[i]);
+}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-foreach.js b/deps/v8/test/mjsunit/es6/typedarray-foreach.js
index 7a846b1ac7..252706a9b5 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-foreach.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-foreach.js
@@ -121,7 +121,6 @@ function TestTypedArrayForEach(constructor) {
});
assertEquals(2, count);
assertTrue(!!buffer);
- assertEquals("ArrayBuffer", %_ClassOf(buffer));
assertSame(buffer, a.buffer);
// The %TypedArray%.forEach() method should not work when
diff --git a/deps/v8/test/mjsunit/es6/typedarray-from.js b/deps/v8/test/mjsunit/es6/typedarray-from.js
index 709c453379..8157658249 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-from.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-from.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -14,19 +16,65 @@ var typedArrayConstructors = [
Float64Array
];
+function defaultValue(constr) {
+ if (constr == Float32Array || constr == Float64Array) return NaN;
+ return 0;
+}
+
+function assertArrayLikeEquals(value, expected, type) {
+ assertEquals(value.__proto__, type.prototype);
+ assertEquals(expected.length, value.length);
+ for (var i = 0; i < value.length; ++i) {
+ assertEquals(expected[i], value[i]);
+ }
+}
+
+(function() {
+ var source = [-0, 0, 2**40, 2**41, 2**42];
+ var arr = Float64Array.from(source);
+ assertArrayLikeEquals(arr, source, Float64Array);
+
+ arr = Float32Array.from(source);
+ assertArrayLikeEquals(arr, source, Float32Array);
+})();
+
+(function() {
+ var source = [-0, 0, , 2**42];
+ var expected = [-0, 0, NaN, 2**42];
+ var arr = Float64Array.from(source);
+ assertArrayLikeEquals(arr, expected, Float64Array);
+
+ arr = Float32Array.from(source);
+ assertArrayLikeEquals(arr, expected, Float32Array);
+})();
+
+(function() {
+ var source = {0: -0, 1: 0, 2: 2**40, 3: 2**41, 4: 2**42, length: 5};
+ var expected = [-0, 0, 2**40, 2**41, 2**42];
+ var arr = Float64Array.from(source);
+ assertArrayLikeEquals(arr, expected, Float64Array);
+
+ arr = Float32Array.from(source);
+ assertArrayLikeEquals(arr, expected, Float32Array);
+})();
+
+(function() {
+ var source = [-0, 0, , 2**42];
+ Object.getPrototypeOf(source)[2] = 27;
+ var expected = [-0, 0, 27, 2**42];
+ var arr = Float64Array.from(source);
+ assertArrayLikeEquals(arr, expected, Float64Array);
+
+ arr = Float32Array.from(source);
+ assertArrayLikeEquals(arr, expected, Float32Array);
+})();
+
for (var constructor of typedArrayConstructors) {
assertEquals(1, constructor.from.length);
// TypedArray.from only callable on this subclassing %TypedArray%
assertThrows(function () {constructor.from.call(Array, [])}, TypeError);
- function assertArrayLikeEquals(value, expected, type) {
- assertEquals(value.__proto__, type.prototype);
- assertEquals(expected.length, value.length);
- for (var i = 0; i < value.length; ++i) {
- assertEquals(expected[i], value[i]);
- }
- }
// Assert that calling mapfn with / without thisArg in sloppy and strict modes
// works as expected.
@@ -47,6 +95,14 @@ for (var constructor of typedArrayConstructors) {
assertThrows(function() {constructor.from.call(1, [])}, TypeError);
assertThrows(function() {constructor.from.call(undefined, [])}, TypeError);
+ // Use a map function that returns non-numbers.
+ function mapper(value, index) {
+ return String.fromCharCode(value);
+ }
+ var d = defaultValue(constructor);
+ assertArrayLikeEquals(
+ constructor.from([72, 69, 89], mapper), [d, d, d], constructor);
+
// Converting from various other types, demonstrating that it can
// operate on array-like objects as well as iterables.
// TODO(littledan): constructors should have similar flexibility.
@@ -72,12 +128,62 @@ for (var constructor of typedArrayConstructors) {
assertArrayLikeEquals(constructor.from(generator()),
[4, 5, 6], constructor);
+ // Check mapper is used for non-iterator case.
+ function mapper2(value, index) {
+ return value + 1;
+ }
+ var array_like = {
+ 0: 1,
+ 1: 2,
+ 2: 3,
+ length: 3
+ };
+ assertArrayLikeEquals(constructor.from(array_like, mapper2),
+ [2, 3, 4], constructor);
+
+ // With a smi source. Step 10 will set len = 0.
+ var source = 1;
+ assertArrayLikeEquals(constructor.from(source), [], constructor);
+
assertThrows(function() { constructor.from(null); }, TypeError);
assertThrows(function() { constructor.from(undefined); }, TypeError);
assertThrows(function() { constructor.from([], null); }, TypeError);
assertThrows(function() { constructor.from([], 'noncallable'); },
TypeError);
+ source = [1, 2, 3];
+ var proxy = new Proxy(source, {});
+ assertArrayLikeEquals(constructor.from(proxy), source, constructor);
+
+ proxy = new Proxy(source, {
+ get: function(target, name) {
+ if (name === Symbol.iterator) return target[name];
+ if (name === "length") return 3;
+ return target[name] + 1;
+ }
+ });
+ assertArrayLikeEquals(constructor.from(proxy), [2, 3, 4], constructor);
+}
+
+// Tests that modify global state in a way that affects fast paths e.g. by
+// invalidating protectors or changing prototypes.
+for (var constructor of typedArrayConstructors) {
+ source = [1, 2, 3];
+ source[Symbol.iterator] = undefined;
+ assertArrayLikeEquals(constructor.from(source), source, constructor);
+
+ source = [{ valueOf: function(){ return 42; }}];
+ source[Symbol.iterator] = undefined;
+ assertArrayLikeEquals(constructor.from(source), [42], constructor);
+
+ Number.prototype[Symbol.iterator] = function* () {
+ yield 1;
+ yield 2;
+ yield 3;
+ }
+ assertArrayLikeEquals(constructor.from(1), [1, 2, 3], constructor);
+ assertArrayLikeEquals(constructor.from(1.1), [1, 2, 3], constructor);
+
var nullIterator = {};
nullIterator[Symbol.iterator] = null;
assertArrayLikeEquals(constructor.from(nullIterator), [],
@@ -90,6 +196,26 @@ for (var constructor of typedArrayConstructors) {
assertThrows(function() { constructor.from([], null); }, TypeError);
+ d = defaultValue(constructor);
+ let ta1 = new constructor(3).fill(1);
+ Object.defineProperty(ta1, "length", {get: function() {
+ return 6;
+ }});
+ delete ta1[Symbol.iterator];
+ delete ta1.__proto__[Symbol.iterator];
+ delete ta1.__proto__.__proto__[Symbol.iterator];
+ assertArrayLikeEquals(constructor.from(ta1), [1, 1, 1, d, d, d], constructor);
+
+ let ta2 = new constructor(3).fill(1);
+ Object.defineProperty(ta2, "length", {get: function() {
+ %ArrayBufferNeuter(ta2.buffer);
+ return 6;
+ }});
+ assertArrayLikeEquals(constructor.from(ta2), [d, d, d, d, d, d], constructor);
+
+ var o1 = {0: 0, 1: 1, 2: 2, length: 6};
+ assertArrayLikeEquals(constructor.from(o1), [0, 1, 2, d, d, d], constructor);
+
// Ensure iterator is only accessed once, and only invoked once
var called = 0;
var arr = [1, 2, 3];
diff --git a/deps/v8/test/mjsunit/es6/typedarray-of.js b/deps/v8/test/mjsunit/es6/typedarray-of.js
index eaa7bde11b..8ae590a849 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-of.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-of.js
@@ -1,6 +1,8 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --expose-gc
// Based on Mozilla Array.of() tests at http://dxr.mozilla.org/mozilla-central/source/js/src/jit-test/tests/collections
@@ -130,6 +132,20 @@ function TestTypedArrayOf(constructor) {
for (var x of [undefined, null, false, true, "cow", 42, 3.14]) {
assertThrows(function () { constructor.of.call(x); }, TypeError);
}
+
+ // Check that new typed array elements are accessed correctly even after
+ // garbage collection is invoked in ToNumber.
+ var not_number = {
+ [Symbol.toPrimitive]() {
+ gc();
+ return 123;
+ }
+ };
+ var dangerous_array = new Array(64).fill(not_number);
+ var a = constructor.of(...dangerous_array);
+ for (var i = 0; i < 64; i++) {
+ assertEquals(123, a[i]);
+ }
}
for (var constructor of typedArrayConstructors) {
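The new case guards against ToNumber side effects running while the freshly allocated array is being filled; the hazard has roughly this shape (gc() assumes --expose-gc):

  // Each argument is coerced as it is stored, so a [Symbol.toPrimitive]
  // hook can trigger arbitrary work, including a full GC, mid-fill.
  var hook = { [Symbol.toPrimitive]() { gc(); return 123; } };
  var ta = Int32Array.of(hook, hook, hook);  // must yield [123, 123, 123]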
diff --git a/deps/v8/test/mjsunit/es6/unicode-regexp-restricted-syntax.js b/deps/v8/test/mjsunit/es6/unicode-regexp-restricted-syntax.js
index dd4fa39ab5..1cc46df1b6 100644
--- a/deps/v8/test/mjsunit/es6/unicode-regexp-restricted-syntax.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-restricted-syntax.js
@@ -13,6 +13,7 @@ assertThrows("/\\c/u", SyntaxError);
assertThrows("/\\c0/u", SyntaxError);
// test262/built-ins/RegExp/unicode_restricted_quantifiable_assertion
assertThrows("/(?=.)*/u", SyntaxError);
+assertThrows("/(?=.){1,2}/u", SyntaxError);
// test262/built-ins/RegExp/unicode_restricted_octal_escape
assertThrows("/[\\1]/u", SyntaxError);
assertThrows("/\\00/u", SyntaxError);
diff --git a/deps/v8/test/mjsunit/es8/object-entries.js b/deps/v8/test/mjsunit/es8/object-entries.js
index 5c7e74e378..1c6358648b 100644
--- a/deps/v8/test/mjsunit/es8/object-entries.js
+++ b/deps/v8/test/mjsunit/es8/object-entries.js
@@ -19,7 +19,7 @@ function TestMeta() {
TestMeta();
-function TestBasic() {
+function TestBasic(withWarmup) {
var x = 16;
var O = {
d: 1,
@@ -33,22 +33,29 @@ function TestBasic() {
O.a = 2;
O.b = 4;
Object.defineProperty(O, "HIDDEN", { enumerable: false, value: NaN });
- assertEquals([
+ if (withWarmup) {
+ for (const key in O) {}
+ }
+ O.c = 6;
+ const resultEntries = [
["0", 123],
["256", "ducks"],
["1000", 456],
["d", 1],
- ["c", 3],
+ ["c", 6],
["0x100", "quack"],
["a", 2],
["b", 4]
- ], Object.entries(O));
+ ];
+ assertEquals(resultEntries, Object.entries(O));
+ assertEquals(resultEntries, Object.entries(O));
assertEquals(Object.entries(O), Object.keys(O).map(key => [key, O[key]]));
assertTrue(Array.isArray(Object.entries({})));
assertEquals(0, Object.entries({}).length);
}
TestBasic();
+TestBasic(true);
function TestToObject() {
@@ -59,7 +66,7 @@ function TestToObject() {
TestToObject();
-function TestOrder() {
+function TestOrder(withWarmup) {
var O = {
a: 1,
[Symbol.iterator]: null
@@ -88,6 +95,11 @@ function TestOrder() {
}
});
+ if (withWarmup) {
+ for (const key in P) {}
+ }
+ log = [];
+
assertEquals([["456", 123], ["a", 1]], Object.entries(P));
assertEquals([
"[[OwnPropertyKeys]]",
@@ -99,9 +111,10 @@ function TestOrder() {
], log);
}
TestOrder();
+TestOrder(true);
-function TestOrderWithDuplicates() {
+function TestOrderWithDuplicates(withWarmup) {
var O = {
a: 1,
[Symbol.iterator]: null
@@ -130,6 +143,11 @@ function TestOrderWithDuplicates() {
}
});
+ if (withWarmup) {
+ for (const key in P) {}
+ }
+ log = [];
+
assertEquals([
["a", 1],
["a", 1],
@@ -151,9 +169,20 @@ function TestOrderWithDuplicates() {
], log);
}
TestOrderWithDuplicates();
+TestOrderWithDuplicates(true);
+function TestDescriptorProperty() {
+ function f() {};
+ const o = {};
+ o.a = f;
-function TestPropertyFilter() {
+ for (const key in o) {};
+ const entries = Object.entries(o);
+ assertEquals([['a', f]], entries);
+}
+TestDescriptorProperty();
+
+function TestPropertyFilter(withWarmup) {
var object = { prop3: 30 };
object[2] = 40;
object["prop4"] = 50;
@@ -164,6 +193,10 @@ function TestPropertyFilter() {
var sym = Symbol("prop8");
object[sym] = 90;
+ if (withWarmup) {
+ for (const key in object) {}
+ }
+
values = Object.entries(object);
assertEquals(5, values.length);
assertEquals([
@@ -175,11 +208,15 @@ function TestPropertyFilter() {
], values);
}
TestPropertyFilter();
+TestPropertyFilter(true);
-function TestWithProxy() {
+function TestWithProxy(withWarmup) {
var obj1 = {prop1:10};
var proxy1 = new Proxy(obj1, { });
+ if (withWarmup) {
+ for (const key in proxy1) {}
+ }
assertEquals([ [ "prop1", 10 ] ], Object.entries(proxy1));
var obj2 = {};
@@ -191,6 +228,9 @@ function TestWithProxy() {
return Reflect.getOwnPropertyDescriptor(target, name);
}
});
+ if (withWarmup) {
+ for (const key in proxy2) {}
+ }
assertEquals([ [ "prop2", 20 ], [ "prop3", 30 ] ], Object.entries(proxy2));
var obj3 = {};
@@ -206,12 +246,16 @@ function TestWithProxy() {
return [ "prop0", "prop1", Symbol("prop2"), Symbol("prop5") ];
}
});
+ if (withWarmup) {
+ for (const key in proxy3) {}
+ }
assertEquals([ [ "prop0", 0 ], [ "prop1", 5 ] ], Object.entries(proxy3));
}
TestWithProxy();
+TestWithProxy(true);
-function TestMutateDuringEnumeration() {
+function TestMutateDuringEnumeration(withWarmup) {
var aDeletesB = {
get a() {
delete this.b;
@@ -219,6 +263,9 @@ function TestMutateDuringEnumeration() {
},
b: 2
};
+ if (withWarmup) {
+ for (const key in aDeletesB) {}
+ }
assertEquals([ [ "a", 1 ] ], Object.entries(aDeletesB));
var aRemovesB = {
@@ -228,9 +275,15 @@ function TestMutateDuringEnumeration() {
},
b: 2
};
+ if (withWarmup) {
+ for (const key in aRemovesB) {}
+ }
assertEquals([ [ "a", 1 ] ], Object.entries(aRemovesB));
var aAddsB = { get a() { this.b = 2; return 1; } };
+ if (withWarmup) {
+ for (const key in aAddsB) {}
+ }
assertEquals([ [ "a", 1 ] ], Object.entries(aAddsB));
var aMakesBEnumerable = {};
@@ -243,12 +296,16 @@ function TestMutateDuringEnumeration() {
});
Object.defineProperty(aMakesBEnumerable, "b", {
value: 2, configurable:true, enumerable: false });
+ if (withWarmup) {
+ for (const key in aMakesBEnumerable) {}
+ }
assertEquals([ [ "a", 1 ], [ "b", 2 ] ], Object.entries(aMakesBEnumerable));
}
TestMutateDuringEnumeration();
+TestMutateDuringEnumeration(true);
-(function TestElementKinds() {
+function TestElementKinds(withWarmup) {
var O1 = { name: "1" }, O2 = { name: "2" }, O3 = { name: "3" };
var PI = 3.141592653589793;
var E = 2.718281828459045;
@@ -303,13 +360,22 @@ TestMutateDuringEnumeration();
}), [["0", "s"], ["1", "t"], ["2", "r"], ["9999", "Y"]] ],
};
+ if (withWarmup) {
+ for (const key in element_kinds) {}
+ }
for (let [kind, [object, expected]] of Object.entries(element_kinds)) {
+ if (withWarmup) {
+ for (const key in object) {}
+ }
let result1 = Object.entries(object);
%HeapObjectVerify(object);
%HeapObjectVerify(result1);
assertEquals(expected, result1, `fast Object.entries() with ${kind}`);
let proxy = new Proxy(object, {});
+ if (withWarmup) {
+ for (const key in proxy) {}
+ }
let result2 = Object.entries(proxy);
%HeapObjectVerify(result2);
assertEquals(result1, result2, `slow Object.entries() with ${kind}`);
@@ -331,9 +397,15 @@ TestMutateDuringEnumeration();
for (let [kind, [object, expected]] of Object.entries(element_kinds)) {
if (kind == "FAST_STRING_WRAPPER_ELEMENTS") break;
object.__defineGetter__(1, makeFastElements);
+ if (withWarmup) {
+ for (const key in object) {}
+ }
let result1 = Object.entries(object).toString();
%HeapObjectVerify(object);
%HeapObjectVerify(result1);
}
-})();
+}
+
+TestElementKinds();
+TestElementKinds(true);
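The recurring withWarmup idiom runs an empty for-in first, which populates the object's cached enumeration metadata so the subsequent Object.entries() call takes the fast path; schematically:

  function entriesAfterWarmup(obj) {
    for (const key in obj) {}    // warms up the enumeration cache
    return Object.entries(obj);  // now exercises the fast path
  }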
diff --git a/deps/v8/test/mjsunit/es9/regexp-lookbehind.js b/deps/v8/test/mjsunit/es9/regexp-lookbehind.js
index 54c975cfdf..c3aae317a9 100644
--- a/deps/v8/test/mjsunit/es9/regexp-lookbehind.js
+++ b/deps/v8/test/mjsunit/es9/regexp-lookbehind.js
@@ -162,3 +162,10 @@ assertEquals(["cacb", "a", ""], /(?<=a(.\2)b(\1)).{4}/.exec("aabcacbc"));
assertEquals(["b", "ac", "ac"], /(?<=a(\2)b(..\1))b/.exec("aacbacb"));
assertEquals(["x", "aa"], /(?<=(?:\1b)(aa))./.exec("aabaax"));
assertEquals(["x", "aa"], /(?<=(?:\1|b)(aa))./.exec("aaaax"));
+
+// Restricted syntax in Annex B 1.4.
+assertThrows("/(?<=.)*/u", SyntaxError);
+assertThrows("/(?<=.){1,2}/u", SyntaxError);
+assertThrows("/(?<=.)*/", SyntaxError);
+assertThrows("/(?<=.)?/", SyntaxError);
+assertThrows("/(?<=.)+/", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js b/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
index a7b0d1bda4..2d6be098a2 100644
--- a/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
+++ b/deps/v8/test/mjsunit/harmony/async-from-sync-iterator.js
@@ -717,8 +717,8 @@ if (testFailed) {
next_: 0,
get next() {
log.push("get syncIterable.next");
- let i = this.next_++;
return (...args) => {
+ let i = this.next_++;
log.push("call syncIterable.next(" + args.join(", ") + ")");
return results[i];
}
@@ -748,14 +748,12 @@ if (testFailed) {
"get nextValue#1.then",
"call nextValue#1.then",
"got value value1",
- "get syncIterable.next",
"call syncIterable.next()",
"get iterResult #2.done",
"get iterResult #2.value",
"get nextValue#2.then",
"call nextValue#2.then",
"got value value2",
- "get syncIterable.next",
"call syncIterable.next()",
"get iterResult #3.done",
"get iterResult #3.value",
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-basic.js b/deps/v8/test/mjsunit/harmony/async-generators-basic.js
index d7af1836b8..12240e6114 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-basic.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-basic.js
@@ -153,7 +153,7 @@ async function* asyncGeneratorForToString() {}
assertEquals("async function* asyncGeneratorForToString() {}",
asyncGeneratorForToString.toString());
-assertEquals("async function* () {}", async function*() {}.toString());
+assertEquals("async function*() {}", async function*() {}.toString());
assertEquals("async function* namedAsyncGeneratorForToString() {}",
async function* namedAsyncGeneratorForToString() {}.toString());
@@ -164,9 +164,9 @@ assertEquals("async *method() { }",
assertEquals("async *method() { }",
(new (class { async *method() { } })).method.toString());
-assertEquals("async function* anonymous() {\n\n}",
+assertEquals("async function* anonymous(\n) {\n\n}",
AsyncGeneratorFunction().toString());
-assertEquals("async function* anonymous() {\n\n}",
+assertEquals("async function* anonymous(\n) {\n\n}",
(new AsyncGeneratorFunction()).toString());
// ----------------------------------------------------------------------------
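These expectations track the Function.prototype.toString revision: function expressions stringify as their exact source text (no space is synthesized in "async function*()"), and dynamically created functions stringify with a newline inside the synthesized parameter list. For instance:

  Function("return 1").toString();
  // "function anonymous(\n) {\nreturn 1\n}"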
diff --git a/deps/v8/test/mjsunit/harmony/bigint/add.js b/deps/v8/test/mjsunit/harmony/bigint/add.js
index b57846e7f1..5e986b3726 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/add.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/add.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "a2102214b151421124f462d37f843",
- b: "90f3fa0f2fb9b1481b1a4737586ad6bdf71cb2ae51e06fdcb00fb779163e94ae4237",
- r: "90f3fa0f2fb9b1481b1a4737586ad6bdf71cb2b872e29127c523d88b6584c1e63a7a"
+ a: -0xc4043e2c4cc49e4d6870103ce7c2ff2d512bf4b1b67553ba410db514ee0af8888ad6cfn,
+ b: 0x2aae86de73ff479133a657a40d26e8dcf192019c7421836615ec34978bad93n,
+ r: -0xc4043e2c2216176ef470c8abb41ca78944050bd4c4e3521dccec31aed81ec3f0ff293cn
}, {
- a: "35ca28bdd383c1b9ffdb851cc7f385ad370eef3d",
- b: "-ca2d4dd677f23e005f44ec121303c3c304940eb2fd15e9e88772a3c5ba8515",
- r: "-ca2d4dd677f23e005f44ebdc48db05ef80d254b32190cd2093ecf68eab95d8"
+ a: -0xee0933b25c8c0ef09fa385ffa1d3ca76855b560e186d27fa9f6ce105cb8517a4aecd5n,
+ b: -0x2ffc3a7babad313ede574774cb55cd40ab3b2n,
+ r: -0xee0933b25c8c0ef09fa385ffa1d3ca76b5579089c41a59397dc4287a96dae4e55a087n
}, {
- a: "-8abb4b6ca534b584fad2f5898dd22ae6",
- b: "0",
- r: "-8abb4b6ca534b584fad2f5898dd22ae6"
+ a: -0xdbb3e8fac212affdeda8959829838af77e43172n,
+ b: 0x2315999da3ac2ab89d2076cbe6e2e03082352e92c274680117ce7012dn,
+ r: 0x2315999da3ac2ab89c44c2e2ec20cd80844785fd2a4ae47620502cfbbn
}, {
- a: "b3",
- b: "4180a0a",
- r: "4180abd"
+ a: -0x5ad9780b309c24919defb1b331ebba4en,
+ b: 0xbe8dd806b3da0f79e7f6ad2fb566536cc78c1471c236891ce0n,
+ r: 0xbe8dd806b3da0f79e79bd3b7aa35b74835ee24c00f049d6292n
}, {
- a: "-8de89",
- b: "c329fbab24d762a9453f90b134fcf5da9777aa1fdb26b74f27583a92a43f0f2c450",
- r: "c329fbab24d762a9453f90b134fcf5da9777aa1fdb26b74f27583a92a43f0e9e5c7"
+ a: -0xade45eac6aaaf2c8097d7a3efecba4e80dd7aac4f692cfe841d113e68096c0218d521an,
+ b: -0x73c4119b366e22edd0797b5883a13e3bfc4124a559903ce785e0c833a8306b41cfn,
+ r: -0xade4d2707c4629362c6b4ab87a2428894c13a7061b3829787eb899c748ca6851f893e9n
}, {
- a: "-49af5f350d64c75047dfb107550dae478c983dd520e86c9807b1f5",
- b: "60a62691669b8c323a29db2eb9cb75ba5811",
- r: "-49af5f350d64c750477f0ae0c3a712bb5a5e13f9f22ea1224d59e4"
+ a: -0x6e9ab93d5cff3b3f31a55aa2f43fc321bff6978189e00fa4e64cn,
+ b: 0xec1caec29eb55312a313e50e20178873a350f1f89cbc971n,
+ r: -0x6e9aaa7b92131153dc743071b5eee120476f5d4c7ac085d91cdbn
}, {
- a: "80bf614aaa1140792099375f7fac9c7046303a8d13086755d505795f38761",
- b: "-949dc945",
- r: "80bf614aaa1140792099375f7fac9c7046303a8d13086755d50570155be1c"
+ a: -0x9a61c05bd53b74b731b8f8687e64d1ed1340404b5137ce39n,
+ b: -0x1e6793ea0fa51b2354b48n,
+ r: -0x9a61c05bd53b74b731b8f8687e66b86651e13a9d036d1981n
}, {
- a: "4241d736e6a40",
- b: "-78e88f5eaeae4ff8b",
- r: "-78e84d1cd7776954b"
+ a: -0x7c5a08517038f7dn,
+ b: 0x1d89f2n,
+ r: -0x7c5a08516e6058bn
}, {
- a: "-8033927bf52210827b99e712fb220631503adfaa4e0045c872b9b",
- b: "-2f",
- r: "-8033927bf52210827b99e712fb220631503adfaa4e0045c872bca"
+ a: -0xc4fd1b5aae2cfdc7b3ea6a74b7926a9a1ca3513bd08341e0c770080ban,
+ b: 0x76c79770202f9d9c6d2e244n,
+ r: -0xc4fd1b5aae2cfdc7b3ea6a74b7926a9a1c2c89a4606312432b02d9e76n
}, {
- a: "-3ad8b67efe9",
- b: "-35586bf43788fd8e313da33c62d9a5",
- r: "-35586bf43788fd8e314150c7cac98e"
+ a: 0xda114ddab6bfed910cc05c64f72961107beb98c3271n,
+ b: 0x98c52c393c6660663248fcbaa300b8n,
+ r: 0xda114ddab6bff71d5f83f02b5d2fc4350bb742f3329n
}, {
- a: "-a43d8c9af54e8ea545e1af4674613932650c833669c7adc9273b77",
- b: "-6a4",
- r: "-a43d8c9af54e8ea545e1af4674613932650c833669c7adc927421b"
+ a: -0x46311d926dfd26f0850ea3497fcd0d95a5dabab0361f219a601dc3dbe35d0n,
+ b: -0xf340af7b84e4eced2ca613e1ef4bb6aec4b8f01bc3n,
+ r: -0x46311d926dfd26f0851dd75477855be474ad8511743e1655cb0a0f6ae5193n
}, {
- a: "26c178e22dd42280a59b",
- b: "fba77d85ba082981ce4a1ca21ac8b805b389297dc",
- r: "fba77d85ba082981ce4a1f0e3256dae2f5b133d77"
+ a: 0xb2b4d9c0c73258a8092n,
+ b: 0xf4d1c3d2861cc0bfaea456c03b0f548a32effc1c3e1593935a6b0n,
+ r: 0xf4d1c3d2861cc0bfaea456c03b0f548a33a2b0f5fedcc5ec02742n
}, {
- a: "-c9bc2ac82920efc63fa48f63fae105ec432672b50269fad72ee8b44a1",
- b: "8967d49deeff878f40fa1bf408400b8085820d47b",
- r: "-c9bc2ac82920efc5b63cbac60be17e5d022c56c0fa29ef56a966a7026"
+ a: -0x56604879bdaaff7409cd59cfbafe73a74c284bfaeb991b6e11171bc08cf68bean,
+ b: 0xf4016a6e94c0f256af54fca00d6c909a620c6ea950c73cade3724174c12888n,
+ r: -0x556c470f4f163e81b31e04d31af10716b1c63f8c424854316333a97f18356362n
}, {
- a: "815a18c9a2d8c6e5f3fffa958430851c4ea3",
- b: "59d451c6efad276d3cc393907dda0eca463488958f397bb09",
- r: "59d451c6efad2f82de502dbe0a486e0a45dde0d8978b409ac"
+ a: -0x7a238c856c50aee41d1a2a1446a77f1567n,
+ b: 0x58n,
+ r: -0x7a238c856c50aee41d1a2a1446a77f150fn
}, {
- a: "8cfc360e8d215045cb5c289a50e5c7fa9da34c0b9d9be9597e6e476efdb121",
- b: "-482747619f0edd06",
- r: "8cfc360e8d215045cb5c289a50e5c7fa9da34c0b9d9be9115726e5cfeed41b"
+ a: 0xcdaf8c242e5da7265cc715871c3n,
+ b: -0x63604dd2b8df176c627ce3b018b2e74448746123d5578e66e9c0n,
+ r: -0x63604dd2b8df176c627ce3b00bd7ee82058e86b16f8b1d0e77fdn
}, {
- a: "346337dbb9bbfc08cb815434c50315d32d",
- b: "-ac569f54f5ea8852463c7542e876a9953",
- r: "299dcde66a5d5383a71d8ce0967bab39da"
+ a: 0xfa254e9c8d182d2bd90d8ff2b25d4e9819900b62b9ef60f3744ddc55cd362den,
+ b: -0x1fef80ff5d53dd0adaa75e8f0d16429851de115822n,
+ r: 0xfa254e9c8d182d2bd90d8df3ba4d58c2dbbf5db84406702210245737ec20abcn
}, {
- a: "8bb7c3e56",
- b: "-c334d52ed6eb903256253e01fc0c5118fe9bc",
- r: "-c334d52ed6eb903256253e01fc0bc5613ab66"
+ a: 0x3e85cn,
+ b: 0x687c558b2c1308c1a133d647ff7en,
+ r: 0x687c558b2c1308c1a133d64be7dan
}, {
- a: "b1f444a7a95e6d1d293ff0182e3dd5e945234484a5b47516b5b42627ed54fa8cf1221e",
- b: "-93b77e906778b7e0a85c07e08babe730edd93ed37adef19da9e76de2add3",
- r: "b1f444a7a8cab59e98d877604d9579e16497989d74c69bd7e23947364fab131f0e744b"
+ a: 0x9011db3dd56dbf2ef06154cbd48a67f6038483cff59f49011e57n,
+ b: 0xc033be10adcb6a7a5a697a7ef95ac3680611ff7ef690231314d338an,
+ r: 0xc03cbf2e61a8c1564d58809446180c0e857237c7338f7d07a4e51e1n
}, {
- a: "9a156524b9",
- b: "-bce28d1561fc0153b836c6e0969d2674fcb960331cdb55df24e34e4b65136fcb59",
- r: "-bce28d1561fc0153b836c6e0969d2674fcb960331cdb55df24e34e4acafe0aa6a0"
+ a: -0xefac02ab40df0f00a1b97a4n,
+ b: 0xc3b5e53656cd891e179ec3fb8a8d192f9bab5cfd76c014400n,
+ r: 0xc3b5e53656cd891e179ec3fb899d6d2cf06a7dee761e5ac5cn
}, {
- a: "5eaf418fbccefb4f53abc413c02cee60eb3880b615c615c2005b3d11c8ee4aaf3b4ded8",
- b: "-eb8aa4a72cf44f06208",
- r: "5eaf418fbccefb4f53abc413c02cee60eb3880b615c615c2005a518724471dbaec47cd0"
+ a: -0x7f9d48b408a5bb3827bc0149ecd538a2b2a7d4e1e0a4e9c36ee05f8be3a5b3n,
+ b: 0xd83d8cb2fb784703498cf0d61ff097ca42635664058cb85ea39f5931c37a1n,
+ r: -0x72196fe8d8ee36c7f323323c8ad62f260e819f7ba04c1e3d84a669f8c76e12n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a + b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a + d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: +");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/and.js b/deps/v8/test/mjsunit/harmony/bigint/and.js
index e5c3b145d6..7a68f8b3dc 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/and.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/and.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "-193b47d0d9a8688b329e80de92195f311825",
- b: "-2fabed9434bff933e23ea21af0f21a43",
- r: "-193b6ffbfdbc7cbffbbfe2feb21bfff31a67"
+ a: 0x9252b94f220ded0c18706998886397699c5a25527575dn,
+ b: -0x286817ba2e8fd8n,
+ r: 0x9252b94f220ded0c1870699888639769185a045015008n
}, {
- a: "35979ec99b7ff03f81817ebc9ddd50025d1ccf41565b502f0fc85ec54f630",
- b: "be96016cc031653c9b1eceb1dd",
- r: "8c9401648000603c810c44b010"
+ a: 0x15a28152354fe421bbe4649d4d22d536ac34f9ba962n,
+ b: 0x8n,
+ r: 0n
}, {
- a: "-f594f8199c11594681a9c38fd985a03d8c79ce6c8d342809924c89b385af43116ec3a21",
- b: "-53e7b9738caaecc58fde1b5a4aa9f782f28a04e2bb29d207ccd5d45",
- r: "-f594f8199c115946d3effbffddafecfd8fffdf7ecfbdff8bf2ce8df3bfafd317eed7f65"
+ a: 0xd09791bf7ae6703fae8d654104f54fcd957bbf028540b8e7c0f859fdcc1b7abd617en,
+ b: 0x24b473d03n,
+ r: 0x24a052102n
}, {
- a: "dd7245d3ca5b360296082e6ca91915179b257f36e45e6e44cf892db875fdcfb19522b3",
- b: "-dcc83137df3bb234e1144390f6c5bc0772a07f2a4540865554d20ebd37be",
- r: "dd7245d3ca0336028000044c891801140b013a02e00c4e00c5882d3820a90db1000002"
+ a: 0x2f73fbd05e9f7f70beba3dc7cd883732149a3d02a422f90f336n,
+ b: 0x4490583f8n,
+ r: 0x9008330n
}, {
- a: "-28",
- b: "eaec4017147fd9741ff3b98f1b6f71d8f3d6869c18b39c6237a6b2d4d2fc3c81e9",
- r: "eaec4017147fd9741ff3b98f1b6f71d8f3d6869c18b39c6237a6b2d4d2fc3c81c8"
+ a: 0xd6ea27844b3d356774e73de10d393ab9cadb81848e3b4a34d1f7440d3n,
+ b: 0x1639eeee731f3ba09234b5e15ec92f8bfb5cea28a0e89c59ed1c45b5f04n,
+ r: 0x10ea26000b39200234a5214009290ab948ca008088184824104504000n
}, {
- a: "-223909fc585f36f995d6f72dd9f169df1fad8",
- b: "b13e919ce59c18c7c0517eecdb2519155cc",
- r: "80360184a0880042000052240a040000508"
+ a: -0x131f5e2c196269fd140c8727a9a02170e0dddb6aec109a89bcn,
+ b: 0x401ec4eae8b756f2b222f6f565fb6acecce3c524e9b453n,
+ r: 0x12c488800242f230005655448b0a0204810124613440n
}, {
- a: "-fcb4ac9fdc7ee85d03585f944a79b28efffb461e17df2d",
- b: "13cd27fb49c92d53c567688ab6b9",
- r: "38023b100492100042160882091"
+ a: -0x5bcba128bf7b949780a4bd02782a63ae79f646a33b8672f3f692ab98n,
+ b: -0xa81dd8ff624072fce3a00eb0bd5700e5666c3beb7d59701c42489n,
+ r: -0x5bcba1fdbfffb497afeebf02fb2bf7fe7ff666e3bfb7f7f7f7d6afa0n
}, {
- a: "-1a16ca8c3725cec0c8a61ce81",
- b: "-dbf3e",
- r: "-1a16ca8c3725cec0c8a6dffbe"
+ a: 0xc997546528786548270061bcn,
+ b: 0x75023f046c04f9095f64d3b8c75ab21950an,
+ r: 0x499014640838044023000108n
}, {
- a: "-834db45b67472062091e",
- b: "5aff66623af6b6cd042a361d5a22aea03152b764a056c71",
- r: "5aff66623af6b6cd042a361d5a228a2030408304a056460"
+ a: 0x5ad0abd6e43ef787248af2607fb29b9c3f7a1e7b106269ff3bn,
+ b: -0xd483c8630392db4b620e0f6n,
+ r: 0x5ad0abd6e43ef787248af2607fb29380394a06520040491f0an
}, {
- a: "1a8c37cff2e02f5272bc61d60b8301e443c38172446f75d75e01c41f60",
- b: "e15d12bee18edaca77ad15ff0a567e132bb1b046623858",
- r: "215012bc61860a8201a401c30052440321911000401840"
+ a: -0x9ed448bdb4794e4c0485db9ebe6cad0b0n,
+ b: -0x549a70753c9afn,
+ r: -0x9ed448bdb4794e4c0485df9efe7dbd9b0n
}, {
- a: "-f463",
- b: "bb02038e2ff03fa",
- r: "bb02038e2ff0398"
+ a: -0x3a3fe42647633612082372714484767efc9673ce964b76f9f53a63d5n,
+ b: 0x99615e5edd0506cbb5512fb56ee2dd1238bn,
+ r: 0x986116189800068881100b0000608c1000bn
}, {
- a: "3178f92d2eeee1aebc33f085aa96c9046f1133ad6afbd666664ab79625639e001",
- b: "124d8bd8ea20d8e510ba30d9",
- r: "20d02406020586010382001"
+ a: -0xac1b8238c08f5n,
+ b: -0xb9439dfbbc70e12b3a1fc7ccbfebeacd9bb321d83d3an,
+ r: -0xb9439dfbbc70e12b3a1fc7ccbfebeacfdbbb23dc3dfen
}, {
- a: "fc7aaaa7a52f3604e1e700f01ea6f266912f583bffa78aee08939401056cde",
- b: "-50e3611d6ada075f432319f10c8192f1de56ead628972",
- r: "fc7aaaa7a52f3604e0e100e008025202010d4820ef2782c00012900005648e"
+ a: 0x120580503n,
+ b: 0x90e13d34618496af407fabefabdd23892f488n,
+ r: 0x20100400n
}, {
- a: "7dea10c67bdf023c00d94643e9f2d38295635b0b2b55a0e40818",
- b: "8defe4741785c6c2d2ecaf7752a903ed",
- r: "443e0701380844252082b5500a00008"
+ a: 0xaa51cn,
+ b: 0x2122a9c43b2531d5n,
+ r: 0x2114n
}, {
- a: "6f837e0ec2d00abb60051299bfd36b58c803f6445f91bb8dded858c6c1c476142",
- b: "-26746eda5ca5095ab8f315c88b201cfa2affbbb700fc3bba8626b5bfd0a",
- r: "6f837e08829000a3400400010cc22350c80304440000088d02c04040c14040042"
+ a: 0xaf43eb353c1547dc6ad79768e9a6n,
+ b: 0x55c80f89d8fe82dbf69eca557dd0479b6a449db77e38b2241ec81aa7d4042766dd3e63n,
+ r: 0x28022014081007d4000706482822n
}, {
- a: "ab69c286138358dea4308b60f12f212fcd1e0",
- b: "-c8735b6ce5513cc661fdae7941055028a2ea768dc13b9c83a79b9bf84e62cdf",
- r: "aa29c2041181501e84200840602401218d120"
+ a: 0xbd38b2d88212ceea6fbe3890769e46236n,
+ b: 0xeb22e568873e9bfdb5d2ddbc03n,
+ r: 0x88202468863a83890540c42002n
}, {
- a: "6c9ec2e7cdb2c1fb630a8e16323138db939c2a21e3576b777d",
- b: "-51cf93f77a711c00",
- r: "6c9ec2e7cdb2c1fb630a8e16323138db938c202000050a6400"
+ a: 0x24cd8c9634eddf319c57b54dffb49ce4974528f03b2c732d052ad496n,
+ b: -0x4079cfn,
+ r: 0x24cd8c9634eddf319c57b54dffb49ce4974528f03b2c732d052a8410n
}, {
- a: "edee34cd0c29ad27fed12e77a42aedbf8b53161716c90d516394b9038a2f125c95",
- b: "-18a515e3705a582d82f14bd42075b3b",
- r: "edee34cd0c29ad27fed12e77a42aedbf8b52140600c8085061049003022d100485"
+ a: -0x61f263476a1212fce55cf7d437f4364b73a1815a641n,
+ b: -0x19226cb79772cca166043a29d1f0fe1df5n,
+ r: -0x61f263476b9236fffd7fffde37f477ebffbf8ffbff5n
}, {
- a: "466fee7dabecbaea71c19892f045d7d196a80c6f",
- b: "-5c93c7afd552be",
- r: "466fee7dabecbaea71c19892f001441010280c42"
+ a: -0x44309b08a65bc7dc1e53d598d82ccdaa852941d7c851d907a6acn,
+ b: 0xdd93dc17d38658ee30453d5f27en,
+ r: 0x5d1304178102408230402d05054n
}, {
- a: "-657c587f67a70177797befb96f116c2843",
- b: "-c3b8e2",
- r: "-657c587f67a70177797befb96f11efb8e4"
+ a: 0x1e3eea4668e7a6c3bfa87d3f7a419bd18an,
+ b: -0x592d83940f884f9n,
+ r: 0x1e3eea4668e7a6c3bfa86d274201035102n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a & b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a & d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: &");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
index faa7dba866..51b5073d24 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/as-int-n.js
@@ -284,9 +284,7 @@
assertEquals(4294967296n, BigInt.asUintN(2**32, 4294967296n));
assertEquals(4294967297n, BigInt.asUintN(2**32, 4294967297n));
}{
- assertEquals(
- BigInt.parseInt("0x7234567812345678"),
- BigInt.asUintN(63, BigInt.parseInt("0xf234567812345678")));
+ assertEquals(0x7234567812345678n, BigInt.asUintN(63, 0xf234567812345678n));
}{
assertThrows(() => BigInt.asUintN(2n, 12n), TypeError);
assertThrows(() => BigInt.asUintN(-1, 0n), RangeError);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/basics.js b/deps/v8/test/mjsunit/harmony/bigint/basics.js
index 398d670ca8..99a9403db0 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/basics.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/basics.js
@@ -197,6 +197,28 @@ const six = BigInt(6);
// Multi-digit BigInts.
// Test parseInt/toString round trip on a list of randomly generated
// string representations of numbers in various bases.
+
+ // Userland polyfill while we wait for BigInt.fromString (see:
+ // https://mathiasbynens.github.io/proposal-number-fromstring/ ).
+ // This intentionally only implements what the tests below need.
+ function ParseBigInt(str, radix) {
+ const alphabet = "0123456789abcdefghijklmnopqrstuvwxyz";
+ var result = 0n;
+ var base = BigInt(radix);
+ var index = 0;
+ var negative = false;
+ if (str[0] === "-") {
+ negative = true;
+ index++;
+ }
+ for (; index < str.length; index++) {
+ var digit = alphabet.indexOf(str[index]);
+ assertTrue(digit >= 0 && digit < radix);
+ result = result * base + BigInt(digit);
+ }
+ if (negative) result = -result;
+ return result;
+ }
var positive = [0, 0, // Skip base 0 and 1.
"1100110001100010110011110110010010001011100111100101111000111101100001000",
"1001200022210010220101120212021002011002201122200002211102120120021011020",
@@ -273,28 +295,12 @@ const six = BigInt(6);
];
for (var base = 2; base <= 36; base++) {
var input = positive[base];
- assertEquals(input, BigInt.parseInt(input, base).toString(base));
+ assertEquals(input, ParseBigInt(input, base).toString(base));
input = negative[base];
- assertEquals(input, BigInt.parseInt(input, base).toString(base));
+ assertEquals(input, ParseBigInt(input, base).toString(base));
}
}
-// .parseInt
-{
- assertEquals("hellobigint", BigInt.parseInt("hellobigint", 32).toString(32));
- assertEquals("abc", BigInt.parseInt("101010111100", 2).toString(16));
- // Detect "0x" prefix.
- assertEquals("f00dcafe", BigInt.parseInt("0xf00dcafe").toString(16));
- // Default base is 10, trailing junk is skipped.
- assertEquals("abc", BigInt.parseInt("2748junk").toString(16));
- // Objects are converted to string.
- let obj = {toString: () => "0x12345"};
- assertEquals("12345", BigInt.parseInt(obj).toString(16));
- // Empty and invalid strings throw.
- assertThrows("BigInt.parseInt('')", SyntaxError);
- assertThrows("BigInt.parseInt('nope', 2)", SyntaxError);
-}
-
// .valueOf
{
assertEquals(Object(zero).valueOf(), another_zero);
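The ParseBigInt helper above is deliberately minimal: no "0x" prefix detection, no trailing-junk skipping, and any digit outside the radix fails the assertion. Usage matches the round-trip loop:

  ParseBigInt("ff", 16) === 255n;  // true
  ParseBigInt("-101", 2) === -5n;  // true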
diff --git a/deps/v8/test/mjsunit/harmony/bigint/comparisons.js b/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
index 513ff37d00..38150c74bf 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/comparisons.js
@@ -429,8 +429,8 @@ const six = BigInt(6);
assertFalse(%Equal("-0x1", minus_one));
const unsafe = "9007199254740993"; // 2**53 + 1
- assertTrue(%GreaterThan(BigInt.parseInt(unsafe), unsafe));
- assertTrue(%LessThan(unsafe, BigInt.parseInt(unsafe)));
+ assertTrue(%GreaterThan(eval(unsafe + "n"), unsafe));
+ assertTrue(%LessThan(unsafe, eval(unsafe + "n")));
assertThrows(() => %LessThan(six, Symbol(6)), TypeError);
assertThrows(() => %LessThan(Symbol(6), six), TypeError);
@@ -508,10 +508,10 @@ const six = BigInt(6);
assertFalse("-0x1" <= minus_one);
const unsafe = "9007199254740993"; // 2**53 + 1
- assertFalse(BigInt.parseInt(unsafe) < unsafe);
- assertFalse(BigInt.parseInt(unsafe) <= unsafe);
- assertTrue(unsafe < BigInt.parseInt(unsafe));
- assertTrue(unsafe <= BigInt.parseInt(unsafe));
+ assertFalse(eval(unsafe + "n") < unsafe);
+ assertFalse(eval(unsafe + "n") <= unsafe);
+ assertTrue(unsafe < eval(unsafe + "n"));
+ assertTrue(unsafe <= eval(unsafe + "n"));
assertThrows(() => six < Symbol(6), TypeError);
assertThrows(() => six <= Symbol(6), TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/dataview.js b/deps/v8/test/mjsunit/harmony/bigint/dataview.js
new file mode 100644
index 0000000000..c34a42ac1d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/dataview.js
@@ -0,0 +1,78 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-bigint
+
+var buffer = new ArrayBuffer(64);
+var dataview = new DataView(buffer, 8, 24);
+var bytes = new Uint8Array(buffer);
+
+var b1 = 0xff1234567890abcdefn;
+var b1_64 = BigInt.asUintN(64, b1);
+
+dataview.setBigInt64(8, b1);
+assertEquals(0x12, bytes[16]);
+assertEquals(0x34, bytes[17]);
+assertEquals(0x56, bytes[18]);
+assertEquals(0x78, bytes[19]);
+assertEquals(0x90, bytes[20]);
+assertEquals(0xab, bytes[21]);
+assertEquals(0xcd, bytes[22]);
+assertEquals(0xef, bytes[23]);
+assertEquals(b1_64, dataview.getBigInt64(8));
+
+dataview.setBigInt64(8, b1, true); // Little-endian.
+assertEquals(0xef, bytes[16]);
+assertEquals(0xcd, bytes[17]);
+assertEquals(0xab, bytes[18]);
+assertEquals(0x90, bytes[19]);
+assertEquals(0x78, bytes[20]);
+assertEquals(0x56, bytes[21]);
+assertEquals(0x34, bytes[22]);
+assertEquals(0x12, bytes[23]);
+assertEquals(b1_64, dataview.getBigInt64(8, true));
+
+dataview.setBigUint64(8, b1);
+assertEquals(0x12, bytes[16]);
+assertEquals(0x34, bytes[17]);
+assertEquals(0x56, bytes[18]);
+assertEquals(0x78, bytes[19]);
+assertEquals(0x90, bytes[20]);
+assertEquals(0xab, bytes[21]);
+assertEquals(0xcd, bytes[22]);
+assertEquals(0xef, bytes[23]);
+assertEquals(b1_64, dataview.getBigUint64(8));
+
+dataview.setBigUint64(8, b1, true); // Little-endian.
+assertEquals(0xef, bytes[16]);
+assertEquals(0xcd, bytes[17]);
+assertEquals(0xab, bytes[18]);
+assertEquals(0x90, bytes[19]);
+assertEquals(0x78, bytes[20]);
+assertEquals(0x56, bytes[21]);
+assertEquals(0x34, bytes[22]);
+assertEquals(0x12, bytes[23]);
+assertEquals(b1_64, dataview.getBigUint64(8, true));
+
+var b2 = -0x76543210fedcba98n;
+dataview.setBigInt64(8, b2, true);
+assertEquals(0x68, bytes[16]);
+assertEquals(0x45, bytes[17]);
+assertEquals(0x23, bytes[18]);
+assertEquals(0x01, bytes[19]);
+assertEquals(0xef, bytes[20]);
+assertEquals(0xcd, bytes[21]);
+assertEquals(0xab, bytes[22]);
+assertEquals(0x89, bytes[23]);
+assertEquals(b2, dataview.getBigInt64(8, true));
+assertEquals(0x89abcdef01234568n, dataview.getBigUint64(8, true));
+
+assertThrows(() => dataview.setBigInt64(0, 1), TypeError);
+assertThrows(() => dataview.setBigUint64(0, 1), TypeError);
+assertThrows(() => dataview.setInt32(0, 1n), TypeError);
+assertThrows(() => dataview.setUint32(0, 1n), TypeError);
+
+// None of the stores wrote out of bounds.
+for (var i = 0; i < 16; i++) assertEquals(0, bytes[i]);
+for (var i = 24; i < 64; i++) assertEquals(0, bytes[i]);
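getBigInt64 and getBigUint64 read the same eight bytes and differ only in how the sign bit is interpreted, which the b2 round-trip above exercises; in the smallest case:

  var dv = new DataView(new ArrayBuffer(8));
  dv.setBigInt64(0, -1n);
  dv.getBigUint64(0) === 0xffffffffffffffffn;  // true: same bits, unsigned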
diff --git a/deps/v8/test/mjsunit/harmony/bigint/dec.js b/deps/v8/test/mjsunit/harmony/bigint/dec.js
index 5e1f40b2dd..ddb0431cba 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/dec.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/dec.js
@@ -7,74 +7,73 @@
// Flags: --harmony-bigint
var data = [{
- a: "-609648ccf253976b12f6b6c8e20790c17ef6b89ea9f536267783607cf465b1ca",
- r: "-609648ccf253976b12f6b6c8e20790c17ef6b89ea9f536267783607cf465b1cb"
+ a: 0x26ffcdbd233a53e7ca4612f2b02e1f2c1d885c3177e7n,
+ r: 0x26ffcdbd233a53e7ca4612f2b02e1f2c1d885c3177e6n
}, {
- a: "-6e4c39cdd2c666e32cf2fd3c53a20eeb725e7578af97d42",
- r: "-6e4c39cdd2c666e32cf2fd3c53a20eeb725e7578af97d43"
+ a: 0xf2a29a35193377a223ef0d6d98db95eeb24a4165f288fd2b4an,
+ r: 0xf2a29a35193377a223ef0d6d98db95eeb24a4165f288fd2b49n
}, {
- a: "34c93e1c",
- r: "34c93e1b"
+ a: 0x454d22e29e6104n,
+ r: 0x454d22e29e6103n
}, {
- a: "-db3032",
- r: "-db3033"
+ a: -0xb00874640d30e6fce6bf79508378ed17e44dacb48a4200bce536cec462b3c2n,
+ r: -0xb00874640d30e6fce6bf79508378ed17e44dacb48a4200bce536cec462b3c3n
}, {
- a: "8e658ffacbefbdec5",
- r: "8e658ffacbefbdec4"
+ a: 0x4c151a24d765249c2bab4a1915b24b80ae437417c5n,
+ r: 0x4c151a24d765249c2bab4a1915b24b80ae437417c4n
}, {
- a: "-d321033ec94d6a75f",
- r: "-d321033ec94d6a760"
+ a: -0xcbd476b1f9ca08ff820941n,
+ r: -0xcbd476b1f9ca08ff820942n
}, {
- a: "-286017f718d6118b581ec4357e456ce6d12c01aed9a32ff0cc048d",
- r: "-286017f718d6118b581ec4357e456ce6d12c01aed9a32ff0cc048e"
+ a: -0xe848e5830fa1035322b39c2cdd031109ca8n,
+ r: -0xe848e5830fa1035322b39c2cdd031109ca9n
}, {
- a: "c0",
- r: "bf"
+ a: -0x4d58c5e190f0ebac5bb36ca4d214069f69726c63a5n,
+ r: -0x4d58c5e190f0ebac5bb36ca4d214069f69726c63a6n
}, {
- a: "9f9577e008a6f46f7709f71362176ebe23d19eb9e58a41de6f2631b18f2ca",
- r: "9f9577e008a6f46f7709f71362176ebe23d19eb9e58a41de6f2631b18f2c9"
+ a: 0x9b396n,
+ r: 0x9b395n
}, {
- a: "-9d4294590df0aa8ea46a5c2a3d186a6afcc00c6ebb072752",
- r: "-9d4294590df0aa8ea46a5c2a3d186a6afcc00c6ebb072753"
+ a: 0x593921fe8b9d4906cn,
+ r: 0x593921fe8b9d4906bn
}, {
- a: "-4bc2aed1641151db908c0eb21aa46d8b406803dc0f71d66671322d59babf10c2",
- r: "-4bc2aed1641151db908c0eb21aa46d8b406803dc0f71d66671322d59babf10c3"
+ a: -0xe127928c7cecd6e9ca94d98e858f9c76a0fccac62203aac7710cef1f9e352n,
+ r: -0xe127928c7cecd6e9ca94d98e858f9c76a0fccac62203aac7710cef1f9e353n
}, {
- a: "-1dfb3929632fbba39f60cabdc27",
- r: "-1dfb3929632fbba39f60cabdc28"
+ a: 0xeb14cd952d06eb6fc613016f73b7339cbdd010n,
+ r: 0xeb14cd952d06eb6fc613016f73b7339cbdd00fn
}, {
- a: "c0d409943c093aec43ba99a33ef2bb54574ecdc7cccf6547ab44eafb27",
- r: "c0d409943c093aec43ba99a33ef2bb54574ecdc7cccf6547ab44eafb26"
+ a: -0xfdeab6a3dbd603137f680413fecc9e1c80n,
+ r: -0xfdeab6a3dbd603137f680413fecc9e1c81n
}, {
- a: "3d148dcffe94f859c80b38c4",
- r: "3d148dcffe94f859c80b38c3"
+ a: -0x7e9abbdfad170df2129dae8e15088a02b9ba99276a351a05n,
+ r: -0x7e9abbdfad170df2129dae8e15088a02b9ba99276a351a06n
}, {
- a: "0",
- r: "-1"
+ a: 0x7b98f57n,
+ r: 0x7b98f56n
}, {
- a: "d659f6507e0ac2e653bdb7c3fb38c1514dd33619a9a0c87fcb69b22",
- r: "d659f6507e0ac2e653bdb7c3fb38c1514dd33619a9a0c87fcb69b21"
+ a: -0x919751deb470faa60d7c5c995c8bed72f9542d710fbbf1341n,
+ r: -0x919751deb470faa60d7c5c995c8bed72f9542d710fbbf1342n
}, {
- a: "14efe",
- r: "14efd"
+ a: -0xc5541d89b118a88afdd187228440427c8a24f9d9bn,
+ r: -0xc5541d89b118a88afdd187228440427c8a24f9d9cn
}, {
- a: "-f2df301948cd17ff391a6589a67551c00679687ba5",
- r: "-f2df301948cd17ff391a6589a67551c00679687ba6"
+ a: -0xe6c88a170595fn,
+ r: -0xe6c88a1705960n
}, {
- a: "-e",
- r: "-f"
+ a: -0xa1ffbfa388c332804dc4dc973n,
+ r: -0xa1ffbfa388c332804dc4dc974n
}, {
- a: "-a09cf77fea7af1767695c978af13fdb62f4f040b6fb803625fb124cc99139cddadd",
- r: "-a09cf77fea7af1767695c978af13fdb62f4f040b6fb803625fb124cc99139cddade"
+ a: 0x67b768ce0c415127a77402861d1901dd7f60a8624ebea6ecafe03adc3cen,
+ r: 0x67b768ce0c415127a77402861d1901dd7f60a8624ebea6ecafe03adc3cdn
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var r = --a;
- if (d.r !== r.toString(16)) {
- print("Input: " + a.toString(16));
+ var r = --d.a;
+ if (d.r !== r) {
+ print("Input: " + d.a.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
error_count++;
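Note that the loop now decrements the data property in place: --d.a applies the BigInt decrement and writes the result back, so r and d.a hold the same value afterwards. Minimal example:

  let o = { a: 10n };
  const r = --o.a;
  r === 9n && o.a === 9n;  // true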
diff --git a/deps/v8/test/mjsunit/harmony/bigint/div.js b/deps/v8/test/mjsunit/harmony/bigint/div.js
index 771a0c7fda..1eeea1184f 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/div.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/div.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "c0bb18527ab19a800932cc14c553c69688372809dde38f095fdb99478a7eba",
- b: "8137ebc482f361a3e43fb80fe2ba509caa3f0acc62be0d2",
- r: "17dd3b8ecbdd71a4"
+ a: -0x1e0f357314bac34227333c0c2086430dae88cb538f161174888591n,
+ b: 0x390n,
+ r: -0x8700f00dd65d4009ab90c5d3a887e9c8ad110a72ca7e0f84a40n
}, {
- a: "10c5163e874c786fcfcb48bbb0ccf8d6c66f480b76332194",
- b: "4e462b2",
- r: "36d8ca96869a7226d456bff9b5ed3effdcaaf6f83"
+ a: 0x9b7a0098fd2d495b4b2c76ff5888ae2a0359cfec6b029e6f3ee9b014802n,
+ b: 0xbn,
+ r: 0xe225d252e49ef6563ecc4fff0c69b782ed99e89db2ec895bfe6b2ea974n
}, {
- a: "-677f8c8668fcc826129f2724a5b88a6225dd19959810b5254afbc0",
- b: "-2bf8ff1c49636",
- r: "25a8bd406d721554f45ad37cc691d58821ad163f3a"
+ a: 0x6821a6dad136b9f64096337b45d6d947cf0f0b55501e33e66fbc4ef9776n,
+ b: 0xan,
+ r: 0xa69c3e2ae8578ff06756b8c53c8af53fb1b1abbbb363863d7f93b18f25n
}, {
- a: "e80276ba7d416f4f7e5f8917529eb204cc7ed08413261e73aa23f169eb46e7162",
- b: "-c884f07a",
- r: "-12834073e7cbc44804a99cb2778fb82b098645946d5e4f5e6c47c49267"
+ a: 0xf42f96f0c7f5a9c656a66122a9bcc57aa386bdb9a834e4174d45795ae408dae14e085cn,
+ b: -0x5e0d81a1107ef9a38b4f24b4n,
+ r: -0x298a4d479eac8c186fd61939024b420080b6ae6500c70c1n
}, {
- a: "-f9a51f64ce995e0a1c7f8369a573dae2533bc3df801edbb79235d41502e1ce",
- b: "-f33bf",
- r: "106bf522569501704f9700d6bfcd203a606e63725e339e92ff931f568f"
+ a: 0xbaf7baed984dda3c20f23201f366257c601f7fbcaf51d8b738a68fd92b4n,
+ b: 0xd24a1eafcn,
+ r: 0xe39bd4d870d72880e6ad88ec17bcca6bd0eaacfbbd28ba7458n
}, {
- a: "-6c9ae08dfa5b11c29ae30e53c108d915e0e87f1a8ca82f42d23a53b08895",
- b: "-43fd79afe1ae6a4994ee7dfc2c89453d6b6",
- r: "198ecd667e8cca17c2839b38fe"
+ a: -0xa658d0683d450bfec3de3fe4253f4348dd8e9a5b1002f6c24ff0en,
+ b: -0xeffb9a6fa690e0be59aba488b7a7c5a85a65a018242n,
+ r: 0xb1730d7206n
}, {
- a: "-d20a2f7074ecbc776b64c2e04ff007e194b8cdd106b2be2e78d752f2d16e9",
- b: "43eb8ae7d7f9be6d77e3be696ffef",
- r: "-317ab1a09014950e1c53b9e3e8f44873c"
+ a: 0x5a65f7b2dace4550099f4009192caa513468ef4da7e2fd81n,
+ b: 0xf144f8bfad42d9beaf7b82a46561n,
+ r: 0x5feae36e8e4932adcb21n
}, {
- a: "-999e0f75f2f652d403840cea3536b1a522433a331dace7c39d7990b993908fdd6",
- b: "cea1de74020232c6c4bedca49d63f140c",
- r: "-be51ac62c073e7d7cb9e43fc12436bfe"
+ a: -0x87271b0cbb63eba5e3d70f6af1aa6ec60c0a7884fb8d657b03en,
+ b: -0xb67f8c18099ce8fa0c0f4ce2852b5590f7a8037d326b0n,
+ r: 0xbd9602n
}, {
- a: "1b49ac0c323436294f1471a3f00feb197b9b42549c3",
- b: "5a7e30888a22fbbcecfe2a0b2e5eca30",
- r: "4d321e8fe1c"
+ a: 0xd4bc42a434dba1427d387f0858aaf39c6281ffc0f10b8b0996cb3bfn,
+ b: -0x4198d1fec9b48e38d42f2e295abc39c2a6866527efn,
+ r: -0x33e39b6878d955n
}, {
- a: "-689adf0d332ed26c63f8f361d5cb66056321b683ce87c60567df7305396f20d6ab8",
- b: "82f3b92f5bb57811fdb66f519",
- r: "-cc7e64b9ef239ac4c31b6ae3687aa181bda7657222"
+ a: 0x6a6f0c97d974dfd20337ca611dbdc96433964a206d54n,
+ b: 0x16c7c6f9e0e4e24d521e463fecb394e2936f3104590n,
+ r: 0x4an
}, {
- a: "-6fecb457f9e05c15b7fe038494c25053938d4747a01577cf2dc939a21e",
- b: "b7cb6ce62c1d7483d0b675109f5782a1ad19",
- r: "-9be52d2e94716bb441dbcd"
+ a: -0xd09af7515272d81a64307afaed8854c8e2996d8e01d4864n,
+ b: 0x7521dfcbdf4cfbbf8d3859bf67618c91dc6915718303dbn,
+ r: -0x1cn
}, {
- a: "f96f8276f314d37657ce7774bc539198ee84fcec8a53cbb3d36ad81040b715",
- b: "-891c9a0dd99b02",
- r: "-1d1b803e3484173c0d2e8add5042117eaa27c853c222f5fdd"
+ a: 0xf0da5f24832a2b651aac5dff62b22475212c26e123ff78c34560f35e9bcn,
+ b: -0xf9n,
+ r: -0xf79fbd51bf65f518c82563b974e4632b505ebe13adbfb6c297859a98cn
}, {
- a: "-d2c9a9068d57f2ebc9ad432b48b4135f2a911519d4b791",
- b: "-a34d124b5b4825d314683098d0",
- r: "14a715b96671307438926"
+ a: 0x76f4f5b5de8a263514554d62273e51f40342d5df692ae17625ead5b5fen,
+ b: -0xa5e697c57fb65858604e0a060737e7d9d1en,
+ r: -0xb78fb2167de210959e57cd3n
}, {
- a: "-59593d55f61c1c739601e15624fe2e61592fe6abf1ecb238c9f8e1a7bded9c1bd1c",
- b: "f067a5a155fc894b0f7f0a939f1772c4d135a",
- r: "-5f250867331e7eff64dafcd58e9922"
+ a: -0xa714d4d9d747fa5258f80f3887f4dda8a11423050046282n,
+ b: -0xaa3b218ab92e21920a707487fdbn,
+ r: 0xfb437487ba8580181ca3n
}, {
- a: "2c0a3172494623013ba14d01433ad38167f365765b1c0ca610",
- b: "ffe8",
- r: "2c0e52ca0c37483000215020c64d6ac36a455bf6fa4382"
+ a: -0x165f3c659ac4157cb3af081b3fc42c0c788e2df501596c2b8n,
+ b: -0xfaa86d6eca3n,
+ r: 0x16d94bce4865cf842d17a399d8b4467f3f369fn
}, {
- a: "-a9cde7cdfe56eb8bd6777543ae714fadac2d97a394d8e9e8",
- b: "104b0d7135d3d2",
- r: "-a6bfb9031e60b5bc4b20e814cabfd80bd27"
+ a: 0x268d93a21aab44c52f961b1c97c002f427eb8b11e0707e3879c3a348f8e2b1c40c122n,
+ b: 0x70ccda2e2bba53b124c877bb4edbdbd387742cfe59d9bn,
+ r: 0x577effaeb2338647e38edb43n
}, {
- a: "6216ceed0d221476bfba7701f8297af56a4e66d003f8165b",
- b: "bfcaaf8676ad6a",
- r: "82ed6efc83669b0bc476bdd717dcfb6f10"
+ a: 0x3a13493c9e8d3d986a2a56c0aa811be408e1560f30ae155n,
+ b: -0x62a1a35fd7e184be45a1d0ba67888f0d982afb7a90an,
+ r: -0x96bcn
}, {
- a: "-707752a899efbe8989d205293535a404a6afb39cf21ce1274",
- b: "-3a24848be1024a6ea901f",
- r: "1ef2f2a8c0ad85ff4e6e6afdd3966"
+ a: -0x2a3f3f90ea280bf64e7e460f004ea4fb2553637cb7c844f349f045ffdbn,
+ b: -0xf2af2581bddfd9bfc1158c86211c4f4b169c0b1n,
+ r: 0x2c90ab8577b8d0ee86en
}, {
- a: "-e662fb8c46f979ff3b3f576fffd5c51cf70071ab61168b2eafee4708af",
- b: "964c4dd019b9a543df0cd1830a90428ec84ec04f8165283",
- r: "-18869d87ce54"
+ a: 0x8fd5f992d4d767c8a360ad32142fa9e9d87bc0036d1d6bb7aa7471dd7f38bn,
+ b: 0xe70n,
+ r: 0x9f6709805e05fd8b3c433e470387f06391b6948459e4c2bc072747e2a0n
}, {
- a: "b4403dee3a192009a5aae85c74efdb52b66dee1be6befa66f708ca9bf6b7",
- b: "-36db9f9f6e",
- r: "-34928ce65c2b71a6b6ce351838d4263d91ff2bceecec7a91441"
+ a: 0x52c0e08ba0a22a00498058dfeeaab2b21909e81e30c48652699n,
+ b: 0x1d516c96827an,
+ r: 0x2d2967f58654a5d30934348b225bf2b6e2a3123dn
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a / b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a / d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: /");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/inc.js b/deps/v8/test/mjsunit/harmony/bigint/inc.js
index 64865a2b32..4ead89e1bf 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/inc.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/inc.js
@@ -7,74 +7,73 @@
// Flags: --harmony-bigint
var data = [{
- a: "-989c298c6fc3",
- r: "-989c298c6fc2"
+ a: 0xb3df90n,
+ r: 0xb3df91n
}, {
- a: "bff2c86c449a56",
- r: "bff2c86c449a57"
+ a: 0xaacd53fc9545eb394304a3c69caba730230a6527730an,
+ r: 0xaacd53fc9545eb394304a3c69caba730230a6527730bn
}, {
- a: "-6fb15264369b63e3b92d6f74458140d4b62a56ecbfca000492b6b8a0c56c651ebaecdd1",
- r: "-6fb15264369b63e3b92d6f74458140d4b62a56ecbfca000492b6b8a0c56c651ebaecdd0"
+ a: 0x32efa4ba1c29098be7df8fa0f3c74a183612ecn,
+ r: 0x32efa4ba1c29098be7df8fa0f3c74a183612edn
}, {
- a: "e161f6284b91fa2646dc0f162c575bb8a0d0f5bfee26748b7181413567",
- r: "e161f6284b91fa2646dc0f162c575bb8a0d0f5bfee26748b7181413568"
+ a: -0xcabed06f63ad50406b96187208cf9ec6faf46413ce83e280c31ben,
+ r: -0xcabed06f63ad50406b96187208cf9ec6faf46413ce83e280c31bdn
}, {
- a: "-4d073653cc812",
- r: "-4d073653cc811"
+ a: -0x8a40ce58f753466f4008faa0814e76d9b594908df0e42cf6ad4a781e60ce2n,
+ r: -0x8a40ce58f753466f4008faa0814e76d9b594908df0e42cf6ad4a781e60ce1n
}, {
- a: "-ce31549364717dea9d1bf30baed642f",
- r: "-ce31549364717dea9d1bf30baed642e"
+ a: -0x848c8fc23bc93801n,
+ r: -0x848c8fc23bc93800n
}, {
- a: "2ae123a62361f1de2cc5ca9cfd9658f47d",
- r: "2ae123a62361f1de2cc5ca9cfd9658f47e"
+ a: -0x13a8316c5af3f3b06aab6642e717a7bdn,
+ r: -0x13a8316c5af3f3b06aab6642e717a7bcn
}, {
- a: "-4820298153b7bbd86337ad72e0d1ac7448de99bc6ce4c43c2",
- r: "-4820298153b7bbd86337ad72e0d1ac7448de99bc6ce4c43c1"
+ a: -0x5f5e517cf7efe577cd689c36d1beb3d9df4cfdc3fcb55ed9e9d8fan,
+ r: -0x5f5e517cf7efe577cd689c36d1beb3d9df4cfdc3fcb55ed9e9d8f9n
}, {
- a: "2e",
- r: "2f"
+ a: 0n,
+ r: 0x1n
}, {
- a: "-8f3b598ac2ab8a78a2d3e1f7ab1124b05a830aa1261bf57d8de2a",
- r: "-8f3b598ac2ab8a78a2d3e1f7ab1124b05a830aa1261bf57d8de29"
+ a: 0x107d5ec8048c4d715e213ec74f9f04006d3f2d90dd95d17c3e82f7a4ccn,
+ r: 0x107d5ec8048c4d715e213ec74f9f04006d3f2d90dd95d17c3e82f7a4cdn
}, {
- a: "-5c070fdee0d3f4a9adc63",
- r: "-5c070fdee0d3f4a9adc62"
+ a: 0xb8e09fc6037a75bbfn,
+ r: 0xb8e09fc6037a75bc0n
}, {
- a: "-3700cd6a6d1e68de1",
- r: "-3700cd6a6d1e68de0"
+ a: 0x515d1b999660074184n,
+ r: 0x515d1b999660074185n
}, {
- a: "56c68c",
- r: "56c68d"
+ a: 0xe12def67dc33628638612b8137d0fa425ea2dn,
+ r: 0xe12def67dc33628638612b8137d0fa425ea2en
}, {
- a: "-1ab894376fcf0dab9c",
- r: "-1ab894376fcf0dab9b"
+ a: 0x96a9n,
+ r: 0x96aan
}, {
- a: "-937dcf37c57588e55260c3eea20318",
- r: "-937dcf37c57588e55260c3eea20317"
+ a: -0x9df122a299a5e65b275028n,
+ r: -0x9df122a299a5e65b275027n
}, {
- a: "-f8ee63b438580a8915baf84edcfd0688247905e593e153644a88761bab0",
- r: "-f8ee63b438580a8915baf84edcfd0688247905e593e153644a88761baaf"
+ a: 0x20dd05bfb5ed6ebfb05da9a7488800b8895fbfe6d709eecb6071cda73e13n,
+ r: 0x20dd05bfb5ed6ebfb05da9a7488800b8895fbfe6d709eecb6071cda73e14n
}, {
- a: "-c63d4353ddf575bf02",
- r: "-c63d4353ddf575bf01"
+ a: 0xb38294d2e5523534c56dd6ef38ba744b29e3a3bb6f863f1aeaen,
+ r: 0xb38294d2e5523534c56dd6ef38ba744b29e3a3bb6f863f1aeafn
}, {
- a: "-bef439cc990a8b1d69b80fa7b3c703ba2",
- r: "-bef439cc990a8b1d69b80fa7b3c703ba1"
+ a: -0x6b58de4ba9b2699ea6777dc1adc0b2486n,
+ r: -0x6b58de4ba9b2699ea6777dc1adc0b2485n
}, {
- a: "6007edbd20b0ba90d86cf80e871e898c8907bb37",
- r: "6007edbd20b0ba90d86cf80e871e898c8907bb38"
+ a: 0xf40028a48894df5d81131046477a22e55cbead5n,
+ r: 0xf40028a48894df5d81131046477a22e55cbead6n
}, {
- a: "-692b36b5e874a448415678ac2b4d0a61c75bdc1674ae14d753cad7d46846",
- r: "-692b36b5e874a448415678ac2b4d0a61c75bdc1674ae14d753cad7d46845"
+ a: -0x44cbe6c7f5e7e879d3cb2eaffc4af0a43affc094218031a1f123746549dd7b8cdcbdn,
+ r: -0x44cbe6c7f5e7e879d3cb2eaffc4af0a43affc094218031a1f123746549dd7b8cdcbcn
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var r = ++a;
- if (d.r !== r.toString(16)) {
- print("Input: " + a.toString(16));
+ var r = ++d.a;
+ if (d.r !== r) {
+ print("Input: " + d.a.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
error_count++;
diff --git a/deps/v8/test/mjsunit/harmony/bigint/mod.js b/deps/v8/test/mjsunit/harmony/bigint/mod.js
index c310e2d3b4..c8cc7fa4fd 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/mod.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/mod.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "-67c2a5ba4221c048b64ecca6dd979391c44296b8b87ce97584ebab03be18",
- b: "-bebdbd9b278d05704416fd87d09aece9b69f98843",
- r: "-8794ba5d9204eb8adfb5e44c11a65fe5af057e6a4"
+ a: 0xaed3c714bb42a73d708bcf1dc9a9deebadc913ef42bac6a6178a60n,
+ b: -0xf3d6bd1c059b79n,
+ r: 0x2a4f893c8ece3n
}, {
- a: "-843cfbe576106b8e7c0aabec2cde402e7f76a711dbbf6ae79d6d932e22cf",
- b: "553d800ffcf69f80026",
- r: "-272250d344908c21525"
+ a: -0x8a6d8cb8e32aba50e08fd50aecbda6821e1863bf9a2n,
+ b: 0xe19ab8530bce542ec6b4c848fc383a2b688f07976dd523dabn,
+ r: -0x8a6d8cb8e32aba50e08fd50aecbda6821e1863bf9a2n
}, {
- a: "-b5fc9e8e21fbad798bcd3571ab169dd5e2059d1a9aa4d1bf44fe18da725b",
- b: "-da5d287414fbb2b4b",
- r: "-2610eb2277e01a758"
+ a: 0x7cd19a308b217ddd9dcda69ebe37767f09a483cb87060314aeade6f6c619d27c3n,
+ b: 0xae30b5c754e0an,
+ r: 0x619154797565dn
}, {
- a: "a9c92d42b53a6fbf0824f5b7bd9ed1cab1f9419d7ec2b50f192abfa6d620d53",
- b: "9de30592e1fe27d26afe06bbb4781c77ef58418",
- r: "9a67af5868a619a48883aed4c0d0a876e2ce143"
+ a: -0xaa90d777e849974d7ebebd2ed76dce635e36713e9e0c97cen,
+ b: 0xcc2bc7239bf83ecafc74b25750981df258n,
+ r: -0xa32d47bc250c5a41d147a862099778685en
}, {
- a: "ffa271b138163a779c89f17aa720490854520120b1307ef",
- b: "ab27ef49f3a00085981a5bc1c13530aec35e29c",
- r: "4c49a63b98d2dd7a33a843f98ba82b54909e337"
+ a: 0x7386178fe1159c4ca3c4e5caa6eda4413926047efd22007931c22ab54bn,
+ b: 0xc05100caa704bca144n,
+ r: 0x55ead7f83d4a795753n
}, {
- a: "d43f739464bcc8643dfaa807cbbe1157189e33368dd19b800db3682bb0dcb73",
- b: "bf66b1dc93054920039f9b3eba688d9",
- r: "bb2f3000959d66b3962d755d141c3ac"
+ a: -0xa48867cf46692351a59bc89c0ddc9a6d2cd9f72578c0bab72d8389n,
+ b: 0x77c2eec6cc0bef33dd4756b223c35a0f2b528f8648a7n,
+ r: -0x184a6eed8fcd32399c59605c7255b5614b388be24b44n
}, {
- a: "111ae5ba62ec37fd157ef531195363c5c8ace4427ff58811746af94ab8",
- b: "58112d0d88f3c8722d1e28942e8949c433c4619b451",
- r: "283276aa2b6316ca2146b1e6bcc8aa8de4b5446847a"
+ a: 0xac7681c26f78191107c9cde47f984fc7ffb7bffd0a055fc14n,
+ b: -0xe2d91d1a0e1edc3f806a24c36f7ca0a8a9b44b66c9n,
+ r: 0xdf0b3460335089fca0b1da676b3d1e9caf15afd3den
}, {
- a: "-4609a1c7d563bbdb49fd01e05031395e6e06b78407e440",
- b: "-75ee71f85344163bae2ba0e438",
- r: "-68b517938971cb261babea1cf8"
+ a: -0x3035011f4adba0c032ddc2fc884ff7663b45cb3baebec0fn,
+ b: 0xcc27f9a21470911a61aae50ae118ebcf85ea4dn,
+ r: -0xc5ea1db5af95ccdce2844b13b82659be1ac2adn
}, {
- a: "292cd2c5eb3e80942066c5af8bfdaf8d1d091f61f3005d226318",
- b: "5",
- r: "3"
+ a: -0x2dbe79c42b1e8db7aea4ca9cfaf1350528b096e23f69ea322cn,
+ b: -0x82f7e4f2ee432485n,
+ r: -0x824bddbaf61ce383n
}, {
- a: "-a9f8c03f06200b54959a2ced325090be25417f0bf1274d",
- b: "13eb3a0c772fc98e537883a3ef72e1ee37b249ee17c0510",
- r: "-a9f8c03f06200b54959a2ced325090be25417f0bf1274d"
+ a: 0xc5c7fbeb60fdb9f8e8347ebf9fe303e3223394b2db93ce2n,
+ b: 0xcd988b4cb278a39fd3ae58c16616855f01fab0fb0n,
+ r: 0xc3b5157e680c8ca66df1bd640c6ad1601bd612b02n
}, {
- a: "c4ddadae9abee150068fe6536c6b2fe229070410da61a09abe1a7270b",
- b: "5f4d55e3345e37fe8887f3ca5e",
- r: "42eea65dd545aacdcd250b505b"
+ a: 0x73dd1e27450edb450fe4bd70494a8ad85a8ad9691bc40bf500a1n,
+ b: 0xc8fb467f71be8a6b965d21857ecfdn,
+ r: 0x646e6d9fa1e2eb2933b2e5a837d37n
}, {
- a: "-7a73d61e639dacedd207dfe1edc630b1dfda9078489a7f0cf79dcdfcbf3992efc13861f",
- b: "-c0f2b9045bb3865d89cc0c9920c3ccfae382c250",
- r: "-6bdc3d5943d7a35e4ecbc5c8deb335ca2c3bbc0f"
+ a: -0xecb217583ec4240399bfa34560b45f1fd08efd0ebb56b2dn,
+ b: 0xeba34abn,
+ r: -0xc2d347bn
}, {
- a: "-a9ac4a2e055f22c8ba7956ffca5457a71412eb74d3a180555bb25ce5096e23d6c619",
- b: "-f3c",
- r: "-a5d"
+ a: 0x29c8a9854802b9317b90683eb82609b3aa695598807fea0482a56567n,
+ b: -0xabe5e55b228f8n,
+ r: 0x52303615b525fn
}, {
- a: "-b16cfd7fbbf820afc77be1590fd9802ecd12059238b98bb96d9d215af4808",
- b: "2c288a9de167",
- r: "-2b3901c1b8d1"
+ a: 0x7b532ee90b3945dbed20180632f46c90d0edb558e39456103f50e5a74ff1dn,
+ b: 0x17n,
+ r: 0x6n
}, {
- a: "-df12dd3e56dc3c3dd769ad964f8356a5860177f1b4a3b95acc75",
- b: "4ec6f5474b18",
- r: "-17bf0980582d"
+ a: -0x6f1dbf8dedea796a2423fad9688d71340963da3a4f005f13e9cd4aa03b96c864552cn,
+ b: -0x408a4f36ec4a1b74642fb0a4559en,
+ r: -0x33c06c53e6e47ca87f36447f1b0n
}, {
- a: "1d197bf4aa09a02760cb004e9edf25e6591ae14d92b6cbf1349ea1c040d66",
- b: "-23ebacc5f380e5649a1234c3ed050472569cbcd056",
- r: "20e5e588b4861be3ec7b4005a6a50566e60a3a4364"
+ a: -0xe77508ff49bc76c78678676ca1edc82eb167f3b07d3208d89d26582ef74d60n,
+ b: -0xf005fb1a8b29035c72980abe4a0715c90n,
+ r: -0x159e8c7616079663b4e12765a05cc6d40n
}, {
- a: "fab19b7e774bf33bb42a7af90d8dc75cbc927e3225003610c05b117c25c90944d",
- b: "-3b433469282a54d46cac",
- r: "10eda9e98b721b4a2505"
+ a: 0x626749ef019017f53b2c686f26594e6d3c5f247cf0ed7024cca277n,
+ b: 0x5bd931ea8c6939115ad69f6ce3c6bbc300320n,
+ r: 0x3a4b041c36d0a45ef932745eab473943b1a97n
}, {
- a: "5b19514660782d3a2429d7791659868abb9d8fc96077247",
- b: "-59188be60",
- r: "2340c3607"
+ a: -0x960dde134b881d3366d659c7d8c3eb6346f50c08c6f78en,
+ b: -0xc9e7fa60f6ce1997012f5a6164138447fa248754985905dc6n,
+ r: -0x960dde134b881d3366d659c7d8c3eb6346f50c08c6f78en
}, {
- a: "-2884ed1401b5e976be7dc1faf7bffb632c808649fa75ab458cc66ef4e75",
- b: "bf",
- r: "-40"
+ a: 0x702dc0033e09e9e58ec3fc96f861587ad13cea1ed7b7eb301n,
+ b: 0x71f0c97687b4f3318be2fe54b90273n,
+ r: 0x4e1859254ac1d42f7c812decef00a3n
}, {
- a: "-4c9cdf26be1797e54480ab20797e35d04941c11ff78b040c00099422cec",
- b: "ee20f34835529f4a73",
- r: "-e24db40426d47f968b"
+ a: 0x39376649769a0abb26133deee934330ef21a2f90ea89476569630021af6023235n,
+ b: -0x4aaaec4085f65ccc9d547beb122160afcbb3n,
+ r: 0x3027d5ebb37c5aa438f11dfcafa3b003b91bn
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a % b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a % d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: %");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/mul.js b/deps/v8/test/mjsunit/harmony/bigint/mul.js
index 83a6700768..c6a9ae6148 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/mul.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/mul.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "-be5e349bf5ad48e7a5d",
- b: "a47a19a48667620a82973579739e08c64651b3ede2c578ff975e",
- r: "-7a4f25aa8725368922062fc91a110352a660657ffb3a489126ded10ccd29d929120c926"
+ a: 0x2bf1f236c2df29f7c99be052dfe1b69ae158d777fea487af889f6259f472c0n,
+ b: -0xae0090dfn,
+ r: -0x1dde8b7f987479d7b308e2b8a122703d417f921fd4ab04fcac81439ace4f445c7bf540n
}, {
- a: "912d5d85aec5ec6caf90514f0ec005f711caac82960c0ee911ef30f145107ae236783e6",
- b: "0",
- r: "0"
+ a: -0x71cd629b6682b5b1c50f9436ded88468bcn,
+ b: -0xb24ce1d11726048158fb42c02e9361973193n,
+ r: 0x4f42fbebca0bc1746d114996d9c9cceb4065c347da31dc0efa88db41f1f724aef81ff4n
}, {
- a: "-87c17f6930bdef7e7c7ae7dab8be3baa2e7d67",
- b: "285af6503c42d077a0d59558deb170bad",
- r: "-1566788db7747e33ddb6540190617b57856e5f3924447f9f400f34a9aa75aa7d20f2b9b"
+ a: 0xef7c15fb2256338bd0650fa51c28551108ccb9828850f1b415d0c8f1a47fc0e2f86n,
+ b: 0xc17n,
+ r: 0xb4f552dbf2c20292f5475d025276b9c70f16316c91e0a9a2833bf2d616bc484f808d0an
}, {
- a: "-3d0fd72abc5fb82d5cf45679812b71",
- b: "-3c537c742e17c5fac136e33f5e7edf256ef52b6df",
- r: "e63a0402966391e13e600371183f223379b66912ee706a2b369668b3e33e9a15992d6f"
+ a: 0xcc8a5a56c623d9f9be2103bc237976f26n,
+ b: 0x13a02fec7cfd49c5a081c06bbbaac5c70dc3an,
+ r: 0xfae417740ef1963ad78aa4252159d986593845690f4f5fe759f6d913d79810473d69cn
}, {
- a: "dfd77cf4b0d071df2487c9b77959ee027380a159b0",
- b: "-56300fb05cba28d5150892ff66e77",
- r: "-4b5c6a1976d3a318cb9f93e154dc362da4e740fa31d82baefead60c355bfea99eaa50d0"
+ a: -0x519n,
+ b: 0x7de5551df3ec2619a89139153663a79f8bdb47047423c8070d7cfddc26d43af8ee9n,
+ r: -0x281c60ee5b06ece38cc4c4bfd2242017c4ff0cd05b40a66abf3c22a1741efe09ef81c1n
}, {
- a: "-bea52261387bb2aaf8c61b5ee7bbf85b3bbbedfe60773ed9873cbceba078d3fecbb",
- b: "-2ad7",
- r: "1fe73453d2eabb331a676ede8eb9759c8c6bffe09c76947578e08b0152379841d867f0d"
+ a: 0x6fb9ba0d0n,
+ b: -0x86b45ffb80fbf2b61abc14b28855780f83e187fd6ae26e09d28d6f05260e1n,
+ r: -0x3ac9efdd4e930fcbf654819060b858f182f7768db6bc72deb8d734d2ea10fa02b656d0n
}, {
- a: "5b5a3a047d8f7e1519d6d92d3241c0f32c4d789",
- b: "-4bd3e8c09b0ba71bc25416877a4c7135",
- r: "-1b0f0d6ba20fe60049c4a172e8b1fb9824c1e85e21f1ebe08556b7074d8d4f4ac90185d"
+ a: -0xde2e7a0883a63d69d2120108f92943e4a34b63e8468c12b38a0519a6498n,
+ b: -0xb1857781fefn,
+ r: 0x9a11f83db6da31f78a2415d3891f79a0f6aafc69553fb458a2bc3ae2e2f65cb09251e8n
}, {
- a: "1b2c8263d4bac8cc825657f81fca6196799aff00fa960d5c04",
- b: "-6b140fca30c8037b18d88",
- r: "-b5dbba6fba700592408e2ebbba2d4c6557d61d1201e24314f690e77c0b59c68d8b1620"
+ a: -0x2c90284ebdb5237ba7c9562f596ef9a21254a3n,
+ b: -0xb55feab8b45cc1bcdcfc5585c9114de0n,
+ r: 0x1f929eda8d33ec2ded8d76382a4cb731f805d28e827521a27154b6618a6f87ed5215a0n
}, {
- a: "dd74f1f92ab5b9e0b447b7fe9076f7",
- b: "-9dbb0acb24e6336fe4f37f8e942d1d5c22dfe4f34",
- r: "-88728628dd4ed79514e720448e4f9ad422ba6ca62cd61bf779cfb3cb12afb5bbe20632c"
+ a: 0x11ffe68d26ca29eac43b64n,
+ b: -0xe3311b278fec1fb3f8f59acfd9147c773197fc54c66b25bdn,
+ r: -0xff95d530fd53e61744d86d09f9ab5401b1efb2133af5c47ffeca31aca22a4da414cd4n
}, {
- a: "-e97d2712832aa20a",
- b: "-cb98c0fa9b4b35fc7a4ebed6d3d106bb758c244eb756c75587300ad",
- r: "b9b1904d502a19d20cc7bd8e05670f667db817bb9104ef8acc747f3df6541eede4d80c2"
+ a: 0xbda74de44eeb2995a4185325014863n,
+ b: 0xf99164c7248a0372d72d82cbf65fdb345c84bca6n,
+ r: 0xb8e3712f119fefa47168950100b33aa1ec4e61a970d0eec2d2dd4b925cc4dfd709a432n
}, {
- a: "828ca2d8981f347f4bec14ba",
- b: "-8ce5b6b1c329477a0a728ed81331af1f03e3eaa1ccb2cb3",
- r: "-47da0e06c179e58b3e5d8abc4a2b274ede3a7d73c0b5fcb3690f0b544ed6c0b7120760e"
+ a: 0x47fc0bb6d761e12a1f7cf2a14f4cc777486b22516ca339641fn,
+ b: -0xd2fa2494ae242494f307n,
+ r: -0x3b531808ad9095f9f904dba47ef4a0aef54f736cc2836accb7b1b70f07ee862e8729d9n
}, {
- a: "aade382f2483a571e12cb1796bd124e21c6014261cef5d733a1b35fb01db7232c5b",
- b: "-d34a",
- r: "-8d068c8b18b90f6aaf82ed3260f68bdb06bf64613cdda736fc33395b7d16688edb9d34e"
+ a: 0x91d09f35203fcf69974d347ab718912cn,
+ b: 0xfc2d9a00611e4e1767b67db898b951e5d2bbb9n,
+ r: 0x8fa3561bff2cc00a780a71e5b16d6092296514d401c88fc54e04a91062775cb4e40cccn
}, {
- a: "34188043594e64ac10b37923ad29b4b536a4098ce76c9133155820b7843de95af",
- b: "106952",
- r: "356f6c0aa4a1a0d27dd2e8867420eddcd9fb5614450d6bb85cba0071e03e4563a0b90e"
+ a: 0x5a5b1ede360dafcac08c92f4b809e401502df5ab8569303a2n,
+ b: -0xb64d963b1ac4eb46ddd65n,
+ r: -0x40582a6083cd0feb5b33ac138feaf9250b5084e561807e56a8415c810a1bf9861d48ean
}, {
- a: "96b2a125fc041fe36aebe7dfd02a4f56b90",
- b: "-a9f9c57a732002abf0764868f297442f61d1",
- r: "-640ef4605c38f6643d60526833e7a3b7b71a72b7042434abc0ca600e7f79b3aa98e6090"
+ a: 0xf68275dc25d6af7dde130n,
+ b: 0xfe330b766e44f479e342e3315812e977d4d5e39bffe42111dn,
+ r: 0xf4c697f78dc7c9b3207b50cf2cf3035aa065e96c37c0be3345f71f34096f057e76b270n
}, {
- a: "-ae8327a77a340d2c90c93bb0ebe02d7a77d7c2a4e8d21a21d0",
- b: "268a49068f2769f949d95",
- r: "-1a45bc5e1b636b28bf716dfeaf0599cd932270796233adf7d58b8d72d06ac580c323e10"
+ a: 0x77009ed053f71n,
+ b: -0x647aecfc2af4079a586fbba99070c3f51d5465d9a4fd8c12cdab10854n,
+ r: -0x2eb5627edb1072a65a1db2d243d0bf2eeb5c3ff6ad61ec1063e9ee40b783bd06d55914n
}, {
- a: "-a0adcb953540a4f25fc97a40a2c9285bfba7e70e4ffc",
- b: "fff2302a069e8f69d00e529a81b",
- r: "-a0a5204f09eeb3ecd9fe2c6de34d0e0ed70c6d1589d95fd83de33d921a3152dfe5bcf94"
+ a: 0xccfa15bb6125b01bece5a919ab5e9128e784d63e50b5990ba8047a7effn,
+ b: 0x1a366ac48cdan,
+ r: 0x14fcf8728aa3dc5ef26b077dfc012d592c3a0cdfc3331942f921f037eea5af9eff9926n
}, {
- a: "-83771467194c5b612539be7228f366a8",
- b: "9412ce8f98510486287bc15c35883fff04d126e",
- r: "-4c0a803e9a079742969e01dbb7990566b2f5ac9658653c967b5d295f6a996ba1655ec30"
+ a: 0x4fc54a59f2n,
+ b: -0x93bbcbd684987407c4ec4a3cbdc7c4821c33ca20ca8449b0055905e9a8ffn,
+ r: -0x2e08ce533fdc7bb8a3a2280cfb116c85837b74606ed25f8daad623baa4ac7de758680en
}, {
- a: "12c3327d3d7ed4b3180cc301f98d351804451be431137fa48aa67627db867a2cd",
- b: "-ee0af6",
- r: "-11724697fa94a9caafbee6e7b778ecae17ed7ebba5575296b1fc4995a12fe5c9a8872fe"
+ a: -0xcf21626f318089a85738f7e9cdcf0a477f4d880ce58679n,
+ b: -0xc7ca8704a49fdb8f9567c68cn,
+ r: 0xa1a6d11a77d8e1b723bfa0242407a454274145c31bb6ede29bb0e3883b08951b36202cn
}, {
- a: "-5",
- b: "-a00c8e774e3d4a6fc2fa384382720700e49e3e4b882bb5c4c0dbe4cdcd92126731128b",
- r: "3203ec8548732742ecee319518c3a230477173779a8da8cd7c44b780503da5c03f55cb7"
+ a: 0x20c3cb19e51d4e2ac2c89f26e052bef8n,
+ b: -0xab2a148297108425ebd2fd17d897f647e0792fn,
+ r: -0x15e82b69a0eaed04cd140c6aed3b1f5bc0fc5de7ceddcb2d77615767b19dd82c744788n
}, {
- a: "a0a4dc9610ada50dfa633ad910a02aa20c85",
- b: "-4d7aa1dc1cc1d1767b4e25a839e7c177652",
- r: "-309e8a7c10fbc6b50f6ad012099765a35395b9d51112d50e0a8f3ac076942a9e5a0509a"
+ a: -0xcc4f48f45d8165569348fb995370n,
+ b: 0x140cc111df0a06e32335478ded744eb3a737a53409n,
+ r: -0x10005f808c07a1d91faac090aad06804b491919a9d2ed58f6caa52448ad97bd486aef0n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a * b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a * d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: *");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/neg.js b/deps/v8/test/mjsunit/harmony/bigint/neg.js
index 8cec9cc21b..2fedf297a5 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/neg.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/neg.js
@@ -7,74 +7,73 @@
// Flags: --harmony-bigint
var data = [{
- a: "58ad59aa3aa9d04d4c12493966e204ef0500d5f92ecb31",
- r: "-58ad59aa3aa9d04d4c12493966e204ef0500d5f92ecb31"
+ a: 0xcn,
+ r: -0xcn
}, {
- a: "6dbd19e4b781a8f113ae95738dda4b70ba027755052126c198d20ade97869ff",
- r: "-6dbd19e4b781a8f113ae95738dda4b70ba027755052126c198d20ade97869ff"
+ a: -0x5a3d0f6cdb7987a2d262838c05359f786a484d052529n,
+ r: 0x5a3d0f6cdb7987a2d262838c05359f786a484d052529n
}, {
- a: "d02befb1c96364a984664f85",
- r: "-d02befb1c96364a984664f85"
+ a: -0x98d8c6cbfd67b6b652b7a4670478f6706e06a3n,
+ r: 0x98d8c6cbfd67b6b652b7a4670478f6706e06a3n
}, {
- a: "86",
- r: "-86"
+ a: -0xe66ac692ff012bd0f4ca38804628f71ff411aede09c59597n,
+ r: 0xe66ac692ff012bd0f4ca38804628f71ff411aede09c59597n
}, {
- a: "0",
- r: "0"
+ a: -0x97e1e0c13c0c0c420aca92268ea802047c30570335de0000dn,
+ r: 0x97e1e0c13c0c0c420aca92268ea802047c30570335de0000dn
}, {
- a: "-f8da",
- r: "f8da"
+ a: 0x6b2eddc3b212913abed4f5c84e3eee64d6463224dff8n,
+ r: -0x6b2eddc3b212913abed4f5c84e3eee64d6463224dff8n
}, {
- a: "2b0f358b54a82fbaddc5a6e61a5d",
- r: "-2b0f358b54a82fbaddc5a6e61a5d"
+ a: -0xfcd42a712dd928deb51ab2d151fa6bee0f4dd2fa6n,
+ r: 0xfcd42a712dd928deb51ab2d151fa6bee0f4dd2fa6n
}, {
- a: "-3d32065b9bbb36ee521ff82da",
- r: "3d32065b9bbb36ee521ff82da"
+ a: -0x75ba8e0e92a05ff552f2dc3afb39a4dn,
+ r: 0x75ba8e0e92a05ff552f2dc3afb39a4dn
}, {
- a: "ca3da934e8081c457933c90",
- r: "-ca3da934e8081c457933c90"
+ a: 0x4570376e541836fab5190e08an,
+ r: -0x4570376e541836fab5190e08an
}, {
- a: "-e4d2bbdf90affad1d2a",
- r: "e4d2bbdf90affad1d2a"
+ a: 0x15aca33cfb06n,
+ r: -0x15aca33cfb06n
}, {
- a: "-290845e8f55d467e3",
- r: "290845e8f55d467e3"
+ a: 0x7ec0027910c44b791bf193c6f25487a9435n,
+ r: -0x7ec0027910c44b791bf193c6f25487a9435n
}, {
- a: "-771c77d2dd2227c30cf44f1bf3230",
- r: "771c77d2dd2227c30cf44f1bf3230"
+ a: -0x31f0d92f358618e6b29a2899bd988533838d33839fb37n,
+ r: 0x31f0d92f358618e6b29a2899bd988533838d33839fb37n
}, {
- a: "-77aa11",
- r: "77aa11"
+ a: 0xb4f84118d797244c982f1n,
+ r: -0xb4f84118d797244c982f1n
}, {
- a: "d220c8af9c97516bf5ec295585f711e020480d08ac11689726a285930",
- r: "-d220c8af9c97516bf5ec295585f711e020480d08ac11689726a285930"
+ a: 0x626n,
+ r: -0x626n
}, {
- a: "53841e699f994e1a43f7f848f34d418792191de15b78e1a852c430b2f8af6e7",
- r: "-53841e699f994e1a43f7f848f34d418792191de15b78e1a852c430b2f8af6e7"
+ a: 0x9f35c8968457d07608699df5894c0542f35b73b0b5ce8423aan,
+ r: -0x9f35c8968457d07608699df5894c0542f35b73b0b5ce8423aan
}, {
- a: "7c30187b8901bd748adc4bc243",
- r: "-7c30187b8901bd748adc4bc243"
+ a: -0xb5be1f7937895adc457f051d1f4bc74d556b432dn,
+ r: 0xb5be1f7937895adc457f051d1f4bc74d556b432dn
}, {
- a: "-e07ac5649eb741a023b0f9928d5982032f6766a479c7fbf26",
- r: "e07ac5649eb741a023b0f9928d5982032f6766a479c7fbf26"
+ a: 0xcacb413b3cab5a5f5086511728d1afbaa82ca41e69805daf47503e2n,
+ r: -0xcacb413b3cab5a5f5086511728d1afbaa82ca41e69805daf47503e2n
}, {
- a: "5ab3237bb32234bcaf8",
- r: "-5ab3237bb32234bcaf8"
+ a: -0x77ef7cbb15cee20a519a1n,
+ r: 0x77ef7cbb15cee20a519a1n
}, {
- a: "7df7",
- r: "-7df7"
+ a: -0x71ac9bfe7f5f70038c7fn,
+ r: 0x71ac9bfe7f5f70038c7fn
}, {
- a: "-518b7b",
- r: "518b7b"
+ a: -0x500a02b8fd66ee1067022c02c7241acdc42b947bfb933aa95a8d6n,
+ r: 0x500a02b8fd66ee1067022c02c7241acdc42b947bfb933aa95a8d6n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var r = -a;
- if (d.r !== r.toString(16)) {
- print("Input: " + a.toString(16));
+ var r = -d.a;
+ if (d.r !== r) {
+ print("Input: " + d.a.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
error_count++;
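
Two facts of standard BigInt negation back these cases: negation is exact and self-inverse, and there is no negative zero (-0n evaluates to 0n). A quick check:

    var x = 0x5a3d0f6cn;
    if (-(-x) !== x) print("double negation failed");
    if (-0n !== 0n) print("unreachable: BigInt has no negative zero");
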
diff --git a/deps/v8/test/mjsunit/harmony/bigint/not.js b/deps/v8/test/mjsunit/harmony/bigint/not.js
index 7ceaa01e63..6b4b2eb713 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/not.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/not.js
@@ -7,74 +7,73 @@
// Flags: --harmony-bigint
var data = [{
- a: "3d02c87edc77722299f6559ecca038911f864a4e78c20af80f4a6d9",
- r: "-3d02c87edc77722299f6559ecca038911f864a4e78c20af80f4a6da"
+ a: 0x9f0305cd75e4n,
+ r: -0x9f0305cd75e5n
}, {
- a: "ac01894aeaf77255ede209897561ec1e3c7e916b9",
- r: "-ac01894aeaf77255ede209897561ec1e3c7e916ba"
+ a: -0xe8e9c8312f553c9n,
+ r: 0xe8e9c8312f553c8n
}, {
- a: "-7aaab657ab197f26eb6b98fe4c2c79b199a8156129ca04",
- r: "7aaab657ab197f26eb6b98fe4c2c79b199a8156129ca03"
+ a: -0x1a29f0783a66534da3c024ad1cc854073f886888fen,
+ r: 0x1a29f0783a66534da3c024ad1cc854073f886888fdn
}, {
- a: "9718579cc52befdaff1ec035b5ed03cec5c1d1678c28712cf0c9bec2c807897b74f0",
- r: "-9718579cc52befdaff1ec035b5ed03cec5c1d1678c28712cf0c9bec2c807897b74f1"
+ a: -0xfc2cc19496c1ced95be832ca5246d41c526b9fa28b88bcd39813aa336n,
+ r: 0xfc2cc19496c1ced95be832ca5246d41c526b9fa28b88bcd39813aa335n
}, {
- a: "e614366bc4e67509843254c52e13da5380b00a35aa1d233e70821f7d649ad1957db",
- r: "-e614366bc4e67509843254c52e13da5380b00a35aa1d233e70821f7d649ad1957dc"
+ a: 0x13ebn,
+ r: -0x13ecn
}, {
- a: "fb815f78e6952b500226c",
- r: "-fb815f78e6952b500226d"
+ a: 0x4c12d642b2a132f0c927ec7504b530fb45d5e249163ffdc59feb3de31881n,
+ r: -0x4c12d642b2a132f0c927ec7504b530fb45d5e249163ffdc59feb3de31882n
}, {
- a: "94404df802649cff2ea6c0996f55ec60c14f00ab29b287092389951f6227c4ec7",
- r: "-94404df802649cff2ea6c0996f55ec60c14f00ab29b287092389951f6227c4ec8"
+ a: 0x49637a624cb8782002e3e0874ad76215e188cee948c7ce7b0f66e1d0n,
+ r: -0x49637a624cb8782002e3e0874ad76215e188cee948c7ce7b0f66e1d1n
}, {
- a: "-74b42cd7bccd",
- r: "74b42cd7bccc"
+ a: -0x81cbae84e6753b885ada46c0bf72368c083fed622fn,
+ r: 0x81cbae84e6753b885ada46c0bf72368c083fed622en
}, {
- a: "da",
- r: "-db"
+ a: -0xcdf793acfdd08b6n,
+ r: 0xcdf793acfdd08b5n
}, {
- a: "3a9ade198",
- r: "-3a9ade199"
+ a: 0x88n,
+ r: -0x89n
}, {
- a: "56e766d24fd18c2241f244dedc426c0b1ae59e7ed4f06def0a75e0a5c8651e2ce87928",
- r: "-56e766d24fd18c2241f244dedc426c0b1ae59e7ed4f06def0a75e0a5c8651e2ce87929"
+ a: -0x1fn,
+ r: 0x1en
}, {
- a: "cc430c91347b22ecb1a6f1a2ceea168ffa4a9b80065bd1ec5d",
- r: "-cc430c91347b22ecb1a6f1a2ceea168ffa4a9b80065bd1ec5e"
+ a: 0x5c3278e76266b9e93d63eb4f2aa53716220aa1n,
+ r: -0x5c3278e76266b9e93d63eb4f2aa53716220aa2n
}, {
- a: "32e4b7f82d8c037d0f562296e21b1e58a",
- r: "-32e4b7f82d8c037d0f562296e21b1e58b"
+ a: -0x9f4fe05n,
+ r: 0x9f4fe04n
}, {
- a: "-526d3f1a904561f0cde1f0a2a4",
- r: "526d3f1a904561f0cde1f0a2a3"
+ a: 0xdn,
+ r: -0xen
}, {
- a: "3de5a9635a40539831c9665577e5eedbf680755e2065a0caa346759e17225",
- r: "-3de5a9635a40539831c9665577e5eedbf680755e2065a0caa346759e17226"
+ a: -0x74a9d63d02bd1393b311211e35c8b2d11e2367ffffb812683365f02d98n,
+ r: 0x74a9d63d02bd1393b311211e35c8b2d11e2367ffffb812683365f02d97n
}, {
- a: "-d912828b8d6419900",
- r: "d912828b8d64198ff"
+ a: 0x8a2831ae3e1b6b21a7b3d7ee921a0c0edf29b272d654f647169a5a6141d3aaf41439n,
+ r: -0x8a2831ae3e1b6b21a7b3d7ee921a0c0edf29b272d654f647169a5a6141d3aaf4143an
}, {
- a: "-17968ddf93",
- r: "17968ddf92"
+ a: 0xa2ef28270a7e668e190bc303537f5a8ba3f0d59bcn,
+ r: -0xa2ef28270a7e668e190bc303537f5a8ba3f0d59bdn
}, {
- a: "-c2bfd766e34923d549bbaedb4d9b7bb35a61908e6144462a",
- r: "c2bfd766e34923d549bbaedb4d9b7bb35a61908e61444629"
+ a: -0x4f8bd11ee4ef38682a0c94en,
+ r: 0x4f8bd11ee4ef38682a0c94dn
}, {
- a: "af426ec83aaafc84a94930e51a2899696a3d",
- r: "-af426ec83aaafc84a94930e51a2899696a3e"
+ a: 0x5436d0721d1282755068add3b5ab5424cb455fad50811a9afff0be5n,
+ r: -0x5436d0721d1282755068add3b5ab5424cb455fad50811a9afff0be6n
}, {
- a: "-283de5b9379a45f065d3b8662ac38faa6492bc0eea6b7e3b51591a5cc27669e",
- r: "283de5b9379a45f065d3b8662ac38faa6492bc0eea6b7e3b51591a5cc27669d"
+ a: 0x5db6907c92529f517d24a6ff7303de42cb9059ae2adc0c22n,
+ r: -0x5db6907c92529f517d24a6ff7303de42cb9059ae2adc0c23n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var r = ~a;
- if (d.r !== r.toString(16)) {
- print("Input: " + a.toString(16));
+ var r = ~d.a;
+ if (d.r !== r) {
+ print("Input: " + d.a.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
error_count++;
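
BigInt NOT is specified on an infinite-width two's-complement view, so ~x is always exactly -x - 1n; every pair above is an instance of that identity:

    var x = 0x9f0305cd75e4n;                       // first case in the table
    if (~x !== -x - 1n) print("identity failed");  // gives -0x9f0305cd75e5n
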
diff --git a/deps/v8/test/mjsunit/harmony/bigint/or.js b/deps/v8/test/mjsunit/harmony/bigint/or.js
index 4ee32657d4..c378e141cd 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/or.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/or.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "a66",
- b: "d0671cd6e4ebd7baf6e48b2529348cfa89fc9513ba30ef3f99aee07f267df163cf8",
- r: "d0671cd6e4ebd7baf6e48b2529348cfa89fc9513ba30ef3f99aee07f267df163efe"
+ a: 0x77a87n,
+ b: 0xde08e7433fb9584911b8cb4bc7eed802299b4489fc635974d063847da4e8b461df5dn,
+ r: 0xde08e7433fb9584911b8cb4bc7eed802299b4489fc635974d063847da4e8b467ffdfn
}, {
- a: "a9950e5fc429f0f93d5fa8f306f4e5da88a8c9f9",
- b: "d1fc1ac3db7ff5547462800923e616727120f74f0a6cb7bf1886dd4f4ac",
- r: "d1fc1ac3db7ff554746a9959e7fe56ff7fb3f7ff8f7cffff5daedfcfdfd"
+ a: -0x1d3ff6f353f2d035ed7b4b8e5e4ae1c8d162n,
+ b: -0xcf829b11c2f996f388b22cd03aeb75ec434f3be8fde6466n,
+ r: -0x192f308302c00024a55a4004520a81c84062n
}, {
- a: "5e277a64b6515ad69ed8935ae8dcdb6dc66f98fcbb462b10bea0db15ad6010d",
- b: "7df3",
- r: "5e277a64b6515ad69ed8935ae8dcdb6dc66f98fcbb462b10bea0db15ad67dff"
+ a: 0x6dbbc93af9a9c222187dn,
+ b: -0xfaa906348dc49859c34bc7c6n,
+ r: -0xfaa9020404c400500149c781n
}, {
- a: "3b8368196588e684403965902763d66aa",
- b: "-edf58c5ab418f49cf9fdb7f3b1c416a03c1dfbe90ba7ea6373c",
- r: "-edf58c5ab418f49cf9c43493a0801600381dc2880b808821112"
+ a: 0xf8n,
+ b: 0x4388532n,
+ r: 0x43885fan
}, {
- a: "-5587f5e86137f8ea4d7259acdd0b77a26ea069385501c9985df6a5fcd3c",
- b: "9878871628ea5cb66",
- r: "-5587f5e86137f8ea4d7259acdd0b77a26ea069385501811849d605a041a"
+ a: -0x3ee35e1823b91414618f05995e11594920539921e9440n,
+ b: -0x58c5811ee19db01b7d9824c49682af55956119cfbc9868287ef138da08ee3n,
+ r: -0x3c80040002800414010101891c1048082051180008423n
}, {
- a: "-dc65679b1ea7c86c10890e6d6be3bd069b4c7",
- b: "83ea9",
- r: "-dc65679b1ea7c86c10890e6d6be3bd0618047"
+ a: -0xa2a70c5da9a7e98f242e82d518n,
+ b: 0n,
+ r: -0xa2a70c5da9a7e98f242e82d518n
}, {
- a: "-755f422bfb614b7ed2c8e05cd1e0e0a",
- b: "-d3185fac5454a495d7b149e67df4436339e060d924d",
- r: "-24154221496049744240204040c0209"
+ a: 0x7868475f450ff2b15a03eccb4d26ce8711383f615cn,
+ b: 0x1c62774e1db239cb461c4190b54de4d872f9484cf82ed1258cc14580cf29f608n,
+ r: 0x1c62774e1db239cb461c41f8fd4fffdd7ffbf95efbeedb6daecfc791ff3ff75cn
}, {
- a: "-1dfdf84b41ddd069053",
- b: "f9b2bc80b580311773e9a5d57e8f24ace46bd2a0fce24404db684efa8692638b5d604e6",
- r: "-1105784900548009011"
+ a: 0x865d033028551a71f6f9f8d7b36cf9819n,
+ b: -0x68e30391d9831a4ea3e65b2e433f55ba398n,
+ r: -0x68610290c9830a4482000206000c1102387n
}, {
- a: "18d1b78380aa9016029417c2ebe77a",
- b: "-b63b35e6711dcbf00dc02cd936",
- r: "-3835446109c9600800041806"
+ a: -0x817a0dacbafbaf40ef819fc62a8efc4b4960n,
+ b: -0x5af313e32a5386e29cb5d2b34d54f04da389f33d52444f177671e41n,
+ r: -0x12a04840a008a008f019504008074430841n
}, {
- a: "-9981f",
- b: "-5d876576146a2d5dc8d52d26ea3304287af0922685f8e1a46875e80f24a470",
- r: "-800f"
+ a: -0xef8c9475210c0a31aa12c34db6e7737609c75b78a54cn,
+ b: -0xba91b4ec3a5390db84febaeaddb8209155413e2e02fb0n,
+ r: -0xa9080441210808300a02820d9282011400034260250cn
}, {
- a: "-20f8052991bc5a8f2631c9e4b34aa9073a69913185a539d719",
- b: "a59fdaa025b42",
- r: "-20f8052991bc5a8f2631c9e4b34aa9073a6991200005398419"
+ a: -0xf4e707078d14001959f4n,
+ b: 0n,
+ r: -0xf4e707078d14001959f4n
}, {
- a: "-d02620570",
- b: "-3b14d75fb9d9b95d13f884a82c9f16",
- r: "-400200506"
+ a: 0x601f0n,
+ b: 0x246fbfn,
+ r: 0x266fffn
}, {
- a: "-8",
- b: "-4",
- r: "-4"
+ a: -0x9ccd3d1b6d4bcde8643ad641d395980bn,
+ b: -0x379e57728185fd098383a23c7f56dn,
+ r: -0x13901650808484018100003859009n
}, {
- a: "e0e8ab319d1f051560e1155ae5789dd4d9b638e07e5a57c3432e6cb9239d",
- b: "85c9cd1f09436dc45ac783f31a21a1ff4e11ceca00cc164",
- r: "e0e8ab319d1f0d5dfcf1f5def7fcddfcf9bf39e27e5ff7e35feeecbde3fd"
+ a: 0xcc4b3ba719bd1b37f254f36a72ee375ad22abn,
+ b: -0xb0c220750f2dad9de91ffb8a7bbf8ffefen,
+ r: -0x4000640e0c8098a0095880188a02dc55n
}, {
- a: "8131173cb5597e2ae560cae6d0907f004792b1b1c7",
- b: "-2ac290724a7c86",
- r: "-c290604a4c01"
+ a: 0xa230c33b718cd563f9c1577f4f8da160851902341ba1a6e6bdcbec413d98a18n,
+ b: 0xc2f4e2db2df59ccc34690479ebe64df967n,
+ r: 0xa230c33b718cd563f9c1577f4f8dad6fcf3db2ff5bede7e6bdcffeff7ddfb7fn
}, {
- a: "bdb24fd4d78b01d77e41d95f2c3eedeb2bf919",
- b: "-97f6ccbd94d64ada501e0f288568",
- r: "-14f60881940600d2401204000467"
+ a: -0x5fbac9a8f47n,
+ b: 0xf1bfe6f97e8f516685372b06ea89659c3df4ab3f6779e5c0b41e1b570fn,
+ r: -0x54ba0808841n
}, {
- a: "-d5ad86f9a4808394f6e7dcd2a67bd3e6a9",
- b: "1a80309934942e4e55fbf2ba4d1d2f8cc4697338097e2c1b7ce013da8a79965974286",
- r: "-408c066804000010f6c25450261a40a429"
+ a: -0x7fd2d4a4c60ba795e2fcn,
+ b: 0x33dcea557cc5156dacb9ad7b252598172f92d8cf7d38e69f0a0n,
+ r: -0xd02d02000082194025cn
}, {
- a: "c7a0086120a1613a28456",
- b: "b8",
- r: "c7a0086120a1613a284fe"
+ a: -0x3833089d7cf4e0181247572037a90cc2506342a2191137345e3a0e10n,
+ b: 0xededf5c7e54cd86afc6d838205c8a78cac7a0e410792a21cf3e4e38dd8ca2dd80n,
+ r: -0x283208103824c01802450720300908c2004142200111072052180210n
}, {
- a: "c8480c",
- b: "0",
- r: "c8480c"
+ a: -0x188818a47abbfe64357ccd1a33fb5cb875f86n,
+ b: -0x14faf1811ee737d048c025f7a1fe156f7e90d23a699d040609e631836500df2e30cdn,
+ r: -0x108808202299d040201c411832500cb061085n
}, {
- a: "ec8913fc89aa7a47672bc0f5269e8629cabf2dba88836cb3a9",
- b: "-52594e7",
- r: "-4010447"
+ a: 0xf60c0260022920bdbd1c837778657936956c15b4cb1n,
+ b: 0xa10df3b397c24n,
+ r: 0xf60c0260022920bdbd1c8377786579b79dffb7b7cb5n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a | b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a | d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: |");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/sar.js b/deps/v8/test/mjsunit/harmony/bigint/sar.js
index 7feb8aebcd..f66115dcb6 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/sar.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/sar.js
@@ -7,100 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "-4efa0d1f8a127",
- b: "-66",
- r: "-13be8347e2849c0000000000000000000000000"
+ a: 0x211a34fn,
+ b: 0xa6n,
+ r: 0n
}, {
- a: "-100000001",
- b: "20",
- r: "-2"
+ a: 0xa0d0aff5d2783ad192f37423n,
+ b: 0x7n,
+ r: 0x141a15feba4f075a325e6e8n
}, {
- a: "853cd87b0bd5c046aecbf4b3d",
- b: "-96",
- r: "214f361ec2f57011abb2fd2cf40000000000000000000000000000000000000"
+ a: -0x68ba010a12ed551fc17c67f63331eea3n,
+ b: 0xae5n,
+ r: -0x1n
}, {
- a: "-4bc82dba903fedec0a079f7ae4fa6bd6befa",
- b: "0",
- r: "-4bc82dba903fedec0a079f7ae4fa6bd6befa"
+ a: 0xb64dc8009n,
+ b: 0xf1n,
+ r: 0n
}, {
- a: "43969b4db0d921d9f0ca68f74e4e4b9073732a7955a5b4571",
- b: "-91",
- r: "872d369b61b243b3e194d1ee9c9c9720e6e654f2ab4b68ae2000000000000000000000000000000000000"
+ a: -0x364cdec628e438712e4f8cb7an,
+ b: 0x48cn,
+ r: -0x1n
}, {
- a: "495f57",
- b: "-a5",
- r: "92beae00000000000000000000000000000000000000000"
+ a: -0xb417099aba80a783f52790b779b38c2e6fbdf5f490d7b02a59e81032een,
+ b: 0x6a3n,
+ r: -0x1n
}, {
- a: "-22109b99d3025aaef5c3fbd27420a72",
- b: "45d",
- r: "-1"
+ a: 0x9d2b41n,
+ b: 0n,
+ r: 0x9d2b41n
}, {
- a: "b3f6b156f4afcf259efd3cd1",
- b: "c7",
- r: "0"
+ a: -0x4a070c83b02ba86434228a50cbaf50ddf38n,
+ b: -0x63n,
+ r: -0x25038641d815d4321a11452865d7a86ef9c0000000000000000000000000n
}, {
- a: "137aeeadc8d1395042e80393cc1b6a1c7b6e526ab1b6fc2f2859fd70e0c29df2802",
- b: "f49",
- r: "0"
+ a: 0x3e76514036ccb958c98f60n,
+ b: -0x76n,
+ r: 0xf9d94500db32e563263d8000000000000000000000000000000n
}, {
- a: "70f51026476e43bd7e911d37a4553701",
- b: "33",
- r: "e1ea204c8edc877afd2"
+ a: -0x51fe20d47ba564bc09337d5c8d284deb5006549bad2629230198f34n,
+ b: -0xfn,
+ r: -0x28ff106a3dd2b25e0499beae469426f5a8032a4dd693149180cc79a0000n
}, {
- a: "-3f935a89481c85d666498cf64fdc2a57028f7b295621dc665c0442229563",
- b: "-2",
- r: "-fe4d6a2520721759992633d93f70a95c0a3deca5588771997011088a558c"
+ a: 0x49adce1d09dadc5a0635f24d066d3a29e37b9be6059b88d0239d6ca6d92267a372n,
+ b: -0x6n,
+ r: 0x126b73874276b716818d7c93419b4e8a78dee6f98166e23408e75b29b64899e8dc80n
}, {
- a: "-c3",
- b: "-87",
- r: "-618000000000000000000000000000000000"
+ a: 0xca19971406fb07cn,
+ b: -0x17n,
+ r: 0x650ccb8a037d83e000000n
}, {
- a: "aae225520f630c0dfbb815f121836612d75a1f65a301461cd05ad0a741496",
- b: "-4",
- r: "aae225520f630c0dfbb815f121836612d75a1f65a301461cd05ad0a7414960"
+ a: 0xd8de8f631313b1f98f77d265ee48014f82eb20n,
+ b: 0xen,
+ r: 0x3637a3d8c4c4ec7e63ddf4997b920053e0bn
}, {
- a: "a5348f9af939041cc6ed386c060619a42f30c4aa8",
- b: "95",
- r: "529a"
+ a: -0x550d3470c8ad9b8f22ed01a4b1d3f648n,
+ b: -0x4dn,
+ r: -0xaa1a68e1915b371e45da034963a7ec900000000000000000000n
}, {
- a: "-4c27fc7e3892a6a5b517",
- b: "-6c",
- r: "-4c27fc7e3892a6a5b517000000000000000000000000000"
+ a: -0xa7eadcd4a1b8037081952f0a9199n,
+ b: -0xcfn,
+ r: -0x53f56e6a50dc01b840ca978548cc8000000000000000000000000000000000000000000000000000n
}, {
- a: "98efd35f2239f7efde9aef42ad0acd835e68ad868a2cd8fac260f1c7496e3fd2ada76",
- b: "0",
- r: "98efd35f2239f7efde9aef42ad0acd835e68ad868a2cd8fac260f1c7496e3fd2ada76"
+ a: -0x65b3933c9e1eb5f953f6cc55cn,
+ b: 0xa6cn,
+ r: -0x1n
}, {
- a: "-92f0264c863bdf66d4c83e8bf812123d759b4",
- b: "-96",
- r: "-24bc0993218ef7d9b5320fa2fe04848f5d66d00000000000000000000000000000000000000"
+ a: -0x92cea858475460661a2831a28434bde95e5132e1dcaeb161ec89bd9555f41n,
+ b: -0xbn,
+ r: -0x4967542c23aa30330d1418d1421a5ef4af289970ee5758b0f644decaaafa0800n
}, {
- a: "ec6341ff2b0e9cf8721e2eb4ec9c9",
- b: "74",
- r: "0"
+ a: -0x1d838ade17fe571916a26314b6a8b42e9b6e2b74238de734d6f5575df7n,
+ b: -0x7n,
+ r: -0xec1c56f0bff2b8c8b51318a5b545a174db715ba11c6f39a6b7aabaefb80n
}, {
- a: "-32de8dced947fa55cd0b91332a81f70",
- b: "-5b",
- r: "-196f46e76ca3fd2ae685c8999540fb800000000000000000000000"
+ a: 0xdd5bf8581e32875d44e26ef10c45bdff68d209015933586dc37211n,
+ b: -0xa9n,
+ r: 0x1bab7f0b03c650eba89c4dde2188b7bfed1a41202b266b0db86e422000000000000000000000000000000000000000000n
}, {
- a: "-3ef43bf8610f6533526ba734e85eafa04cd50a3",
- b: "-25",
- r: "-7de877f0c21eca66a4d74e69d0bd5f4099aa146000000000"
-}, {
- a: "-9979591a367a32ae0039c54fd0f3d9e0ccc80be52b7e517fc94d9f3587dc54d933bb",
- b: "0",
- r: "-9979591a367a32ae0039c54fd0f3d9e0ccc80be52b7e517fc94d9f3587dc54d933bb"
+ a: -0xa92d3014dcn,
+ b: -0x4cn,
+ r: -0xa92d3014dc0000000000000000000n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a >> b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a >> d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: >>");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/shl.js b/deps/v8/test/mjsunit/harmony/bigint/shl.js
index 1b0f309f88..bedd785b54 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/shl.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/shl.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "-9a6d035348727045f6abf7d59056d30e9ce885e87f5f8438347bfcda0a1f9b",
- b: "-2",
- r: "-269b40d4d21c9c117daafdf56415b4c3a73a217a1fd7e10e0d1eff368287e7"
+ a: -0xe813d76adc0a177778c0c232c595e8572b783210f4a7009d7c1787n,
+ b: 0x9en,
+ r: -0x3a04f5dab70285ddde30308cb1657a15cade0c843d29c0275f05e1c000000000000000000000000000000000000000n
}, {
- a: "615f9676062ea7a1b89396ce4208712f279475490829",
- b: "ff",
- r: "30afcb3b031753d0dc49cb672104389793ca3aa484148000000000000000000000000000000000000000000000000000000000000000"
+ a: -0xded00dff14554cn,
+ b: 0xd9n,
+ r: -0x1bda01bfe28aa98000000000000000000000000000000000000000000000000000000n
}, {
- a: "-9b6131d8b806543fce32b4c2ca2038ffa956929848a61b5eb7f",
- b: "-e7",
- r: "-1"
+ a: -0x50c2df089209be37998c8f6e30c4c95a61e77a9241n,
+ b: 0x46n,
+ r: -0x1430b7c224826f8de66323db8c3132569879dea490400000000000000000n
}, {
- a: "-331d9e",
- b: "0",
- r: "-331d9e"
+ a: 0x668086293eab52b52b879f00375d0f513f1ff9a93df788e38527en,
+ b: -0x3n,
+ r: 0xcd010c527d56a56a570f3e006eba1ea27e3ff3527bef11c70a4fn
}, {
- a: "cb79696d3a6f5d5d034e9d2",
- b: "-d33",
- r: "0"
+ a: 0n,
+ b: -0x1n,
+ r: 0n
}, {
- a: "ca99",
- b: "10",
- r: "ca990000"
+ a: 0x26bd28fb3e06bb0ddan,
+ b: -0xcn,
+ r: 0x26bd28fb3e06bb0n
}, {
- a: "6f97833d5",
- b: "0",
- r: "6f97833d5"
+ a: -0xa04dca5c74bdb1dbf2f0n,
+ b: -0x3fn,
+ r: -0x1409cn
}, {
- a: "67d36e7948d18af35f0823c0d58ba47ca0846cdfaa7a7407f09d44747275532681b343",
- b: "f",
- r: "33e9b73ca468c579af8411e06ac5d23e5042366fd53d3a03f84ea23a393aa99340d9a18000"
+ a: -0xd6b46ec3ead311e2e45ca4ae8aa9cf5acf4c2d6c61de06e9e36n,
+ b: -0xan,
+ r: -0x35ad1bb0fab4c478b917292ba2aa73d6b3d30b5b187781ba8n
}, {
- a: "f4896",
- b: "-7",
- r: "1e91"
+ a: -0x1f1922aen,
+ b: -0xbfn,
+ r: -0x1n
}, {
- a: "996ce2a9e0f7d65e0523204c9c469bfd14821efe571ac59cdc01",
- b: "1d",
- r: "132d9c553c1efacbc0a464099388d37fa29043dfcae358b39b8020000000"
+ a: 0x47n,
+ b: -0x2n,
+ r: 0x11n
}, {
- a: "-f8f",
- b: "f1",
- r: "-1f1e000000000000000000000000000000000000000000000000000000000000"
+ a: -0xf64n,
+ b: 0x7en,
+ r: -0x3d900000000000000000000000000000000n
}, {
- a: "-b685bbcd953ba9c5973ae523dc81d7b35e0cf2b9b51026d4ba1ac21bd5c3c18f9c13",
- b: "0",
- r: "-b685bbcd953ba9c5973ae523dc81d7b35e0cf2b9b51026d4ba1ac21bd5c3c18f9c13"
+ a: -0x78dn,
+ b: -0x85en,
+ r: -0x1n
}, {
- a: "e2295b362b7048fb163d1272178ed441517fc689e5ec5ea40f29",
- b: "-30",
- r: "e2295b362b7048fb163d1272178ed441517fc689"
+ a: -0xa03bc462b00e4113a2472eb73dn,
+ b: 0n,
+ r: -0xa03bc462b00e4113a2472eb73dn
}, {
- a: "-b322e816b014448f44e60b418582390d2a3ad95",
- b: "0",
- r: "-b322e816b014448f44e60b418582390d2a3ad95"
+ a: 0x3816aa0f01396f4abe0ee7164e211ce60ce590b615d86f6e6363c0bdd513d8a0n,
+ b: 0x4n,
+ r: 0x3816aa0f01396f4abe0ee7164e211ce60ce590b615d86f6e6363c0bdd513d8a00n
}, {
- a: "4c135e4d7",
- b: "0",
- r: "4c135e4d7"
+ a: -0xaf8711d29fdecd42a255f14aafe13b9404cced5df0af8b86080af7682cben,
+ b: 0x1n,
+ r: -0x15f0e23a53fbd9a8544abe2955fc277280999dabbe15f170c1015eed0597cn
}, {
- a: "-d5b694",
- b: "f1",
- r: "-1ab6d28000000000000000000000000000000000000000000000000000000000000"
+ a: 0x218b11dfn,
+ b: 0n,
+ r: 0x218b11dfn
}, {
- a: "-7994be7",
- b: "-d",
- r: "-3ccb"
+ a: 0x263fcb4707e6fd4a21b1f646ddd90a5e78d781c272621dcaf861d6250d7be675e067n,
+ b: -0x868n,
+ r: 0n
}, {
- a: "a6443add555ea15af90092e8",
- b: "42",
- r: "29910eb75557a856be4024ba00000000000000000"
+ a: 0x8312b459b585847682en,
+ b: 0x4en,
+ r: 0x20c4ad166d61611da0b80000000000000000000n
}, {
- a: "9385ed",
- b: "e5",
- r: "1270bda000000000000000000000000000000000000000000000000000000000"
+ a: 0xb45d42e0595a4b104b0b9a177ffdc52b401b61659fe163e720621aef8c8e6191ba7an,
+ b: 0x3n,
+ r: 0x5a2ea1702cad25882585cd0bbffee295a00db0b2cff0b1f390310d77c64730c8dd3d0n
}, {
- a: "-531",
- b: "7d",
- r: "-a620000000000000000000000000000000"
+ a: -0x42d2c4n,
+ b: 0xfdn,
+ r: -0x85a588000000000000000000000000000000000000000000000000000000000000000n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a << b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a << d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: <<");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/sub.js b/deps/v8/test/mjsunit/harmony/bigint/sub.js
index f0af2ca930..a1ff9b4bb3 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/sub.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/sub.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "e5e5071838eb1314",
- b: "3b7f55dce703a25ea14fdea6186156f775dec5d29f6edb3a014",
- r: "-3b7f55dce703a25ea14fdea6186156f775d067822deb4c88d00"
+ a: 0xc4fd438551d58edn,
+ b: 0x91b42ee55a50d974an,
+ r: -0x90ef31a1d4ff03e5dn
}, {
- a: "-f",
- b: "22d6805c7201a8ad4b9e6c2a7e8b5ab3bac",
- r: "-22d6805c7201a8ad4b9e6c2a7e8b5ab3bbb"
+ a: -0x86b766ec276e390087458ce093b07b2574732d5f3f5125n,
+ b: 0x4b1fe0dd23316de1b38b343d4112fbd7e8c8d081de5dbabda7357fd9e868466693den,
+ r: -0x4b1fe0dd23316de1b38b34c3f879e7ff5701d10923ea9b5157b0a54e5b95a5a5e503n
}, {
- a: "-22",
- b: "-11a0adfaedd5adb92297af1c3794ef5461dd8bc146db3",
- r: "11a0adfaedd5adb92297af1c3794ef5461dd8bc146d91"
+ a: 0x63e1eac0d5470c8f802ec390b6415c43ed16d28d6dn,
+ b: -0xbe26927b2f6ae85b5d3062d869d22da005805d23142a236955127b6ca688c07fn,
+ r: 0xbe26927b2f6ae85b5d30633c4bbcee754c8ceca342edb41f966ebf59bd5b4decn
}, {
- a: "-d20c39d0",
- b: "-46faa9d3eabcbd8b6d07adc2d0ff289d2",
- r: "46faa9d3eabcbd8b6d07adc2c3de65002"
+ a: -0x873cf988993d97e5180fn,
+ b: -0xd791aa365f881a7f05f21f73ef390c0b37408dd0a908801682464178426an,
+ r: 0xd791aa365f881a7f05f21f73ef390c0b37408dd021cb868de908a9932a5bn
}, {
- a: "-e5b56109a11",
- b: "211e1dcdf52f020ab0f16e18cc4e46027d05bfa3155b88973e630ae9a75bf2c7fbad269",
- r: "-211e1dcdf52f020ab0f16e18cc4e46027d05bfa3155b88973e630ae9a75cd87d5cb6c7a"
+ a: -0xcb273d5b123eff101c62f016ae2896223b93b9114e40d1fn,
+ b: 0x2169f8c954623f8381bf12fd0927e1a645f5b3c6c00e58105c297ba40446208dn,
+ r: -0x2169f8c954623f838e7186d2ba4bd19747bbe2c82af0e1727fe2b735192a2dacn
}, {
- a: "-b682aa",
- b: "-5fa59a6a80d39c0c885c030e9c8c84ec7",
- r: "5fa59a6a80d39c0c885c030e9c811cc1d"
+ a: -0xe965bf745fffcf22n,
+ b: 0x7c3b92f8a030cbfca7cc5f2fd454459c79a3ac2201524468fbn,
+ r: -0x7c3b92f8a030cbfca7cc5f2fd454459c7a8d11e175b244381dn
}, {
- a: "-c1325b8ab9fea966f093bbfbc2e611b0e5bf0b13ce047c7133056d4eea",
- b: "f97d5c4014c5cc87923c344a",
- r: "-c1325b8ab9fea966f093bbfbc2e611b0e6b888700e19423dba97a98334"
+ a: 0n,
+ b: 0x6975a8ededffd265f472e660e18n,
+ r: -0x6975a8ededffd265f472e660e18n
}, {
- a: "-1872900ab729911a3c021db53672eda07a9ad623",
- b: "152d13997090c43551edfc89d4c7ea5e9ffee4a114085858892e67e82edea6384aaaba7",
- r: "-152d13997090c43551edfc89d4c7ea602727e54c86a169fc4950433b960d803ff4581ca"
+ a: -0x236bda1a3a9635190f26c3739b7342f33a27cd2568n,
+ b: 0x1a7e6n,
+ r: -0x236bda1a3a9635190f26c3739b7342f33a27cecd4en
}, {
- a: "5440388fc10de9",
- b: "-4b",
- r: "5440388fc10e34"
+ a: 0xb682b9c88f0b6d7b5e9a4f77dd0d7885d62cd162783b2e55b30b954e10caf3n,
+ b: 0x2c6574161d9aff7bbebd95e561fbd5d70d893e8c9772bcc697cddn,
+ r: 0xb682b9c88c451639fcc09f8021219f27800d140507629a6ce9946981a74e16n
}, {
- a: "-198dc54795a81722f70acc9cc20505492172c7819ba168e57d",
- b: "-48f3b40bf850f771d44e423eb266846801d9e4e920c",
- r: "-198dc543066cd66371fb557f7d20e15dfb0a81017e031a5371"
+ a: 0xdd6aa5240c4n,
+ b: 0xb9c7bef20c0cc0f814cce3c40d43e4c26en,
+ r: -0xb9c7bef20c0cc0f814cce3b636999281aan
}, {
- a: "c78082429b3163ce243c778",
- b: "-97afe29",
- r: "c78082429b3163cebbec5a1"
+ a: 0x35cd14329d5f81dbn,
+ b: 0x25997b1f3500f9b27f2ceean,
+ r: -0x25997b1bd82fb688a934d0fn
}, {
- a: "-50df",
- b: "-d5352ec9c1b0e62b97ea1363ce8b72",
- r: "d5352ec9c1b0e62b97ea1363ce3a93"
+ a: -0x55ec15d7ed316df3fc7bcfcce0f0a6d7034113bad6d74bae88d4aab03032n,
+ b: 0x9d09b8869aab44d6690fad48c8dffd0e18cc21fb26c2a80ac08fd4f42296f46d37cddn,
+ r: -0x9d09b886a00a0633e7e2c42808a7ba0ae6db2c6896f6b9466dfd49af0b243f183ad0fn
}, {
- a: "-5c9777f93d64636ff8bcda39125625aa58a49e9a4f29ece2b7afa5d",
- b: "894bb7aa90b8687e6290c3218a4258dac9400d556caafe02cf8c312c053f2fc73",
- r: "-894bb7aa9114fff65bce2784fa3b15b50252637b1703a2a169db5b18e7f6df6d0"
+ a: 0n,
+ b: -0xfbf3b308a6066d497e1cn,
+ r: 0xfbf3b308a6066d497e1cn
}, {
- a: "-e15c51f0627e460c477",
- b: "-dfd13bac43ebe2f8e77f5b31314843",
- r: "dfd13bac43ddcd33c879334cd083cc"
+ a: 0x6acfc1e4b681be9582cd28453387ed2c29f5eb4cd6d78aaac67n,
+ b: -0x4f5997n,
+ r: 0x6acfc1e4b681be9582cd28453387ed2c29f5eb4cd6d78fa05fen
}, {
- a: "0",
- b: "adbd3e4b06b92771ae25eb52fca5fc86391303ebf7962",
- r: "-adbd3e4b06b92771ae25eb52fca5fc86391303ebf7962"
+ a: -0xc93eb5cae0c90eb77e702ccb44eea6180829d0b360872n,
+ b: 0xdn,
+ r: -0xc93eb5cae0c90eb77e702ccb44eea6180829d0b36087fn
}, {
- a: "960a8aa627a1c48721f4e0",
- b: "-9e8742ae61615481cdd12f0728f0b61",
- r: "9e8742ae6ac1fd2c304b4b4f9b10041"
+ a: -0x324ef140a16c5c20a9a189a720136f4836ea98056n,
+ b: 0x49471582a658bf5524128e130b3378d29331baa8eb8n,
+ r: -0x49796473e6fa2bb144bc2f9cb2538c41db68a540f0en
}, {
- a: "-abf5cf9ff3c15b0645009",
- b: "-e805773176aaa712d144e172db033c64aeaddf3380b2",
- r: "e805773176aaa712d144e1681ba6426572982ecf30a9"
+ a: 0x6f15ac8490de4e308286fdde1142ad0052c2cn,
+ b: 0x9ca6c715de18d7f64a751452e967b5807eec951777245ce5fe3n,
+ r: -0x9ca6c715de18d78734c88fc20b1984fdf7eeb70634775c933b7n
}, {
- a: "349ebb89b13a7149ec1a4e067574c3825f90ec4e4413948b808c6a",
- b: "-44cdc0e3efa11513525f68163033a59d7b0610070",
- r: "349ebb89b13a7596c8288d0086c5f8a856124f517e6d6c3be18cda"
+ a: -0xc303adn,
+ b: 0x5ec0a706a54978a59b10007942e563957ad22e708678fbfdb02862b2c3n,
+ r: -0x5ec0a706a54978a59b10007942e563957ad22e708678fbfdb02925b670n
}, {
- a: "a86c53e8c49b20cff057882c4345c40f5c34a8cb8",
- b: "-76453703c781511b52e663",
- r: "a86c53e8c49b20cff05eec7fb3823c246de9d731b"
+ a: -0xdb736b5f3979c24f70aafa5f17711d725d552a9778e1n,
+ b: 0x70b56a0773498a45b796cf79ea18211614cn,
+ r: -0xdb736b5f408518efe7df930372ea8a69fbf6aca8da2dn
}, {
- a: "-2647d3c",
- b: "776e5b3a57bd5196be1b9c99ae899d949cb2b94310c53be8910db71b",
- r: "-776e5b3a57bd5196be1b9c99ae899d949cb2b94310c53be893723457"
+ a: 0xe91b9a797168c6b7440f946n,
+ b: -0x2588c1301521dd9646a2e22e3aca462ca95e76069be2f7b95d8bb81ccn,
+ r: 0x2588c1301521dd9646a2e22e3aca462caa4791a11554608014cfc7b12n
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a - b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a - d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: -");
diff --git a/deps/v8/test/mjsunit/harmony/bigint/typedarray.js b/deps/v8/test/mjsunit/harmony/bigint/typedarray.js
new file mode 100644
index 0000000000..29713b8a20
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/typedarray.js
@@ -0,0 +1,240 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-bigint --allow-natives-syntax
+
+var intarray = new BigInt64Array(8);
+var uintarray = new BigUint64Array(8);
+
+function test(f) {
+ f();
+ f(); // Make sure we test ICs.
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+}
+
+function test_both(f) {
+ test(() => f(BigInt64Array));
+ test(() => f(BigUint64Array));
+}
+
+test(function basic_assignment() {
+ const x = 0x1234567890abcdefn;
+ intarray[0] = x;
+ assertEquals(x, intarray[0]);
+ uintarray[0] = x;
+ assertEquals(x, uintarray[0]);
+ const y = -0x76543210fedcba98n;
+ intarray[0] = y;
+ assertEquals(y, intarray[0]);
+});
+
+test(function construct() {
+ var a = new BigInt64Array([1n, -2n, {valueOf: () => 3n}]);
+ assertArrayEquals([1n, -2n, 3n], a);
+ assertThrows(() => new BigInt64Array([4, 5]), TypeError);
+ var b = new BigUint64Array([6n, -7n]);
+ assertArrayEquals([6n, 0xfffffffffffffff9n], b);
+ var c = new BigUint64Array(new BigInt64Array([8n, -9n]));
+ assertArrayEquals([8n, 0xfffffffffffffff7n], c);
+ var d = new BigInt64Array(new BigUint64Array([10n, 0xfffffffffffffff5n]));
+ assertArrayEquals([10n, -11n], d);
+ assertThrows(() => new BigInt64Array(new Int32Array([12, 13])), TypeError);
+ assertThrows(() => new Int32Array(new BigInt64Array([14n, -15n])), TypeError);
+});
+
+test_both(function copyWithin(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ a.copyWithin(0, 1, 3);
+ assertArrayEquals([2n, 3n, 3n], a);
+});
+
+test_both(function entries(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ var it = a.entries();
+ assertEquals([0, 1n], it.next().value);
+ assertEquals([1, 2n], it.next().value);
+ assertEquals([2, 3n], it.next().value);
+ assertTrue(it.next().done);
+});
+
+test_both(function every(BigArray) {
+ var a = BigArray.of(2n, 3n, 4n);
+ var seen = [];
+ assertTrue(a.every((x) => {seen.push(x); return x > 1n}));
+ assertEquals([2n, 3n, 4n], seen);
+});
+
+test_both(function fill(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n, 4n);
+ a.fill(7n, 1, 3);
+ assertArrayEquals([1n, 7n, 7n, 4n], a);
+ assertThrows(() => (new BigArray(3).fill(1)), TypeError);
+});
+
+test_both(function filter(BigArray) {
+ var a = BigArray.of(1n, 3n, 4n, 2n);
+ var b = a.filter((x) => x > 2n);
+ assertArrayEquals([3n, 4n], b);
+});
+
+test_both(function find(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ assertEquals(2n, a.find((x) => x === 2n));
+ assertEquals(undefined, a.find((x) => x === 2));
+});
+
+test_both(function findIndex(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ assertEquals(1, a.findIndex((x) => x === 2n));
+ assertEquals(-1, a.findIndex((x) => x === 2));
+});
+
+test_both(function forEach(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ var seen = [];
+ a.forEach((x) => seen.push(x));
+ assertEquals([1n, 2n, 3n], seen);
+});
+
+test_both(function from(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ var b = BigArray.from(a);
+ assertArrayEquals([1n, 2n, 3n], b);
+ assertThrows(() => BigArray.from([4, 5]), TypeError);
+ var c = BigArray.from([6, 7], BigInt);
+ assertArrayEquals([6n, 7n], c);
+ assertThrows(() => Int32Array.from([4n, 5n]), TypeError);
+ assertThrows(() => Int32Array.from([4, 5], BigInt), TypeError);
+});
+
+test(function from_mixed() {
+ var contents = [1n, 2n, 3n];
+ var a = new BigInt64Array(contents);
+ var b = BigUint64Array.from(a);
+ assertArrayEquals(contents, b);
+ var c = BigInt64Array.from(b);
+ assertArrayEquals(contents, c);
+});
+
+test_both(function includes(BigArray) {
+ var a = BigArray.of(0n, 1n, 2n);
+ assertTrue(a.includes(1n));
+ assertFalse(a.includes(undefined));
+ assertFalse(a.includes(1));
+ assertFalse(a.includes(0x1234567890abcdef123n)); // More than 64 bits.
+});
+
+test_both(function indexOf(BigArray) {
+ var a = BigArray.of(0n, 1n, 2n);
+ assertEquals(1, a.indexOf(1n));
+ assertEquals(-1, a.indexOf(undefined));
+ assertEquals(-1, a.indexOf(1));
+ assertEquals(-1, a.indexOf(0x1234567890abcdef123n)); // More than 64 bits.
+});
+
+test_both(function join(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ assertEquals("1-2-3", a.join("-"));
+});
+
+test_both(function keys(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ var it = a.keys();
+ assertEquals(0, it.next().value);
+ assertEquals(1, it.next().value);
+ assertEquals(2, it.next().value);
+ assertTrue(it.next().done);
+});
+
+test_both(function lastIndexOf(BigArray) {
+ var a = BigArray.of(0n, 1n, 2n);
+ assertEquals(1, a.lastIndexOf(1n));
+ assertEquals(-1, a.lastIndexOf(undefined));
+ assertEquals(-1, a.lastIndexOf(1));
+ assertEquals(-1, a.lastIndexOf(0x1234567890abcdef123n)); // > 64 bits.
+});
+
+test_both(function map(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ var b = a.map((x) => 2n * x);
+ assertEquals(BigArray, b.constructor);
+ assertArrayEquals([2n, 4n, 6n], b);
+});
+
+test_both(function of(BigArray) {
+ var a = BigArray.of(true, 2n, {valueOf: () => 3n}, "4");
+ assertArrayEquals([1n, 2n, 3n, 4n], a);
+ assertThrows(() => BigArray.of(1), TypeError)
+ assertThrows(() => BigArray.of(undefined), TypeError)
+});
+
+test_both(function reduce(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ assertEquals(6n, a.reduce((sum, x) => sum + x, 0n));
+});
+
+test_both(function reduceRight(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ assertEquals(6n, a.reduceRight((sum, x) => sum + x, 0n));
+});
+
+test_both(function reverse(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ a.reverse();
+ assertArrayEquals([3n, 2n, 1n], a);
+});
+
+test_both(function set(BigArray) {
+ var a = new BigArray(7);
+ a.set(BigArray.of(1n, 2n, 3n), 2);
+ assertArrayEquals([0n, 0n, 1n, 2n, 3n, 0n, 0n], a);
+ a.set([4n, 5n, 6n], 1);
+ assertArrayEquals([0n, 4n, 5n, 6n, 3n, 0n, 0n], a);
+ assertThrows(() => a.set([7, 8, 9], 3), TypeError);
+ assertThrows(() => a.set(Int32Array.of(10, 11), 2), TypeError);
+
+ var Other = BigArray == BigInt64Array ? BigUint64Array : BigInt64Array;
+ a.set(Other.of(12n, 13n), 4);
+ assertArrayEquals([0n, 4n, 5n, 6n, 12n, 13n, 0n], a);
+});
+
+test_both(function slice(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n, 4n);
+ var b = a.slice(1, 3);
+ assertArrayEquals([2n, 3n], b);
+});
+
+test_both(function some(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ assertTrue(a.some((x) => x === 2n));
+});
+
+test_both(function sort(BigArray) {
+ var a = BigArray.of(7n, 2n, 5n, 3n);
+ a.sort();
+ assertArrayEquals([2n, 3n, 5n, 7n], a);
+});
+
+test_both(function subarray(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n, 4n);
+ var b = a.subarray(1, 3);
+ assertEquals(BigArray, b.constructor);
+ assertArrayEquals([2n, 3n], b);
+});
+
+test_both(function toString(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ assertEquals("1,2,3", a.toString());
+});
+
+test_both(function values(BigArray) {
+ var a = BigArray.of(1n, 2n, 3n);
+ var it = a.values();
+ assertEquals(1n, it.next().value);
+ assertEquals(2n, it.next().value);
+ assertEquals(3n, it.next().value);
+ assertTrue(it.next().done);
+});
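
The new typed arrays store exactly 64 bits per element, so writes wrap the same way BigInt.asIntN/asUintN do, and any attempt to write plain Numbers throws. A condensed version of what basic_assignment and construct exercise:

    var u = new BigUint64Array(1);
    u[0] = -7n;                                   // wraps modulo 2**64
    if (u[0] !== 0xfffffffffffffff9n) print("wrap failed");
    if (u[0] !== BigInt.asUintN(64, -7n)) print("asUintN mismatch");
    try { u[0] = 7; print("unreachable"); } catch (e) { /* TypeError */ }
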
diff --git a/deps/v8/test/mjsunit/harmony/bigint/xor.js b/deps/v8/test/mjsunit/harmony/bigint/xor.js
index d8c9012971..a934825bd9 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/xor.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/xor.js
@@ -7,96 +7,94 @@
// Flags: --harmony-bigint
var data = [{
- a: "abde23cae3113c95ec7f444c7277658",
- b: "-65e40fb1",
- r: "-abde23cae3113c95ec7f444a2c379e9"
+ a: -0x46505bec40d461c595b5e4be178b7d00n,
+ b: -0x9170e5437d4e3ec7c0971e2c6d3bbbd2929ff108ea4ee64f7a91aa367fn,
+ r: 0x9170e5437d4e3ec7c0971e2c6d7deb897edf25692fdb53abc486214a81n
}, {
- a: "2d0bbdc05059c78b7e9f43689b2f7a9afaefd679212c2a9b990",
- b: "29fcdb109b54650f9762b494916bc1cf14853430697febe7acf4327983ce0c6c4c183",
- r: "29fcdb109b54650f974fbf29513b98089ffbab7301e4c49d360eddaffaef2046d7813"
+ a: -0x49171f24aa9660f7f854148018a8b03256200508n,
+ b: 0x75c2103e4e8e52d0311n,
+ r: -0x49171f24aa9660f7f85413dc39ab54dab30d0617n
}, {
- a: "b958dc77068d01811e031d6320df5e53823697be94f7654340b",
- b: "-c1f5ca609a658e24fc33fec10a84b18fb745cb7c6",
- r: "-b958dc77064cf44b7e9978ed04236dad433c130f1b4020883cf"
+ a: -0x3cde31210d299e4f6734f76c4f2df3056fn,
+ b: -0x402b7fe66d16877867f43n,
+ r: 0x3cde31210d299a4dd0ca91bd275a757a2cn
}, {
- a: "cf7319e3fe16912370c830906f88b",
- b: "98d972f3c",
- r: "cf7319e3fe16912370c8a8491d7b7"
+ a: 0x727n,
+ b: 0xe1c82371da63bdb801273077095be8977ff9f14aa619829bf4b418n,
+ r: 0xe1c82371da63bdb801273077095be8977ff9f14aa619829bf4b33fn
}, {
- a: "aea6d9e7cec74bca19",
- b: "5abbcd0c5aa1f96fef9db32b3618de782db64b8f6b4",
- r: "5abbcd0c5aa1f96fef9db32b3cf2b3e6515a3f33cad"
+ a: 0x7c2b1e0918a85bf5faea9077b7dn,
+ b: -0xed714ba58fd54b19n,
+ r: -0x7c2b1e0918a68ce140b26d23066n
}, {
- a: "-b522a022e90fa094f3b729a7a0a914349f5e1fd778829d7576ad36711",
- b: "-aa00d2fd6a7636",
- r: "b522a022e90fa094f3b729a7a0a914349f5e1fd778883d78597b91125"
+ a: 0x1dded5fd695f4babcan,
+ b: 0x7e1cb4346c68e84f8fbdd3501daead2ce99a90e56038n,
+ r: 0x7e1cb4346c68e84f8fbdd3501db373f914f3cfaecbf2n
}, {
- a: "9c2bc822ec4a590eb8a77ee630009713090",
- b: "30b13459c1434",
- r: "9c2bc822ec4a590eb8a77ed68134ced24a4"
+ a: 0xec7d9595de759652a3bb96c80edca63790c32ce7f6cf0ef0n,
+ b: -0x67f0feef424f56d6ae6856a20901199de53ebn,
+ r: -0xec7d9595de73e95d4d4fb23d63b640b2fae3bcf66f115d1bn
}, {
- a: "-f14873e1f6121d584d5541073c7ce162873e156b72fb3c943ffd5f212c0d6",
- b: "f449f0a292048546924d2973626f5441c045d4adbfd00d301791f0db965f",
- r: "-fe0cecebdf32550c247193900a5a14269b3a4821a9063c473e84402c9568b"
+ a: 0x69ab204291f752866c3d49fdf1d656298f43e896cea3ef31a04n,
+ b: -0x5f5b2fd130e33ed76fa22de5ac70bdf96dee80a09e3107e1c93a135ea80b7e3640cdn,
+ r: -0x5f5b2fd130e33ed769389fe1856fc8d10b2d543f412c628351ce2dd7c4e140c55ac9n
}, {
- a: "83d5552fba4213d8dd1ed9bc6c2",
- b: "4f7ccc10ba9b6880b862f8d5e1c9",
- r: "47419942413f49bd35b3154e270b"
+ a: -0xd396d40076en,
+ b: 0x3158623c80393n,
+ r: -0x318bf4e8804ffn
}, {
- a: "9fdb44741177921c8338b70fc7fa362295bfc92f6275fa16492",
- b: "93676e5ef972",
- r: "9fdb44741177921c8338b70fc7fa362295bfc92654031ff9de0"
+ a: 0x8c1dbfbd68007d171986777446303896a1ee46n,
+ b: -0x75fd69710f5bea1cece9d59ec4bca29712b49dcf5ee5cc3299d7fb4fb024f10ae955fn,
+ r: -0x75fd69710f5bea1cece9d59ec4bca29fd36f6619dee21d4301b08c0bd3277860f7b19n
}, {
- a: "4355637ed",
- b: "-7aeb3013cc5eb39d56eed8104407a3e68039944f7673a0c75bd3",
- r: "-7aeb3013cc5eb39d56eed8104407a3e68039944f767795916c40"
+ a: -0x4c6e3ccbabdd6f58450ec3ec8adfb10831b70893cb996f0ac97a0ae2f3a943185d1n,
+ b: 0xeb136731a19867949d46886e62050c4b446767a076c73dn,
+ r: -0x4c6e3ccbabdd6f58450ecd5dbcacab11b7ce4147a31f892a99bebea485d344742een
}, {
- a: "7fdf50188f716c13feced67a1c33ecf422",
- b: "-7106cd7b9",
- r: "-7fdf50188f716c13feced67a1b2380239b"
+ a: 0xeaff60e10ebb4b8f2da6a517n,
+ b: -0x17af62a3e808af1be3d864132dfd7363bc95872580585d7a9904n,
+ r: -0x17af62a3e808af1be3d864132dfd999cdc74899ecbd770dc3c15n
}, {
- a: "368cf8d0f5790a03774b9a1e116f82281ebd9e18de7f54a7d91f50",
- b: "8bc4e4f24ce2a7d037079552e6c7db2795f15c92a01f4e0d9",
- r: "368cf06cbb362ecd5d36996e683aac44630fe747cbb67ea62dff89"
+ a: -0xd4424b4f09210108076d63bd621180f280df8f4480n,
+ b: -0x20953185dd2c534b3cb8da73ce55ab386d62fe8a793a1e74cdf3ad95f3cc2573b3n,
+ r: 0x20953185dd2c534b3cb8da731a17e0776443ff827e577dc9afe22d677313aa37cdn
}, {
- a: "-7466a596078a20cc4eca96953e3",
- b: "-666328e5437b1475dcfe2f44f1c6a82af82ce7ee7cf229c8398836d2d834f9014",
- r: "666328e5437b1475dcfe2f44f1c6a82af82ce79a1a57bfcfb3a8fa9c12a26c3f1"
+ a: 0x84317d7ec6df6dbfe9413cee812ff95c587f61c7b8de5828d445a69555cff26fba9n,
+ b: -0x853c667aed62685df5993748e5668802b7bf918f8c1222a5267c33f013ff1e10f1b909n,
+ r: -0x8534256d3a8e05ab2e67a35b2b8e9afd227a16799069af40a4f177aa7aaa42efd742a2n
}, {
- a: "ad284b70a22d96bdefba53f134c65a1e4958013bb9a31f091fde6fc89",
- b: "-c89374df2",
- r: "-ad284b70a22d96bdefba53f134c65a1e4958013bb9a31f09d74d1b179"
+ a: -0xe3b4bf724b172b23c5834ed6f70f984ab3b146070770cbc3b86779db7n,
+ b: -0x68166de3a03d9efce30cb36e242db000c850c0d4f454594e23a1a7cn,
+ r: 0xe3dca91fa8b716bd39604265992bb5fab37916c7d3849f9af644d87cdn
}, {
- a: "-47df52354db5",
- b: "-aa7f61aba9ad859e803e964418af30",
- r: "aa7f61aba9ad859e807949162de29b"
+ a: -0x5358b8efb260b40e37cb5b45eb4e7864n,
+ b: -0x3e617e3967a5b3554ebf24f1e51a253dfc20a76ef01f02442fn,
+ r: 0x3e617e3967a5b3554eec7c490aa84589f2176c35b5f44c3c4dn
}, {
- a: "-f03ea80f22a3dc03f036b13f85faf5fb1",
- b: "86e9110772d369fdd52b45a8fb22cea26cb73e908408f8a3cdf637f0042c8efdc11",
- r: "-86e9110772d369fdd52b45a8fb22cea26c4700388b2a5b7fce0601413ba974083a2"
+ a: -0x702359917a8aceedc381n,
+ b: -0x714f08d9c29e9fc0044982eb2469707896265n,
+ r: 0x714f08d9c29e9fc0034bb77233c1dc964a1e4n
}, {
- a: "3603d29c8",
- b: "f4849ec3ec3c352b",
- r: "f4849ec08c011ce3"
+ a: -0x455ac38dn,
+ b: -0x6152562bf5b6f785abec41e8625bccd3bf3067225733dan,
+ r: 0x6152562bf5b6f785abec41e8625bccd3bf3067670df055n
}, {
- a: "e6668ed8eae8b4bb7bdf522d44e9f1bcf66",
- b: "361cab4f5be1",
- r: "e6668ed8eae8b4bb7bdf522e25234549487"
+ a: 0x47n,
+ b: 0xa3d30490286ddf5d4f4256n,
+ r: 0xa3d30490286ddf5d4f4211n
}, {
- a: "-d0395d",
- b: "-4a8ee89d006d22a124070241caf5f4343bdfd30d12",
- r: "4a8ee89d006d22a124070241caf5f4343bdf03344d"
+ a: -0x530cc599859ccdbbb3c1dcb46248a4474c63323cc58a7891da79e0322b91c795ac57n,
+ b: 0x2d986083244cd488657c947a952ae15b23d90ebbc34daan,
+ r: -0x530cc599859ccdbbb3c1dc99fa28276300b7ba59b91e0204f098bb11f29f7c56e1fdn
}];
var error_count = 0;
for (var i = 0; i < data.length; i++) {
var d = data[i];
- var a = BigInt.parseInt(d.a, 16);
- var b = BigInt.parseInt(d.b, 16);
- var r = a ^ b;
- if (d.r !== r.toString(16)) {
- print("Input A: " + a.toString(16));
- print("Input B: " + b.toString(16));
+ var r = d.a ^ d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(16));
+ print("Input B: " + d.b.toString(16));
print("Result: " + r.toString(16));
print("Expected: " + d.r);
print("Op: ^");
diff --git a/deps/v8/test/mjsunit/harmony/function-sent.js b/deps/v8/test/mjsunit/harmony/function-sent.js
deleted file mode 100644
index cd0ca957a8..0000000000
--- a/deps/v8/test/mjsunit/harmony/function-sent.js
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-function-sent
-
-
-{
- function* g() { return function.sent }
- assertEquals({value: 42, done: true}, g().next(42));
-}
-
-
-{
- function* g() {
- try {
- yield function.sent;
- } finally {
- yield function.sent;
- return function.sent;
- }
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, x.next(1));
- assertEquals({value: 2, done: false}, x.next(2));
- assertEquals({value: 3, done: true}, x.next(3));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, x.next(1));
- assertEquals({value: 2, done: false}, x.throw(2));
- assertEquals({value: 3, done: true}, x.next(3));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, x.next(1));
- assertEquals({value: 2, done: false}, x.return(2));
- assertEquals({value: 3, done: true}, x.next(3));
- }
-}
-
-
-{
- function* inner() {
- try {
- yield function.sent;
- } finally {
- return 23;
- }
- }
-
- function* g() {
- yield function.sent;
- yield* inner();
- return function.sent;
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, x.next(1));
- assertEquals({value: undefined, done: false}, x.next(2));
- assertEquals({value: 3, done: true}, x.next(3));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, x.next(1));
- assertEquals({value: undefined, done: false}, x.next(2));
- assertEquals({value: 42, done: true}, x.throw(42));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, x.next(1));
- assertEquals({value: undefined, done: false}, x.next(2));
- assertEquals({value: 23, done: true}, x.return(42));
- }
-}
-
-
-assertThrows("function f() { return function.sent }", SyntaxError);
-assertThrows("() => { return function.sent }", SyntaxError);
-assertThrows("() => { function.sent }", SyntaxError);
-assertThrows("() => function.sent", SyntaxError);
-assertThrows("({*f() { function.sent }})", SyntaxError);
-assertDoesNotThrow("({*f() { return function.sent }})");
diff --git a/deps/v8/test/mjsunit/harmony/function-tostring.js b/deps/v8/test/mjsunit/harmony/function-tostring.js
index 949ac2282f..8fccf0dacd 100644
--- a/deps/v8/test/mjsunit/harmony/function-tostring.js
+++ b/deps/v8/test/mjsunit/harmony/function-tostring.js
@@ -122,3 +122,25 @@ testDynamicFunction("a, b", "return a");
testDynamicFunction("a,/*A*/b", "return a");
testDynamicFunction("/*A*/a,b", "return a");
testDynamicFunction("a,b", "return a/*A*/");
+
+// Proxies of functions should not throw, but return a NativeFunction.
+assertEquals("function () { [native code] }",
+ new Proxy(function () { hidden }, {}).toString());
+assertEquals("function () { [native code] }",
+ new Proxy(() => { hidden }, {}).toString());
+assertEquals("function () { [native code] }",
+ new Proxy(class {}, {}).toString());
+assertEquals("function () { [native code] }",
+ new Proxy(function() { hidden }.bind({}), {}).toString());
+assertEquals("function () { [native code] }",
+ new Proxy(function*() { hidden }, {}).toString());
+assertEquals("function () { [native code] }",
+ new Proxy(async function() { hidden }, {}).toString());
+assertEquals("function () { [native code] }",
+ new Proxy(async function*() { hidden }, {}).toString());
+assertEquals("function () { [native code] }",
+ new Proxy({ method() { hidden } }.method, {}).toString());
+
+// Non-callable proxies still throw.
+assertThrows(() => Function.prototype.toString.call(new Proxy({}, {})),
+ TypeError);
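
The new assertions track the revised Function.prototype.toString behavior: a callable with no retrievable source text, proxies included, stringifies as a NativeFunction while staying fully callable. A sketch:

    var p = new Proxy(function add(a, b) { return a + b; }, {});
    p(1, 2);       // 3 -- the proxy still forwards calls
    p.toString();  // "function () { [native code] }" -- source stays hidden
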
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-ic.js b/deps/v8/test/mjsunit/harmony/private-fields-ic.js
new file mode 100644
index 0000000000..e7889b1e7b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-fields-ic.js
@@ -0,0 +1,294 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-fields
+
+{
+ class X {
+ #x = 1;
+ getX(arg) { return arg.#x; }
+ setX(arg, val) { arg.#x = val; }
+ }
+
+ let x1 = new X;
+ let y = new class {};
+
+ // IC: 0 -> Error
+ assertThrows(() => x1.getX(y), TypeError);
+ assertThrows(() => x1.getX(y), TypeError);
+ assertThrows(() => x1.getX(y), TypeError);
+
+ assertThrows(() => x1.setX(y, 2), TypeError);
+ assertThrows(() => x1.setX(y, 3), TypeError);
+ assertThrows(() => x1.setX(y, 4), TypeError);
+
+ // IC: 0 -> Monomorphic
+ assertEquals(1, x1.getX(x1));
+ assertEquals(1, x1.getX(x1));
+ assertEquals(1, x1.getX(x1));
+
+ x1.setX(x1, 2);
+ x1.setX(x1, 3);
+ x1.setX(x1, 4);
+}
+
+{
+ class X {
+ #x = 1;
+ getX(arg) { return arg.#x; }
+ setX(arg, val) { arg.#x = val; }
+ }
+
+ let x1 = new X;
+ // IC: 0 -> Monomorphic
+ assertEquals(1, x1.getX(x1));
+ assertEquals(1, x1.getX(x1));
+ assertEquals(1, x1.getX(x1));
+
+ x1.setX(x1, 2);
+ x1.setX(x1, 3);
+ x1.setX(x1, 4);
+
+ let y = new class {};
+ // IC: Monomorphic -> Error
+ assertThrows(() => x1.getX(y), TypeError);
+ assertThrows(() => x1.getX(y), TypeError);
+ assertThrows(() => x1.getX(y), TypeError);
+
+ assertThrows(() => x1.setX(y, 2), TypeError);
+ assertThrows(() => x1.setX(y, 3), TypeError);
+ assertThrows(() => x1.setX(y, 4), TypeError);
+
+ let x3 = new X;
+ // IC: Monomorphic -> Monomorphic
+ assertEquals(1, x1.getX(x3));
+ assertEquals(1, x1.getX(x3));
+ assertEquals(1, x1.getX(x3));
+
+ x1.setX(x3, 2);
+ x1.setX(x3, 3);
+ x1.setX(x3, 4);
+}
+
+
+{
+ class X {
+ #x = 1;
+ getX(arg) { return arg.#x; }
+ setX(arg, val) { arg.#x = val; }
+ }
+
+ let x1 = new X;
+ // IC: 0 -> Monomorphic
+ assertEquals(1, x1.getX(x1));
+ assertEquals(1, x1.getX(x1));
+ assertEquals(1, x1.getX(x1));
+
+ x1.setX(x1, 2);
+ x1.setX(x1, 3);
+ x1.setX(x1, 4);
+
+ class X2 extends X {
+ #x2 = 2;
+ }
+
+ let x2 = new X2;
+ // IC: Monomorphic -> Polymorphic
+ assertEquals(1, x1.getX(x2));
+ assertEquals(1, x1.getX(x2));
+ assertEquals(1, x1.getX(x2));
+
+ x1.setX(x2, 2);
+ x1.setX(x2, 3);
+ x1.setX(x2, 4);
+
+ let y = new class {};
+
+ // IC: Polymorphic -> Error
+ assertThrows(() => x1.getX(y), TypeError);
+ assertThrows(() => x1.getX(y), TypeError);
+ assertThrows(() => x1.getX(y), TypeError);
+
+ assertThrows(() => x1.setX(y, 2), TypeError);
+ assertThrows(() => x1.setX(y, 3), TypeError);
+ assertThrows(() => x1.setX(y, 4), TypeError);
+
+ class X3 extends X {
+ #x3 = 2;
+ }
+
+ let x3 = new X3;
+ // IC: Polymorphic -> Polymorphic
+ assertEquals(1, x1.getX(x3));
+ assertEquals(1, x1.getX(x3));
+ assertEquals(1, x1.getX(x3));
+
+ x1.setX(x3, 2);
+ x1.setX(x3, 3);
+ x1.setX(x3, 4);
+}
+
+{
+ class X {
+ #x = 1;
+ getX(arg) { return arg.#x; }
+ setX(arg, val) { arg.#x = val; }
+ }
+
+ let x1 = new X;
+ // IC: 0 -> Monomorphic
+ assertEquals(1, x1.getX(x1));
+ assertEquals(1, x1.getX(x1));
+ assertEquals(1, x1.getX(x1));
+
+ x1.setX(x1, 2);
+ x1.setX(x1, 3);
+ x1.setX(x1, 4);
+
+ class X2 extends X {
+ #x2 = 2;
+ }
+
+ let x2 = new X2;
+ // IC: Monomorphic -> Polymorphic
+ assertEquals(1, x1.getX(x2));
+ assertEquals(1, x1.getX(x2));
+ assertEquals(1, x1.getX(x2));
+
+ x1.setX(x2, 2);
+ x1.setX(x2, 3);
+ x1.setX(x2, 4);
+
+ class X3 extends X {
+ #x3 = 2;
+ }
+
+ let x3 = new X3;
+ assertEquals(1, x1.getX(x3));
+ assertEquals(1, x1.getX(x3));
+ assertEquals(1, x1.getX(x3));
+
+ x1.setX(x3, 2);
+ x1.setX(x3, 3);
+ x1.setX(x3, 4);
+
+
+ class X4 extends X {
+ #x4 = 2;
+ }
+
+ let x4 = new X4;
+ assertEquals(1, x1.getX(x4));
+ assertEquals(1, x1.getX(x4));
+ assertEquals(1, x1.getX(x4));
+
+ x1.setX(x4, 2);
+ x1.setX(x4, 3);
+ x1.setX(x4, 4);
+
+ class X5 extends X {
+ #x5 = 2;
+ }
+
+ let x5 = new X5;
+ // IC: Polymorphic -> Megamorphic
+ assertEquals(1, x1.getX(x5));
+ assertEquals(1, x1.getX(x5));
+ assertEquals(1, x1.getX(x5));
+
+ x1.setX(x5, 2);
+ x1.setX(x5, 3);
+ x1.setX(x5, 4);
+
+ let y = new class {};
+
+ // IC: Megamorphic -> Error
+ assertThrows(() => x1.getX(y), TypeError);
+ assertThrows(() => x1.getX(y), TypeError);
+ assertThrows(() => x1.getX(y), TypeError);
+
+ assertThrows(() => x1.setX(y, 2), TypeError);
+ assertThrows(() => x1.setX(y, 3), TypeError);
+ assertThrows(() => x1.setX(y, 4), TypeError);
+
+ class X6 extends X {
+ #x6 = 2;
+ }
+
+ let x6 = new X6;
+ // IC: Megamorphic -> Megamorphic
+ assertEquals(1, x1.getX(x6));
+ assertEquals(1, x1.getX(x6));
+ assertEquals(1, x1.getX(x6));
+
+ x1.setX(x6, 2);
+ x1.setX(x6, 3);
+ x1.setX(x6, 4);
+}
+
+{
+ class C {
+ #a = 1;
+ getA() { return this.#a; }
+ setA(v) { this.#a = v; }
+ }
+
+ let p = new Proxy(new C, {
+ get(target, name) {
+ return target[name];
+ },
+
+ set(target, name, val) {
+ target[name] = val;
+ }
+ });
+
+ assertThrows(() => p.getA(), TypeError);
+ assertThrows(() => p.getA(), TypeError);
+ assertThrows(() => p.getA(), TypeError);
+
+ assertThrows(() => p.setA(2), TypeError);
+ assertThrows(() => p.setA(3), TypeError);
+ assertThrows(() => p.setA(4), TypeError);
+
+ let x = new Proxy(new C, {});
+ assertThrows(() => x.getA(), TypeError);
+ assertThrows(() => x.getA(), TypeError);
+ assertThrows(() => x.getA(), TypeError);
+
+ assertThrows(() => x.setA(2), TypeError);
+ assertThrows(() => x.setA(3), TypeError);
+ assertThrows(() => x.setA(4), TypeError);
+}
+
+{
+ class A {
+ constructor(arg) {
+ return arg;
+ }
+ }
+
+ class X extends A {
+ #x = 1;
+
+ constructor(arg) {
+ super(arg);
+ }
+
+ getX(arg) { return arg.#x; }
+
+ setX(arg, val) { arg.#x = val; }
+ }
+
+ let proxy = new Proxy({}, {});
+ let x = new X(proxy);
+
+ assertEquals(1, X.prototype.getX(proxy));
+ assertEquals(1, X.prototype.getX(proxy));
+ assertEquals(1, X.prototype.getX(proxy));
+
+ X.prototype.setX(proxy, 2);
+ X.prototype.setX(proxy, 3);
+ X.prototype.setX(proxy, 4);
+}
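
The IC comments above (0 -> Monomorphic -> Polymorphic -> Megamorphic, plus the -> Error transitions) track how the feedback for `arg.#x` widens as receivers with different maps reach the same access site. The language behavior being cached is the private-name brand check, which is observable without any harness; a minimal sketch (the names are illustrative, and console.log stands in for the mjsunit asserts):

class Counter {
  #count = 0;
  // Both the read and the write perform the brand check: a TypeError is
  // thrown unless obj had #count installed at construction time.
  increment(obj) { obj.#count += 1; return obj.#count; }
}

const c = new Counter();
console.log(c.increment(c));  // 1: c carries the #count brand
try {
  c.increment({});            // a plain object never received the brand
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
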
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-special-object.js b/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
new file mode 100644
index 0000000000..309143d904
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-fields --allow-natives-syntax
+
+async function f(assert) {
+ try {
+ module_namespace_obj = await import('modules-skip-1.js');
+ } catch(e) {
+ %AbortJS(e);
+ }
+
+ class A {
+ constructor(arg) {
+ return arg;
+ }
+ }
+
+ class X extends A {
+ #x = 1;
+
+ constructor(arg) {
+ super(arg);
+ }
+
+ getX(arg) {
+ return arg.#x;
+ }
+
+ setX(arg, val) { arg.#x = val; }
+ }
+
+ let x = new X(module_namespace_obj);
+
+ assert.equals(1, X.prototype.getX(module_namespace_obj));
+ assert.equals(1, X.prototype.getX(module_namespace_obj));
+ assert.equals(1, X.prototype.getX(module_namespace_obj));
+
+ X.prototype.setX(module_namespace_obj, 2);
+ X.prototype.setX(module_namespace_obj, 3);
+ X.prototype.setX(module_namespace_obj, 4);
+}
+
+testAsync(assert => {
+ assert.plan(3);
+
+ f(assert).catch(assert.unreachable);
+}, "private-fields-special-object");
diff --git a/deps/v8/test/mjsunit/harmony/private-fields.js b/deps/v8/test/mjsunit/harmony/private-fields.js
new file mode 100644
index 0000000000..5edf606407
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-fields.js
@@ -0,0 +1,441 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-fields --allow-natives-syntax
+
+
+"use strict";
+
+// TODO(gsathya): Missing tests:
+// - tests involving eval
+{
+ class C {
+ #a;
+ getA() { return this.#a; }
+ }
+
+ assertEquals(undefined, C.a);
+
+ let c = new C;
+ assertEquals(undefined, c.a);
+ assertEquals(undefined, c.getA());
+}
+
+{
+ class C {
+ #a = 1;
+ getA() { return this.#a; }
+ }
+
+ assertEquals(undefined, C.a);
+
+ let c = new C;
+ assertEquals(undefined, c.a);
+ assertEquals(1, c.getA());
+}
+
+{
+ class C {
+ #a = 1;
+ #b = this.#a;
+ getB() { return this.#b; }
+ }
+
+ let c = new C;
+ assertEquals(1, c.getB());
+}
+
+{
+ class C {
+ #a = 1;
+ getA() { return this.#a; }
+ constructor() {
+ assertEquals(1, this.#a);
+ this.#a = 5;
+ }
+ }
+
+ let c = new C;
+ assertEquals(5, c.getA());
+}
+
+{
+ class C {
+ #a = this;
+ #b = () => this;
+ getA() { return this.#a; }
+ getB() { return this.#b; }
+ }
+
+ let c1 = new C;
+ assertSame(c1, c1.getA());
+ assertSame(c1, c1.getB()());
+ let c2 = new C;
+ assertSame(c1, c1.getB().call(c2));
+}
+
+{
+ class C {
+ #a = this;
+ #b = function() { return this; };
+ getA() { return this.#a; }
+ getB() { return this.#b; }
+ }
+
+ let c1 = new C;
+ assertSame(c1, c1.getA());
+ assertSame(c1, c1.getB().call(c1));
+ let c2 = new C;
+ assertSame(c2, c1.getB().call(c2));
+}
+
+
+{
+ class C {
+ #a = function() { return 1 };
+ getA() {return this.#a;}
+ }
+
+ let c = new C;
+ assertEquals('#a', c.getA().name);
+}
+
+{
+ let d = function() { return new.target; }
+ class C {
+ #c = d;
+ getC() { return this.#c; }
+ }
+
+ let c = new C;
+ assertEquals(undefined, c.getC()());
+ assertSame(new d, new (c.getC()));
+}
+
+{
+ class C {
+ #b = new.target;
+ #c = () => new.target;
+ getB() { return this.#b; }
+ getC() { return this.#c; }
+ }
+
+ let c = new C;
+ assertEquals(undefined, c.getB());
+ assertEquals(undefined, c.getC()());
+}
+
+{
+ class C {
+ #a = 1;
+ #b = () => this.#a;
+ getB() { return this.#b; }
+ }
+
+ let c1 = new C;
+ assertSame(1, c1.getB()());
+}
+
+{
+ class C {
+ #a = 1;
+ getA(instance) { return instance.#a; }
+ }
+
+ class B { }
+ let c = new C;
+ assertEquals(undefined, c.a);
+ assertEquals(1, c.getA(c));
+
+ assertThrows(() => c.getA(new B), TypeError);
+}
+
+{
+ class A {
+ #a = 1;
+ getA() { return this.#a; }
+ }
+
+ class B extends A {}
+ let b = new B;
+ assertEquals(1, b.getA());
+}
+
+{
+ let prototypeLookup = false;
+ class A {
+ set a(val) {
+ prototypeLookup = true;
+ }
+
+ get a() { return undefined; }
+ }
+
+ class C extends A {
+ #a = 1;
+ getA() { return this.#a; }
+ }
+
+ let c = new C;
+ assertEquals(1, c.getA());
+ assertEquals(false, prototypeLookup);
+}
+
+{
+ class A {
+ constructor() { this.a = 1; }
+ }
+
+ class B extends A {
+ #b = this.a;
+ getB() { return this.#b; }
+ }
+
+ let b = new B;
+ assertEquals(1, b.getB());
+}
+
+{
+ class A {
+ #a = 1;
+ getA() { return this.#a; }
+ }
+
+ class B extends A {
+ #b = super.getA();
+ getB() { return this.#b; }
+ }
+
+ let b = new B;
+ assertEquals(1, b.getB());
+}
+
+{
+ class A {
+ #a = 1;
+ getA() { return this.#a;}
+ }
+
+ class B extends A {
+ #a = 2;
+ get_A() { return this.#a;}
+ }
+
+ let a = new A;
+ let b = new B;
+ assertEquals(1, a.getA());
+ assertEquals(1, b.getA());
+ assertEquals(2, b.get_A());
+}
+
+{
+ let foo = undefined;
+ class A {
+ #a = 1;
+ constructor() {
+ foo = this.#a;
+ }
+ }
+
+ let a = new A;
+ assertEquals(1, foo);
+}
+
+{
+ let foo = undefined;
+ class A extends class {} {
+ #a = 1;
+ constructor() {
+ super();
+ foo = this.#a;
+ }
+ }
+
+ let a = new A;
+ assertEquals(1, foo);
+}
+
+{
+ function makeClass() {
+ return class {
+ #a;
+ setA(val) { this.#a = val; }
+ getA() { return this.#a; }
+ }
+ }
+
+ let classA = makeClass();
+ let a = new classA;
+ let classB = makeClass();
+ let b = new classB;
+
+ assertEquals(undefined, a.getA());
+ assertEquals(undefined, b.getA());
+
+ a.setA(3);
+ assertEquals(3, a.getA());
+ assertEquals(undefined, b.getA());
+
+ b.setA(5);
+ assertEquals(3, a.getA());
+ assertEquals(5, b.getA());
+
+ assertThrows(() => a.getA.call(b), TypeError);
+ assertThrows(() => b.getA.call(a), TypeError);
+}
+
+{
+ let value = undefined;
+
+ new class {
+ #a = 1;
+ getA() { return this.#a; }
+
+ constructor() {
+ new class {
+ #a = 2;
+ constructor() {
+ value = this.#a;
+ }
+ }
+ }
+ }
+
+ assertEquals(2, value);
+}
+
+{
+ class A {
+ #a = 1;
+ b = class {
+ getA() { return this.#a; }
+ get_A(val) { return val.#a; }
+ }
+ }
+
+ let a = new A();
+ let b = new a.b;
+ assertEquals(1, b.getA.call(a));
+ assertEquals(1, b.get_A(a));
+}
+
+{
+ class C {
+ b = this.#a;
+ #a = 1;
+ }
+
+ assertThrows(() => new C, TypeError);
+}
+
+{
+ class C {
+ #b = this.#a;
+ #a = 1;
+ }
+
+ assertThrows(() => new C, TypeError);
+}
+
+{
+ let symbol = Symbol();
+
+ class C {
+ #a = 1;
+ [symbol] = 1;
+ getA() { return this.#a; }
+ setA(val) { this.#a = val; }
+ }
+
+ var p = new Proxy(new C, {
+ get: function(target, name) {
+ if (typeof(name) === 'symbol') {
+ assertFalse(%SymbolIsPrivate(name));
+ }
+ return target[name];
+ }
+ });
+
+ assertThrows(() => p.getA(), TypeError);
+ assertThrows(() => p.setA(1), TypeError);
+ assertEquals(1, p[symbol]);
+}
+
+{
+ class C {
+ #b = Object.freeze(this);
+ #a = 1;
+ getA() { return this.#a; }
+ }
+
+ let c = new C;
+ assertEquals(1, c.getA());
+}
+
+{
+ class C {
+ #a = 1;
+ setA(another, val) { another.#a = val; }
+ getA(another) { return another.#a; }
+ }
+
+ let c = new C;
+ assertThrows(() => c.setA({}, 2), TypeError);
+ c.setA(c, 3);
+ assertEquals(3, c.getA(c));
+}
+
+{
+ class A {
+ constructor(arg) {
+ return arg;
+ }
+ }
+
+ class C extends A {
+ #x = 1;
+
+ constructor(arg) {
+ super(arg);
+ }
+
+ getX(arg) {
+ return arg.#x;
+ }
+ }
+
+ let leaker = new Proxy({}, {});
+ let c = new C(leaker);
+ assertEquals(1, C.prototype.getX(leaker));
+ assertSame(c, leaker);
+
+ c = new C();
+ assertThrows(() => new C(c), TypeError);
+
+ new C(1);
+}
+
+{
+ class C {
+ #a = 1;
+ b;
+ getA() { return this.b().#a; }
+ }
+
+ let c = new C();
+ c.b = () => c;
+ assertEquals(1, c.getA());
+}
+
+{
+ class C {
+ #a = 1;
+ b;
+ getA(arg) { return arg.b().#a; }
+ }
+
+ let c = new C();
+ c.b = () => c;
+ assertEquals(1, c.getA(c));
+}
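
Several of the cases above (notably the makeClass pair) rest on each evaluation of a class body minting a fresh private name: two classes produced by the same factory cannot read each other's fields even though their source text is identical. A condensed sketch of that behavior:

function makeBox() {
  return class {
    #value;
    set(v) { this.#value = v; return this; }
    get() { return this.#value; }
  };
}

const BoxA = makeBox(), BoxB = makeBox();
const b = new BoxB().set(2);
try {
  BoxA.prototype.get.call(b);  // BoxA's #value is a different private name
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
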
diff --git a/deps/v8/test/mjsunit/harmony/private.js b/deps/v8/test/mjsunit/harmony/private.js
index 39376f3ea9..c4306c516c 100644
--- a/deps/v8/test/mjsunit/harmony/private.js
+++ b/deps/v8/test/mjsunit/harmony/private.js
@@ -57,8 +57,6 @@ function TestType() {
assertEquals("symbol", typeof symbols[i])
assertTrue(typeof symbols[i] === "symbol")
assertTrue(%SymbolIsPrivate(symbols[i]))
- assertEquals(null, %_ClassOf(symbols[i]))
- assertEquals("Symbol", %_ClassOf(Object(symbols[i])))
}
}
TestType()
diff --git a/deps/v8/test/mjsunit/keyed-store-array-literal.js b/deps/v8/test/mjsunit/keyed-store-array-literal.js
new file mode 100644
index 0000000000..6892a8fd18
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-store-array-literal.js
@@ -0,0 +1,75 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+function f1() {
+ const x = [,];
+ x[1] = 42;
+ assertEquals([undefined, 42], x);
+}
+
+f1();
+f1();
+%OptimizeFunctionOnNextCall(f1);
+f1();
+f1();
+
+
+function f2() {
+ const x = [0];
+ for (const y of [1, 2, 3, 4]) {
+ x[x.length] = y;
+ }
+ assertEquals([0, 1, 2, 3, 4], x);
+}
+
+f2();
+f2();
+%OptimizeFunctionOnNextCall(f2);
+f2();
+f2();
+
+
+function f3() {
+ const x = [0];
+ for (const y of [1.1, {}]) {
+ x[x.length] = y;
+ }
+ assertEquals([0, 1.1, {}], x);
+}
+
+f3();
+f3();
+%OptimizeFunctionOnNextCall(f3);
+f3();
+f3();
+
+
+function f4(x) {
+ x[x.length] = x.length;
+}
+
+let x1 = [];
+f4(x1);
+assertEquals([0], x1);
+f4(x1);
+assertEquals([0, 1], x1);
+%OptimizeFunctionOnNextCall(f4);
+f4(x1);
+assertEquals([0, 1, 2], x1);
+f4(x1);
+assertEquals([0, 1, 2, 3], x1);
+
+let x2 = {length: 42};
+f4(x2);
+assertEquals(42, x2[42]);
+f4(x2);
+assertEquals(42, x2[42]);
+%OptimizeFunctionOnNextCall(f4);
+f4(x2);
+assertEquals(42, x2[42]);
+f4(x2);
+assertEquals(42, x2[42]);
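
The f1 through f4 cases all exercise a store one element past the end of an array, x[x.length] = v, which an engine can handle as a growing element store on real arrays but must treat as an ordinary keyed property set on array-likes. The observable semantics, sketched without the natives syntax:

function append(a, v) {
  a[a.length] = v;  // one past the end: grows a real array by one element
  return a;
}

console.log(append([0], 1));          // [ 0, 1 ]
console.log(append([0], 1.5));        // [ 0, 1.5 ]
console.log(append({length: 3}, 9));  // { '3': 9, length: 3 }: plain property set
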
diff --git a/deps/v8/test/mjsunit/math-deopt.js b/deps/v8/test/mjsunit/math-deopt.js
new file mode 100644
index 0000000000..b5fff7d149
--- /dev/null
+++ b/deps/v8/test/mjsunit/math-deopt.js
@@ -0,0 +1,70 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+(()=> {
+ function f(a) {
+ return Math.abs(a);
+ }
+ f(1);
+ f(1);
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a) {
+ return Math.min(1,a);
+ }
+ f(1);
+ f(1);
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a) {
+ return Math.pow(a,10);
+ }
+ f(1);
+ f(1);
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a) {
+ return Math.clz32(a);
+ }
+ f(1);
+ f(1);
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a) {
+ return Math.imul(a, 10);
+ }
+ f(1);
+ f(1);
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ %OptimizeFunctionOnNextCall(f);
+ f("100");
+ assertOptimized(f);
+})();
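
Each closure above follows one pattern: warm up with numbers, optimize, then call with a string so the speculative number path deoptimizes; after reoptimizing, assertOptimized checks that the same string call no longer triggers a deopt, i.e. there is no deopt loop. The coercion the fast path has to guard against is ordinary ToNumber:

console.log(Math.abs("100"));       // 100: "100" is coerced via ToNumber
console.log(Math.min(1, "100"));    // 1
console.log(Math.pow("100", 1));    // 100
console.log(Math.imul("100", 10));  // 1000: ToNumber, then ToInt32
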
diff --git a/deps/v8/test/mjsunit/mjsunit.gyp b/deps/v8/test/mjsunit/mjsunit.gyp
deleted file mode 100644
index e0a7469248..0000000000
--- a/deps/v8/test/mjsunit/mjsunit.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'mjsunit_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'mjsunit.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 10cf527f30..e51f2a9910 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -55,6 +55,9 @@ MjsUnitAssertionError.prototype.toString = function () {
// For known primitive values, please use assertEquals.
var assertSame;
+// Inverse of assertSame.
+var assertNotSame;
+
// Expected and found values are identical primitive values or functions
// or similarly structured objects (checking internal properties
// of, e.g., Number and Date objects, the elements of arrays
@@ -166,9 +169,6 @@ var isInterpreted;
// Returns true if given function is optimized.
var isOptimized;
-// Returns true if given function is compiled by Crankshaft.
-var isCrankshafted;
-
// Returns true if given function is compiled by TurboFan.
var isTurboFanned;
@@ -178,6 +178,8 @@ var testAsync;
// Monkey-patchable all-purpose failure handler.
var failWithMessage;
+// Returns a pretty-printed string representation of the passed value.
+var prettyPrinted;
(function () { // Scope for utility functions.
@@ -224,7 +226,7 @@ var failWithMessage;
}
- function PrettyPrint(value) {
+ prettyPrinted = function prettyPrinted(value) {
switch (typeof value) {
case "string":
return JSON.stringify(value);
@@ -247,11 +249,12 @@ var failWithMessage;
case "String":
case "Boolean":
case "Date":
- return objectClass + "(" + PrettyPrint(ValueOf(value)) + ")";
+ return objectClass + "(" + prettyPrinted(ValueOf(value)) + ")";
case "RegExp":
return RegExpPrototypeToString.call(value);
case "Array":
- var mapped = ArrayPrototypeMap.call(value, PrettyPrintArrayElement);
+ var mapped = ArrayPrototypeMap.call(
+ value, prettyPrintedArrayElement);
var joined = ArrayPrototypeJoin.call(mapped, ",");
return "[" + joined + "]";
case "Uint8Array":
@@ -279,9 +282,9 @@ var failWithMessage;
}
- function PrettyPrintArrayElement(value, index, array) {
+ function prettyPrintedArrayElement(value, index, array) {
if (value === undefined && !(index in array)) return "";
- return PrettyPrint(value);
+ return prettyPrinted(value);
}
@@ -296,7 +299,7 @@ var failWithMessage;
message += " (" + name_opt + ")";
}
- var foundText = PrettyPrint(found);
+ var foundText = prettyPrinted(found);
if (expectedText.length <= 40 && foundText.length <= 40) {
message += ": expected <" + expectedText + "> found <" + foundText + ">";
} else {
@@ -372,19 +375,29 @@ var failWithMessage;
} else if ((expected !== expected) && (found !== found)) {
return;
}
- fail(PrettyPrint(expected), found, name_opt);
+ fail(prettyPrinted(expected), found, name_opt);
};
+ assertNotSame = function assertNotSame(expected, found, name_opt) {
+ // TODO(mstarzinger): We should think about using Harmony's egal operator
+ // or the function equivalent Object.is() here.
+ if (found !== expected) {
+ // Values that differ under === are the same value only if both are NaN.
+ if (expected === expected || found === found) return;
+ } else if (expected === 0 && (1 / expected) !== (1 / found)) {
+ // 0 and -0 compare === but are distinct values.
+ return;
+ }
+ fail(prettyPrinted(expected), found, name_opt);
+ };
assertEquals = function assertEquals(expected, found, name_opt) {
if (!deepEquals(found, expected)) {
- fail(PrettyPrint(expected), found, name_opt);
+ fail(prettyPrinted(expected), found, name_opt);
}
};
assertNotEquals = function assertNotEquals(expected, found, name_opt) {
if (deepEquals(found, expected)) {
- fail("not equals to " + PrettyPrint(expected), found, name_opt);
+ fail("not equals to " + prettyPrinted(expected), found, name_opt);
}
};
@@ -392,7 +405,7 @@ var failWithMessage;
assertEqualsDelta =
function assertEqualsDelta(expected, found, delta, name_opt) {
if (Math.abs(expected - found) > delta) {
- fail(PrettyPrint(expected) + " +- " + PrettyPrint(delta), found, name_opt);
+ fail(prettyPrinted(expected) + " +- " + prettyPrinted(delta), found, name_opt);
}
};
@@ -499,7 +512,7 @@ var failWithMessage;
if (typeof actualConstructor === "function") {
actualTypeName = actualConstructor.name || String(actualConstructor);
}
- failWithMessage("Object <" + PrettyPrint(obj) + "> is not an instance of <" +
+ failWithMessage("Object <" + prettyPrinted(obj) + "> is not an instance of <" +
(type.name || type) + ">" +
(actualTypeName ? " but of <" + actualTypeName + ">" : ""));
}
@@ -657,14 +670,6 @@ var failWithMessage;
return (opt_status & V8OptimizationStatus.kOptimized) !== 0;
}
- isCrankshafted = function isCrankshafted(fun) {
- var opt_status = OptimizationStatus(fun, "");
- assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
- "not a function");
- return (opt_status & V8OptimizationStatus.kOptimized) !== 0 &&
- (opt_status & V8OptimizationStatus.kTurboFanned) === 0;
- }
-
isTurboFanned = function isTurboFanned(fun) {
var opt_status = OptimizationStatus(fun, "");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
@@ -779,7 +784,7 @@ var failWithMessage;
equals(expected, found, name_opt) {
this.actualAsserts_++;
if (!deepEquals(expected, found)) {
- this.fail(PrettyPrint(expected), found, name_opt);
+ this.fail(prettyPrinted(expected), found, name_opt);
}
}
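
As the TODO in assertNotSame notes, the relation both helpers probe is SameValue, which Object.is exposes directly; it differs from === exactly at NaN and at signed zero:

console.log(Object.is(NaN, NaN));  // true,  although NaN === NaN is false
console.log(Object.is(0, -0));     // false, although 0 === -0 is true
console.log(Object.is(1, 1));      // true
// assertSame(e, f) should fail exactly when !Object.is(e, f);
// assertNotSame(e, f) should fail exactly when Object.is(e, f).
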
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index ebcdad1852..c3f6b22f89 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -87,7 +87,7 @@
'array-constructor': [PASS, SLOW, ['arch == arm and simulator == True', SKIP]],
# Very slow test
- 'regress/regress-crbug-808192' : [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', SKIP]],
+ 'regress/regress-crbug-808192' : [PASS, SLOW, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips or arch == s390 or arch == s390x or arch == ppc or arch == ppc64', SKIP]],
# Very slow on ARM and MIPS, contains no architecture dependent code.
'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', SKIP]],
@@ -104,6 +104,7 @@
'generated-transition-stub': [PASS, ['mode == debug', SKIP]],
'migrations': [SKIP],
'array-functions-prototype-misc': [PASS, SLOW, ['mode == debug', SKIP]],
+ 'compiler/regress-808472': [PASS, ['mode == debug', SKIP]],
##############################################################################
# This test sets the umask on a per-process basis and hence cannot be
@@ -159,7 +160,6 @@
'regress/regress-605470': [PASS, SLOW],
'regress/regress-655573': [PASS, SLOW],
'regress/regress-1200351': [PASS, SLOW],
- 'regress/regress-crbug-474297': [PASS, ['gc_stress == False', SKIP]],
'wasm/embenchen/*': [PASS, SLOW],
'wasm/unreachable-validation': [PASS, SLOW],
@@ -242,6 +242,10 @@
'unicode-test': [SKIP],
'whitespaces': [SKIP],
+ # Unsuitable for GC stress because coverage information is lost on GC.
+ 'code-coverage-ad-hoc': [SKIP],
+ 'code-coverage-precise': [SKIP],
+
# TODO(mstarzinger): Takes too long with TF.
'array-sort': [PASS, NO_VARIANTS],
'regress/regress-91008': [PASS, NO_VARIANTS],
@@ -409,6 +413,7 @@
# Too slow.
'harmony/regexp-property-lu-ui': [SKIP],
+ 'regress/regress-165637': [SKIP],
# Flaky with baseline?
'regress/regress-2185-2': [SKIP],
@@ -633,16 +638,57 @@
'shared-function-tier-up-turbo': [SKIP],
# Fails deopt_fuzzer due to --deopt_every_n_times
- 'es6/array-iterator-turbo': [SKIP]
+ 'es6/array-iterator-turbo': [SKIP],
+
+ # BUG(v8:7466).
+ 'compiler/promise-constructor': [SKIP],
}], # 'deopt_fuzzer == True'
##############################################################################
-['gc_fuzzer == True', {
+['gc_fuzzer', {
'regress/regress-336820': [SKIP],
'regress/regress-748069': [SKIP],
'regress/regress-778668': [SKIP],
'ignition/regress-672027': [PASS, ['tsan', SKIP]],
-}], # 'gc_fuzzer == True'
+ 'string-replace-gc': [PASS, SLOW, ['mode == debug', SKIP]],
+
+ # Unsuitable for GC fuzzing because coverage information is lost on GC.
+ 'code-coverage-ad-hoc': [SKIP],
+ 'code-coverage-precise': [SKIP],
+
+ # Fails allocation on tsan.
+ 'es6/classes': [PASS, ['tsan', SKIP]],
+
+ # Tests that fail some assertions due to checking internal state sensitive
+ # to GC. We mark PASS,FAIL to not skip those tests on the endurance fuzzer.
+ 'array-literal-feedback': [PASS, FAIL],
+ 'compiler/native-context-specialization-hole-check': [PASS, FAIL],
+ 'elements-transition-hoisting': [PASS, FAIL],
+ 'es6/collections-constructor-custom-iterator': [PASS, FAIL],
+ 'keyed-load-with-symbol-key': [PASS, FAIL],
+ 'regress/regress-3709': [PASS, FAIL],
+ 'regress/regress-trap-allocation-memento': [PASS, FAIL],
+ 'regress/regress-unlink-closures-on-deopt': [PASS, FAIL],
+ 'shared-function-tier-up-turbo': [PASS, FAIL],
+}], # 'gc_fuzzer'
+
+##############################################################################
+['endurance_fuzzer', {
+ # BUG(v8:7400).
+ 'wasm/lazy-compilation': [SKIP],
+
+ # BUG(v8:7429).
+ 'regress/regress-599414-array-concat-fast-path': [SKIP],
+
+ # Often crashes due to memory consumption.
+ 'regress/regress-655573': [SKIP],
+
+ # TSAN allocation failures.
+ 'deep-recursion': [PASS, ['tsan', SKIP]],
+ 'regress/regress-430201b': [PASS, ['tsan', SKIP]],
+ 'regress/regress-crbug-493779': [PASS, ['tsan', SKIP]],
+ 'regress/wasm/regress-763439': [PASS, ['tsan', SKIP]],
+}], # 'endurance_fuzzer'
##############################################################################
['predictable == True', {
@@ -654,6 +700,8 @@
# BUG(v8:7166).
'd8/enable-tracing': [SKIP],
+ # Relies on async compilation which requires background tasks.
+ 'wasm/streaming-error-position': [SKIP],
}], # 'predictable == True'
##############################################################################
diff --git a/deps/v8/test/mjsunit/mjsunit_suppressions.js b/deps/v8/test/mjsunit/mjsunit_suppressions.js
new file mode 100644
index 0000000000..1bd466993f
--- /dev/null
+++ b/deps/v8/test/mjsunit/mjsunit_suppressions.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// This turns all mjsunit asserts into no-ops; it is used for fuzzing.
+(function () {
+ failWithMessage = function () {}
+})();
diff --git a/deps/v8/test/mjsunit/optimized-reduce.js b/deps/v8/test/mjsunit/optimized-reduce.js
new file mode 100644
index 0000000000..efcb7ccd1c
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-reduce.js
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
+// Flags: --opt --no-always-opt
+
+// Make sure we gracefully handle the case of an empty array in
+// optimized code.
+(function() {
+ var nothingThere = function(only_holes) {
+ var a = [1,2,,3]; // holey smi array.
+ if (only_holes) {
+ a = [,,,]; // also a holey smi array.
+ }
+ return a.reduce((r,v,i,o)=>r+v);
+ }
+ nothingThere();
+ nothingThere();
+ %OptimizeFunctionOnNextCall(nothingThere);
+ assertThrows(() => nothingThere(true));
+})();
+
+// An error generated inside the callback includes Array.reduce in its
+// stack trace.
+(function() {
+ var re = /Array\.reduce/;
+ var alwaysThrows = function() {
+ var b = [,,,];
+ var result = 0;
+ var callback = function(r,v,i,o) {
+ return r + v;
+ };
+ b.reduce(callback);
+ }
+ try {
+ alwaysThrows();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ try { alwaysThrows(); } catch (e) {}
+ try { alwaysThrows(); } catch (e) {}
+ %OptimizeFunctionOnNextCall(alwaysThrows);
+ try {
+ alwaysThrows();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+})();
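
The nothingThere case works because reduce without an initial value must find a first present element to seed the accumulator; an all-holes array has none, so the call throws even from optimized code. In plain terms:

const holey = [, , ,];                // length 3, no own elements
try {
  holey.reduce((acc, v) => acc + v);  // no seed and nothing to seed from
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
console.log([1, , 3].reduce((a, v) => a + v));  // 4: holes are skipped, 1 seeds
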
diff --git a/deps/v8/test/mjsunit/optimized-reduceright.js b/deps/v8/test/mjsunit/optimized-reduceright.js
new file mode 100644
index 0000000000..2689a39de5
--- /dev/null
+++ b/deps/v8/test/mjsunit/optimized-reduceright.js
@@ -0,0 +1,49 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
+// Flags: --opt --no-always-opt
+
+// Make sure we gracefully handle the case of an empty array in
+// optimized code.
+(function() {
+ var nothingThere = function(only_holes) {
+ var a = [1,2,,3]; // holey smi array.
+ if (only_holes) {
+ a = [,,,]; // also a holey smi array.
+ }
+ return a.reduceRight((r,v,i,o)=>r+v);
+ }
+ nothingThere();
+ nothingThere();
+ %OptimizeFunctionOnNextCall(nothingThere);
+ assertThrows(() => nothingThere(true));
+})();
+
+// An error generated inside the callback includes Array.reduceRight in its
+// stack trace.
+(function() {
+ var re = /Array\.reduceRight/;
+ var alwaysThrows = function() {
+ var b = [,,,];
+ var result = 0;
+ var callback = function(r,v,i,o) {
+ return r + v;
+ };
+ b.reduceRight(callback);
+ }
+ try {
+ alwaysThrows();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ try { alwaysThrows(); } catch (e) {}
+ try { alwaysThrows(); } catch (e) {}
+ %OptimizeFunctionOnNextCall(alwaysThrows);
+ try {
+ alwaysThrows();
+ } catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-2470.js b/deps/v8/test/mjsunit/regress/regress-2470.js
index cba1b06c3e..708af54ca3 100644
--- a/deps/v8/test/mjsunit/regress/regress-2470.js
+++ b/deps/v8/test/mjsunit/regress/regress-2470.js
@@ -46,4 +46,4 @@ assertThrows('Function("a", "", "//", "//")', SyntaxError);
// Some embedders rely on the string representation of the resulting
// function in cases where no formal parameters are specified.
var asString = Function("return 23").toString();
-assertSame("function anonymous() {\nreturn 23\n}", asString);
+assertSame("function anonymous(\n) {\nreturn 23\n}", asString);
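
The updated expectation follows the specified source text for dynamic functions: the Function constructor assembles "function anonymous(" + params + "\n) {\n" + body + "\n}", so even an empty parameter list contributes the newline before the closing parenthesis:

const f = Function("return 23");
console.log(f());  // 23
console.log(f.toString() === "function anonymous(\n) {\nreturn 23\n}");  // true
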
diff --git a/deps/v8/test/mjsunit/regress/regress-5010.js b/deps/v8/test/mjsunit/regress/regress-5010.js
deleted file mode 100644
index ecd4026dd8..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-5010.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-var bound = (function(){}).bind({});
-assertEquals("Function", %_ClassOf(bound));
-assertEquals("Function", %ClassOf(bound));
diff --git a/deps/v8/test/mjsunit/regress/regress-5691.js b/deps/v8/test/mjsunit/regress/regress-5691.js
new file mode 100644
index 0000000000..6cda92ca79
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5691.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+var log = "";
+var result;
+Promise.resolve()
+ .then(() => log += "|turn1")
+ .then(() => log += "|turn2")
+ .then(() => log += "|turn3")
+ .then(() => log += "|turn4")
+ .then(() => result = "|start|turn1|fast-resolve|turn2|turn3|slow-resolve|turn4\n"+log)
+ .catch(e => print("ERROR", e));
+
+Promise.resolve(Promise.resolve()).then(() => log += "|fast-resolve");
+(class extends Promise {}).resolve(Promise.resolve()).then(() => log += "|slow-resolve");
+
+log += "|start";
+ %RunMicrotasks();
+assertEquals("|start|turn1|fast-resolve|turn2|turn3|slow-resolve|turn4\n\
+|start|turn1|fast-resolve|turn2|turn3|slow-resolve|turn4", result);
diff --git a/deps/v8/test/mjsunit/regress/regress-6703.js b/deps/v8/test/mjsunit/regress/regress-6703.js
new file mode 100644
index 0000000000..82bf21d55e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6703.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertTrue(/(\u039C)/i.test("\xB5"));
+assertTrue(/(\u039C)+/i.test("\xB5"));
+assertTrue(/(\u039C)/ui.test("\xB5"));
+assertTrue(/(\u039C)+/ui.test("\xB5"));
+
+assertTrue(/(\u03BC)/i.test("\xB5"));
+assertTrue(/(\u03BC)+/i.test("\xB5"));
+assertTrue(/(\u03BC)/ui.test("\xB5"));
+assertTrue(/(\u03BC)+/ui.test("\xB5"));
+
+assertTrue(/(\u03BC)/i.test("\u039C"));
+assertTrue(/(\u03BC)+/i.test("\u039C"));
+assertTrue(/(\u03BC)/ui.test("\u039C"));
+assertTrue(/(\u03BC)+/ui.test("\u039C"));
+
+assertTrue(/(\u0178)/i.test("\xFF"));
+assertTrue(/(\u0178)+/i.test("\xFF"));
+assertTrue(/(\u0178)/ui.test("\xFF"));
+assertTrue(/(\u0178)+/ui.test("\xFF"));
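
All the blocks probe one case-insensitivity corner: U+00B5 (micro sign), U+03BC (small mu) and U+039C (capital mu) map onto one another under case mapping and case folding, as do U+00FF (y with diaeresis) and U+0178 (its capital), so /i matching must unify them with and without the /u flag:

console.log("\u00B5".toUpperCase() === "\u039C");  // true
console.log(/\u039C/i.test("\u00B5"));             // true
console.log(/\u03BC/ui.test("\u039C"));            // true
console.log(/\u0178/i.test("\u00FF"));             // true
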
diff --git a/deps/v8/test/mjsunit/regress/regress-7369.js b/deps/v8/test/mjsunit/regress/regress-7369.js
new file mode 100644
index 0000000000..de715ef6dc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-7369.js
@@ -0,0 +1,8 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(-Infinity, 1/parseInt(-0.9));
+assertEquals(-Infinity, 1/parseInt("-0.9"));
+assertEquals(-Infinity, 1/parseInt(-0.09));
+assertEquals(-Infinity, 1/parseInt(-0.009));
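
These asserts pin down sign handling: the argument is stringified (so -0.009 becomes "-0.009"), parsing stops at the ".", and the leading minus applies to the integer part 0, producing -0 rather than +0; dividing 1 by the result makes the sign observable:

console.log(String(-0.009));                   // "-0.009"
console.log(1 / parseInt("-0.9"));             // -Infinity, so the result is -0
console.log(Object.is(parseInt("-0.9"), -0));  // true (the behavior asserted above)
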
diff --git a/deps/v8/test/mjsunit/regress/regress-800651.js b/deps/v8/test/mjsunit/regress/regress-800651.js
new file mode 100644
index 0000000000..420cc7c294
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-800651.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+var list = [];
+function log(item) { list.push(item); }
+async function f() {
+ try {
+ let namespace = await import(/a/);
+ } catch(e) {
+ log(1);
+ }
+}
+f();
+
+async function g() {
+ try {
+ let namespace = await import({ get toString() { return undefined; }});
+ } catch(e) {
+ log(2);
+ }
+}
+g();
+ %RunMicrotasks();
+assertEquals(list, [1,2]);
diff --git a/deps/v8/test/mjsunit/regress/regress-803750.js b/deps/v8/test/mjsunit/regress/regress-803750.js
new file mode 100644
index 0000000000..3541ff9a35
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-803750.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Verify that very large arrays can be constructed.
+assertEquals(Array.isArray(Array.of.apply(Array, Array(65536))), true);
+assertEquals(Array.isArray(Array.of.apply(null, Array(65536))), true);
diff --git a/deps/v8/test/mjsunit/regress/regress-804096.js b/deps/v8/test/mjsunit/regress/regress-804096.js
new file mode 100644
index 0000000000..43a2ef4754
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-804096.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --opt
+
+for (let i = 0; i < 5000; i++) {
+ try {
+ [].reduce(function() {});
+ } catch (x) {
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-804176.js b/deps/v8/test/mjsunit/regress/regress-804176.js
new file mode 100644
index 0000000000..ab36c8ccbb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-804176.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const set_entries = [{}];
+set_entries[Symbol.iterator] = function() {};
+assertThrows(() => new Set(set_entries), TypeError);
+assertThrows(() => new WeakSet(set_entries), TypeError);
+
+const map_entries = [[{}, 1]];
+map_entries[Symbol.iterator] = function() {};
+assertThrows(() => new Set(map_entries), TypeError);
+assertThrows(() => new WeakSet(map_entries), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-804177.js b/deps/v8/test/mjsunit/regress/regress-804177.js
new file mode 100644
index 0000000000..b100480599
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-804177.js
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests that insertion at the beginning via unshift won't crash when using a
+// constructor that creates an array larger than normal. (Also, values inserted
+// past the end by the original constructor should not survive into the result
+// of unshift.)
+(function testUnshift() {
+ a = [1];
+ function f() {
+ return a;
+ }
+ b = Array.of.call(f);
+ b.unshift(2);
+ assertEquals(b, [2]);
+})();
+
+// Tests that insertion past the end, when using a constructor that creates an
+// array larger than normal, won't expose values previously put into the
+// backing store.
+(function testInsertionPastEnd() {
+ a = [9,9,9,9];
+ function f() {
+ return a;
+ }
+ b = Array.of.call(f,1,2);
+ b[4] = 1;
+ assertEquals(b, [1, 2, undefined, undefined, 1]);
+})();
+
+// Tests that using Array.of with a constructor returning an object with an
+// unwriteable length throws a TypeError.
+(function testFrozenArrayThrows() {
+ function f() {
+ return Object.freeze([1,2,3]);
+ }
+ assertThrows(function() { Array.of.call(f); }, TypeError);
+})();
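
All three cases lean on Array.of using its `this` value as the constructor when one is supplied, so the result can be a preexisting, differently sized array rather than a fresh one; Array.of then writes its arguments element by element and finally sets length, which throws if length is not writable. A condensed sketch:

const reused = [9, 9, 9, 9];
const result = Array.of.call(function() { return reused; }, 1, 2);
console.log(result === reused);     // true: our function supplied the array
console.log(result.length);         // 2: the final length store truncates it
console.log(result[0], result[1]);  // 1 2

try {
  Array.of.call(function() { return Object.freeze([1, 2, 3]); });
} catch (e) {
  console.log(e instanceof TypeError);  // true: frozen length is not writable
}
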
diff --git a/deps/v8/test/mjsunit/regress/regress-804188.js b/deps/v8/test/mjsunit/regress/regress-804188.js
new file mode 100644
index 0000000000..72fe8ef651
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-804188.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(Array.prototype, Symbol.iterator, {
+ value: function* () {}
+});
+const arrayIteratorProto = Object.getPrototypeOf([][Symbol.iterator]());
+arrayIteratorProto.next = function() {};
+
+assertThrows(() => new Map([[{}, 1], [{}, 2]]), TypeError);
+assertThrows(() => new WeakMap([[{}, 1], [{}, 2]]), TypeError);
+assertThrows(() => new Set([{}]), TypeError);
+assertThrows(() => new WeakSet([{}]), TypeError);
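
The collection constructors consume their argument through the iterator protocol, so replacing next with a function that returns undefined breaks all four of them at the first iteration step with a TypeError. The protocol requirement in isolation:

const iterable = {
  [Symbol.iterator]() {
    return { next() {} };  // next() returns undefined, not an object
  }
};
try {
  new Set(iterable);
} catch (e) {
  console.log(e instanceof TypeError);  // true: iterator results must be objects
}
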
diff --git a/deps/v8/test/mjsunit/regress/regress-804288.js b/deps/v8/test/mjsunit/regress/regress-804288.js
new file mode 100644
index 0000000000..6f7855d68c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-804288.js
@@ -0,0 +1,9 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var arr = [{}];
+Object.setPrototypeOf(arr, {});
+var ta = new Uint8Array(arr);
+
+let kDeclNoLocals = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-804801.js b/deps/v8/test/mjsunit/regress/regress-804801.js
new file mode 100644
index 0000000000..daec8dc92f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-804801.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() { return 42; }
+const bound_function = f.bind();
+const callable_proxy = new Proxy(function(){}.__proto__, {});
+
+function testSet(ctor) {
+ new ctor([]);
+ new ctor([{},{}]);
+}
+
+function testMap(ctor) {
+ new ctor([]);
+ new ctor([[{},{}],[{},{}]]);
+}
+
+function testAllVariants(set_or_add_function) {
+ Set.prototype.add = set_or_add_function;
+ testSet(Set);
+
+ WeakSet.prototype.add = set_or_add_function;
+ testSet(WeakSet);
+
+ Map.prototype.set = set_or_add_function;
+ testMap(Map);
+
+ WeakMap.prototype.set = set_or_add_function;
+ testMap(WeakMap);
+}
+
+testAllVariants(bound_function);
+testAllVariants(callable_proxy);
diff --git a/deps/v8/test/mjsunit/regress/regress-804837.js b/deps/v8/test/mjsunit/regress/regress-804837.js
new file mode 100644
index 0000000000..2e5e603a7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-804837.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --opt --allow-natives-syntax
+
+var __v_25662 = [, 1.8];
+function __f_6214(__v_25668) {
+ __v_25662.reduce(() => {1});
+}
+__f_6214();
+__f_6214();
+%OptimizeFunctionOnNextCall(__f_6214);
+__f_6214();
diff --git a/deps/v8/test/mjsunit/regress/regress-805729.js b/deps/v8/test/mjsunit/regress/regress-805729.js
new file mode 100644
index 0000000000..0703aa2ad8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-805729.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+async function* asyncGenerator() {};
+let gen = asyncGenerator();
+gen.return({ get then() { delete this.then; gen.next(); } });
diff --git a/deps/v8/test/mjsunit/regress/regress-805768.js b/deps/v8/test/mjsunit/regress/regress-805768.js
new file mode 100644
index 0000000000..e36106fdb3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-805768.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ var a = [''];
+ print(a[0]);
+ return a;
+}
+
+function bar(a) { a[0] = "bazinga!"; }
+
+for (var i = 0; i < 5; i++) bar([]);
+
+%OptimizeFunctionOnNextCall(bar);
+bar(foo());
+assertEquals([''], foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-812451.js b/deps/v8/test/mjsunit/regress/regress-812451.js
new file mode 100644
index 0000000000..8ccad1d124
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-812451.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+var x = [];
+function foo(x, p) {
+ x[p] = 5.3;
+}
+foo(x, 1);
+foo(x, 2);
+foo(x, -1);
+gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-814643.js b/deps/v8/test/mjsunit/regress/regress-814643.js
new file mode 100644
index 0000000000..085c56fe84
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-814643.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Number.prototype.__proto__ = String.prototype;
+Uint8Array.from(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-816289.js b/deps/v8/test/mjsunit/regress/regress-816289.js
new file mode 100644
index 0000000000..4a3a4b629d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-816289.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+delete String.prototype[Symbol.iterator];
+Int8Array.from("anything");
diff --git a/deps/v8/test/mjsunit/regress/regress-816317.js b/deps/v8/test/mjsunit/regress/regress-816317.js
new file mode 100644
index 0000000000..304786e694
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-816317.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let a = new Float64Array(15);
+Object.defineProperty(a, "length", {
+ get: function () {
+ return 6;
+ }
+});
+delete a.__proto__.__proto__[Symbol.iterator];
+Float64Array.from(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-821137.js b/deps/v8/test/mjsunit/regress/regress-821137.js
new file mode 100644
index 0000000000..639b3b998a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-821137.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests that creating an iterator that shrinks the array populated by
+// Array.from does not lead to out-of-bounds writes.
+let oobArray = [];
+let maxSize = 1028 * 8;
+Array.from.call(function() { return oobArray }, {[Symbol.iterator] : _ => (
+ {
+ counter : 0,
+ next() {
+ let result = this.counter++;
+ if (this.counter > maxSize) {
+ oobArray.length = 0;
+ return {done: true};
+ } else {
+ return {value: result, done: false};
+ }
+ }
+ }
+) });
+assertEquals(oobArray.length, maxSize);
+
+// The iterator reset the length to 0 just before returning done, so this
+// write will crash if the backing store was not resized correctly.
+oobArray[oobArray.length - 1] = 0x41414141;
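
Array.from with a callable `this` writes every iterator value into whatever that function constructs and only assigns the final length after the iterator completes, so an iterator that shrinks the target just before finishing must not leave writes beyond the now-smaller backing store. A scaled-down sketch of the shape used above:

let target = [];
const result = Array.from.call(function() { return target; }, {
  [Symbol.iterator]() {
    let i = 0;
    return {
      next() {
        if (i === 3) { target.length = 0; return {done: true}; }  // shrink late
        return {value: i++, done: false};
      }
    };
  }
});
console.log(result === target);  // true: Array.from filled our array
console.log(result.length);      // 3: the final length store restores it
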
diff --git a/deps/v8/test/mjsunit/regress/regress-995.js b/deps/v8/test/mjsunit/regress/regress-995.js
index 3f99179104..cf680da1e7 100644
--- a/deps/v8/test/mjsunit/regress/regress-995.js
+++ b/deps/v8/test/mjsunit/regress/regress-995.js
@@ -39,14 +39,6 @@ function f(value) {
}
f(new String("bar"));
-// HClassOf.
-function g(value) {
- if (%_ClassOf(value) === 'Date') {
- if (%_ClassOf(value) === 'String') assertTrue(false);
- }
-}
-g(new Date());
-
// HIsNull.
function h(value) {
if (value == null) {
diff --git a/deps/v8/test/mjsunit/regress/regress-charat-empty.js b/deps/v8/test/mjsunit/regress/regress-charat-empty.js
new file mode 100644
index 0000000000..6548ad5b2f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-charat-empty.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --opt --no-always-opt --allow-natives-syntax
+(() => {
+ function f(s) {
+ return s.charAt();
+ }
+ f("");
+ f("");
+ %OptimizeFunctionOnNextCall(f);
+ f("");
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-109362.js b/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
index 20285f614d..cf7cd4e5fa 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
@@ -14,15 +14,16 @@ function test(expectation, f) {
}
/*
-(function() {
+(function(
+) {
1 + reference_error //@ sourceURL=evaltest
})
*/
-test("2:5", new Function(
+test("3:5", new Function(
'1 + reference_error //@ sourceURL=evaltest'));
/*
(function(x
-/\**\/) {
+) {
1 + reference_error //@ sourceURL=evaltest
})
@@ -34,7 +35,7 @@ test("4:6", new Function(
,z//
,y
-/\**\/) {
+) {
1 + reference_error //@ sourceURL=evaltest
})
@@ -44,7 +45,7 @@ test("7:6", new Function(
/*
(function(x/\*,z//
,y*\/
-/\**\/) {
+) {
1 + reference_error //@ sourceURL=evaltest
})
*/
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-474297.js b/deps/v8/test/mjsunit/regress/regress-crbug-474297.js
deleted file mode 100644
index ce240251bd..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-474297.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --gc-interval=33 --expose-gc --allow-natives-syntax
-
-var Debug = %GetDebugContext().Debug;
-Debug.setListener(function(){});
-
-%DebugGetLoadedScripts();
-
-Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-663410.js b/deps/v8/test/mjsunit/regress/regress-crbug-663410.js
index 5bef089f11..6d2a487715 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-663410.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-663410.js
@@ -4,5 +4,5 @@
function alert(x) {};
assertThrows(
- Function("a=`","`,xss=1){alert(xss)")
+ 'Function("a=`","`,xss=1){alert(xss)")'
);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-802333.js b/deps/v8/test/mjsunit/regress/regress-crbug-802333.js
new file mode 100644
index 0000000000..35d762187b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-802333.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function deferred_func() {
+ class C {
+ method1() {
+
+ }
+ }
+}
+
+let bound = (a => a).bind(this, 0);
+
+function opt() {
+ deferred_func.prototype; // ReduceJSLoadNamed
+
+ return bound();
+}
+
+assertEquals(0, opt());
+%OptimizeFunctionOnNextCall(opt);
+
+assertEquals(0, opt());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-805765.js b/deps/v8/test/mjsunit/regress/regress-crbug-805765.js
new file mode 100644
index 0000000000..b630efb4c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-805765.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var code = "(function* gen() {"
+for (var i = 0; i < 256; ++i) {
+ code += `var v_${i} = 0;`
+}
+code += `yield; })`
+
+var gen = eval(code);
+var g = gen();
+g.next();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-806200.js b/deps/v8/test/mjsunit/regress/regress-crbug-806200.js
new file mode 100644
index 0000000000..dbd66019a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-806200.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertThrows("%Foo(...spread)", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-808192.js b/deps/v8/test/mjsunit/regress/regress-crbug-808192.js
index 3336c0043e..f57d5fc3a6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-808192.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-808192.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(cbruni): enable always opt once v8:7438
-// Flags: --expose-gc --no-always-opt
+// Flags: --expose-gc
const f = eval(`(function f(i) {
if (i == 0) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-813450.js b/deps/v8/test/mjsunit/regress/regress-crbug-813450.js
new file mode 100644
index 0000000000..b301012950
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-813450.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var constructorArgs = new Array(0x10100);
+var constructor = function() {};
+var target = new Proxy(constructor, {
+ construct: function() {
+ }
+});
+var proxy = new Proxy(target, {
+ construct: function(newTarget, args) {
+ return Reflect.construct(constructor, []);
+ }
+});
+var instance = new proxy();
+var instance2 = Reflect.construct(proxy, constructorArgs);
+%HeapObjectVerify(target);
+%HeapObjectVerify(proxy);
+%HeapObjectVerify(instance);
+%HeapObjectVerify(instance2);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-816961.js b/deps/v8/test/mjsunit/regress/regress-crbug-816961.js
new file mode 100644
index 0000000000..c1637ad2c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-816961.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(function() {
+ var memory = new WebAssembly.Memory({initial: 64 * 1024 * 1024 / 0x10000});
+ var array = new Uint8Array(memory.buffer);
+ Uint8Array.of.call(function() { return array },
+ {valueOf() { memory.grow(1); } });
+}, TypeError);
+
+assertThrows(function() {
+ var memory = new WebAssembly.Memory({initial: 64 * 1024 * 1024 / 0x10000});
+ var array = new Uint8Array(memory.buffer);
+ Uint8Array.from.call(function() { return array },
+ [{valueOf() { memory.grow(1); } }],
+ x => x);
+}, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-822284.js b/deps/v8/test/mjsunit/regress/regress-crbug-822284.js
new file mode 100644
index 0000000000..97a38259e3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-822284.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a) {
+ a = "" + Math.abs(a);
+ return a.charCodeAt(0);
+}
+
+// Add '1' to the number-to-string table (as a SeqString).
+String.fromCharCode(49);
+
+// Turn the SeqString into a ThinString via forced internalization.
+const o = {};
+o[(1).toString()] = 1;
+
+assertEquals(49, foo(1));
+assertEquals(49, foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(49, foo(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js b/deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js
new file mode 100644
index 0000000000..2e14aa027c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --opt --allow-natives-syntax
+
+(() => {
+ function f(u) {
+ for (var j = 0; j < 20; ++j) {
+ print("" + u.codePointAt());
+ }
+ }
+
+ f("test");
+ f("foo");
+ %OptimizeFunctionOnNextCall(f);
+ f("");
+})();
diff --git a/deps/v8/test/mjsunit/regress/string-next-encoding.js b/deps/v8/test/mjsunit/regress/string-next-encoding.js
new file mode 100644
index 0000000000..27b99a9c9e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/string-next-encoding.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --opt --allow-natives-syntax
+
+function f() {
+ var s = "äϠ�𝌆";
+ var i = s[Symbol.iterator]();
+ assertEquals("ä", i.next().value);
+ assertEquals("Ϡ", i.next().value);
+ assertEquals("�", i.next().value);
+ assertEquals("𝌆", i.next().value);
+ assertSame(undefined, i.next().value);
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7353.js b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
new file mode 100644
index 0000000000..d41cbabf36
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7353.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32);
+builder.addFunction('grow', kSig_i_i).addBody([
+ kExprGetLocal, 0,
+ kExprGrowMemory, 0,
+]).exportFunc();
+builder.addFunction('main', kSig_i_i).addBody([
+ ...wasmI32Const(0x41),
+ kExprSetLocal, 0,
+ // Enter loop, such that values are spilled to the stack.
+ kExprLoop, kWasmStmt,
+ kExprEnd,
+ // Reload value. This must be loaded as 32 bit value.
+ kExprGetLocal, 0,
+ kExprI32LoadMem, 0, 0,
+]).exportFunc();
+const instance = builder.instantiate();
+// Execute grow, such that the stack contains garbage data afterwards.
+instance.exports.grow(1);
+instance.exports.main();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7364.js b/deps/v8/test/mjsunit/regress/wasm/regress-7364.js
new file mode 100644
index 0000000000..8e66295b70
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7364.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const exportingModuleBinary = (() => {
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 42]).exportFunc();
+ return builder.toBuffer();
+})();
+
+const exportingModule = new WebAssembly.Module(exportingModuleBinary);
+const exportingInstance = new WebAssembly.Instance(exportingModule);
+
+const reExportingModuleBinary = (() => {
+ const builder = new WasmModuleBuilder();
+ const gIndex = builder.addImport('a', 'g', kSig_i_v);
+ builder.addExport('y', gIndex);
+ return builder.toBuffer();
+})();
+
+const module = new WebAssembly.Module(reExportingModuleBinary);
+const imports = {
+ a: {g: exportingInstance.exports.f},
+};
+const instance = new WebAssembly.Instance(module, imports);
+
+// Previously exported Wasm functions are re-exported with the same value.
+assertEquals(instance.exports.y, exportingInstance.exports.f);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7366.js b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
new file mode 100644
index 0000000000..41f758efb1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7366.js
@@ -0,0 +1,33 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_i_iii).addBody([
+ // Return the sum of all arguments.
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2, kExprI32Add, kExprI32Add
+]);
+const sig = builder.addType(kSig_i_iii);
+builder.addFunction(undefined, kSig_i_iii)
+ .addBody([
+ ...wasmI32Const(1), // i32.const 0x1
+ kExprSetLocal, 0, // set_local 0
+ ...wasmI32Const(4), // i32.const 0x4
+ kExprSetLocal, 1, // set_local 1
+ ...wasmI32Const(16), // i32.const 0x10
+ kExprSetLocal, 2, // set_local 2
+ kExprLoop, kWasmStmt, // loop
+ kExprEnd, // end
+ kExprGetLocal, 0, // get_local 0
+ kExprGetLocal, 1, // get_local 1
+ kExprGetLocal, 2, // get_local 2
+ kExprI32Const, 0, // i32.const 0 (func index)
+ kExprCallIndirect, sig, 0, // call indirect
+ ])
+ .exportAs('main');
+builder.appendToTable([0]);
+const instance = builder.instantiate();
+assertEquals(21, instance.exports.main());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7422.js b/deps/v8/test/mjsunit/regress/wasm/regress-7422.js
new file mode 100644
index 0000000000..87896b4c35
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7422.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+sig = makeSig([kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32], [kWasmI32]);
+builder.addFunction(undefined, sig).addBody([kExprGetLocal, 4]);
+builder.addMemory(16, 32);
+builder.addFunction('main', sig)
+ .addBody([
+ kExprI32Const, 0, kExprSetLocal, 0,
+ // Compute five arguments to the function call.
+ kExprI32Const, 0, kExprI32Const, 0, kExprI32Const, 0, kExprI32Const, 0,
+ kExprGetLocal, 4, kExprI32Const, 1, kExprI32Add,
+ // Now some intermediate computation to force the arguments to be spilled
+ // to the stack:
+ kExprGetLocal, 0, kExprI32Const, 1, kExprI32Add, kExprGetLocal, 1,
+ kExprGetLocal, 1, kExprI32Add, kExprI32Add, kExprDrop,
+ // Now call the function.
+ kExprCallFunction, 0
+ ])
+ .exportFunc();
+var instance = builder.instantiate();
+assertEquals(11, instance.exports.main(2, 4, 6, 8, 10));
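The expected value 11 is independent of the arguments passed to main: main zeroes its first local, pushes four zero constants and its fifth argument plus one (10 + 1 = 11) as the call operands, performs throw-away arithmetic so the operands get spilled, and then calls function 0, which simply returns its fifth parameter. A short check of that reasoning, separate from the test:

// Callee returns argument 4, which main computes as (fifth arg to main) + 1.
const fifthArgToMain = 10;
console.assert(fifthArgToMain + 1 === 11);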
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-7499.js b/deps/v8/test/mjsunit/regress/wasm/regress-7499.js
new file mode 100644
index 0000000000..71f246decf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-7499.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32);
+builder.addFunction(undefined, kSig_v_v).addBody([
+ kExprI32Const, 0, // i32.const 0
+ kExprI64LoadMem, 0, 0xff, 0xff, 0xff, 0xff,
+ 0x0f, // i64.load align=0 offset=0xffffffff
+ kExprDrop, // drop
+]);
+builder.addExport('main', 0);
+const module = builder.instantiate();
+assertThrows(
+ () => module.exports.main(), WebAssembly.RuntimeError, /out of bounds/);
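The five bytes after the alignment immediate are the unsigned LEB128 encoding of the offset 0xffffffff. A small stand-alone encoder (illustrative only, not part of the test harness) reproduces them:

// Encode an unsigned 32-bit integer as LEB128.
function leb128u(value) {
  const bytes = [];
  do {
    let byte = value & 0x7f;
    value >>>= 7;                    // unsigned shift keeps this well-defined
    if (value !== 0) byte |= 0x80;   // set the continuation bit
    bytes.push(byte);
  } while (value !== 0);
  return bytes;
}
// leb128u(0xffffffff) yields [0xff, 0xff, 0xff, 0xff, 0x0f].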
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-801785.js b/deps/v8/test/mjsunit/regress/wasm/regress-801785.js
new file mode 100644
index 0000000000..1870d7e8f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-801785.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Flags: --print-wasm-code
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(8, 16);
+builder.addFunction(undefined, kSig_i_i).addBody([
+ // wasm to wasm call.
+ kExprGetLocal, 0, kExprCallFunction, 0x1
+]);
+builder.addFunction(undefined, kSig_i_i).addBody([
+ // load from <get_local 0> to create trap code.
+ kExprGetLocal, 0, kExprI32LoadMem, 0,
+ // unreachable to create a runtime call.
+ kExprUnreachable
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-803427.js b/deps/v8/test/mjsunit/regress/wasm/regress-803427.js
new file mode 100644
index 0000000000..833b140fd4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-803427.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+let module = new WebAssembly.Module(builder.toBuffer());
+var worker = new Worker('onmessage = function() {};');
+worker.postMessage(module);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-803788.js b/deps/v8/test/mjsunit/regress/wasm/regress-803788.js
new file mode 100644
index 0000000000..8edec7c464
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-803788.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+let q_table = builder.addImportedTable("q", "table");
+let q_base = builder.addImportedGlobal("q", "base", kWasmI32);
+let q_fun = builder.addImport("q", "fun", kSig_v_v);
+builder.addType(kSig_i_ii);
+builder.addFunctionTableInit(q_base, true, [ q_fun ]);
+let module = new WebAssembly.Module(builder.toBuffer());
+let table = new WebAssembly.Table({
+ element: "anyfunc",
+ initial: 10,
+});
+let instance = new WebAssembly.Instance(module, {
+ q: {
+ base: 0,
+ table: table,
+ fun: () => (0)
+ }
+});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-808012.js b/deps/v8/test/mjsunit/regress/wasm/regress-808012.js
new file mode 100644
index 0000000000..1b91f226a8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-808012.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction('test', kSig_i_i).addBody([kExprUnreachable]);
+let module = new WebAssembly.Module(builder.toBuffer());
+var worker = new Worker('onmessage = function() {};');
+worker.postMessage(module);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-810973.js b/deps/v8/test/mjsunit/regress/wasm/regress-810973.js
new file mode 100644
index 0000000000..5a776884ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-810973.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+ this.WScript = new Proxy({}, {
+ get() {
+ switch (name) {
+ }
+ }
+ });
+function MjsUnitAssertionError() {
+};
+let __v_692 = `(function module() { "use asm";function foo(`;
+const __v_693 =
+3695;
+for (let __v_695 = 0; __v_695 < __v_693; ++__v_695) {
+ __v_692 += `arg${__v_695},`;
+}
+try {
+ __v_692 += `arg${__v_693}){`;
+} catch (e) {}
+for (let __v_696 = 0; __v_696 <= __v_693; ++__v_696) {
+ __v_692 += `arg${__v_696}=+arg${__v_696};`;
+}
+ __v_692 += "return 10;}function bar(){return foo(";
+for (let __v_697 = 0; __v_697 < __v_693; ++__v_697) {
+ __v_692 += "0.0,";
+}
+ __v_692 += "1.0)|0;}";
+
+ __v_692 += "return bar})()()";
+const __v_694 = eval(__v_692);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-812005.js b/deps/v8/test/mjsunit/regress/wasm/regress-812005.js
new file mode 100644
index 0000000000..979b769bbc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-812005.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_d_v).addBody([
+ ...wasmF64Const(0), // f64.const 0
+ ...wasmF64Const(0), // f64.const 0
+ ...wasmI32Const(0), // i32.const 0
+ kExprBrIf, 0x00, // br_if depth=0
+ kExprF64Add // f64.add
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-816226.js b/deps/v8/test/mjsunit/regress/wasm/regress-816226.js
new file mode 100644
index 0000000000..a9cb715570
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-816226.js
@@ -0,0 +1,5 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(new Int8Array((new WebAssembly.Memory({})).buffer)).buffer;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-819869.js b/deps/v8/test/mjsunit/regress/wasm/regress-819869.js
new file mode 100644
index 0000000000..f2606fb610
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-819869.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_i_i)
+ .addLocals({i32_count: 0xffffffff})
+ .addBody([]);
+assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/skipping-inner-functions.js b/deps/v8/test/mjsunit/skipping-inner-functions.js
index e228b258c4..fba9a3e277 100644
--- a/deps/v8/test/mjsunit/skipping-inner-functions.js
+++ b/deps/v8/test/mjsunit/skipping-inner-functions.js
@@ -365,3 +365,15 @@ TestSkippableFunctionInForOfHeaderAndBody();
function lazy(p = (function() {}, class {}, function() {}, class { method1() { } })) { }
lazy();
})();
+
+(function TestOneByteTwoByteMismatch() {
+ // Regression test for
+ // https://bugs.chromium.org/p/v8/issues/detail?id=7428
+
+ let name = 'weird_string\u2653'.slice(0, 12);
+ let o = {};
+ o[name] = null;
+ var x;
+ eval('x = function weird_string() { function skip() {} };');
+ x();
+})();
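As the test name suggests, the regression hinges on a mismatch of string representations: slicing 'weird_string\u2653' down to its first 12 characters produces a string whose contents are pure ASCII but whose backing store is still two-byte, while the eval'd function name is one-byte. For illustration (the internal representation itself is not observable from JavaScript; only the content comparison below is):

// Same character content, different internal representation.
const sliced = 'weird_string\u2653'.slice(0, 12);
console.assert(sliced === 'weird_string');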
diff --git a/deps/v8/test/mjsunit/string-charcodeat-external.js b/deps/v8/test/mjsunit/string-charcodeat-external.js
new file mode 100644
index 0000000000..8b291dad33
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-charcodeat-external.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-externalize-string --expose-gc --allow-natives-syntax
+
+function foo(s) {
+ return s.charCodeAt(12);
+}
+
+var extern = "internalized dummy";
+extern = "1234567890qiaipppiúöäöáœba"+"jalsdjasldjasdlasjdalsdjasldk";
+externalizeString(extern, true /* force two-byte */);
+assertEquals(97, foo(extern));
+assertEquals(97, foo(extern));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(97, foo(extern));
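The expected value is simply the character at index 12 of the concatenated string: positions 0 through 9 hold the digits, 10 and 11 hold 'q' and 'i', and position 12 holds 'a', whose character code is 97. The assertions exercise the unoptimized path twice and then the optimized path against the externalized two-byte string.

// Worked check of the constant used above.
console.assert('1234567890qia'.charCodeAt(12) === 97);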
diff --git a/deps/v8/test/mjsunit/string-deopt.js b/deps/v8/test/mjsunit/string-deopt.js
new file mode 100644
index 0000000000..15549186ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-deopt.js
@@ -0,0 +1,57 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+(()=> {
+ function f(a, b, c) {
+ return a.indexOf(b, c);
+ }
+ f("abc", "de", 1);
+ f("abc", "de", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", "de", {});
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", "de", {});
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a, b, c) {
+ return a.indexOf(b, c);
+ }
+ f("abc", "de", 1);
+ f("abc", "de", 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {}, 1);
+ %OptimizeFunctionOnNextCall(f);
+ f("abc", {}, 1);
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a, b, c) {
+ return a.substring(b, c);
+ }
+ f("abcde", 1, 4);
+ f("abcde", 1, 4);
+ %OptimizeFunctionOnNextCall(f);
+ f("abcde", 1, {});
+ %OptimizeFunctionOnNextCall(f);
+ f("abcde", 1, {});
+ assertOptimized(f);
+})();
+
+(()=> {
+ function f(a, b, c) {
+ return a.substring(b, c);
+ }
+ f("abcde", 1, 4);
+ f("abcde", 1, 4);
+ %OptimizeFunctionOnNextCall(f);
+ f("abcde", {}, 4);
+ %OptimizeFunctionOnNextCall(f);
+ f("abcde", {}, 4);
+ assertOptimized(f);
+})();
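Each closure above exercises the same deoptimization-loop pattern: warm a function up with well-typed arguments, optimize it, call it with an argument type it has not yet seen (allowing at most one deoptimization), optimize again, repeat the mistyped call, and assert the function stayed optimized. A generic sketch of the pattern (helper name hypothetical; requires --allow-natives-syntax like the tests above):

function assertNoDeoptLoop(f, goodArgs, badArgs) {
  f(...goodArgs);
  f(...goodArgs);                  // collect type feedback
  %OptimizeFunctionOnNextCall(f);
  f(...badArgs);                   // may deoptimize once
  %OptimizeFunctionOnNextCall(f);
  f(...badArgs);                   // must not deoptimize again
  assertOptimized(f);
}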
diff --git a/deps/v8/test/mjsunit/string-trim.js b/deps/v8/test/mjsunit/string-trim.js
new file mode 100644
index 0000000000..201a34f1c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-trim.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-string-trimming
+
+assertEquals('trim', String.prototype.trim.name);
+assertEquals('trimStart', String.prototype.trimStart.name);
+assertEquals('trimStart', String.prototype.trimLeft.name);
+assertEquals('trimEnd', String.prototype.trimEnd.name);
+assertEquals('trimEnd', String.prototype.trimRight.name);
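The asserted names reflect that trimLeft and trimRight are aliases of trimStart and trimEnd. A short usage example of the behavior those aliases imply:

assertEquals('ab ', ' ab '.trimStart());
assertEquals(' ab', ' ab '.trimEnd());
assertEquals(' ab '.trimLeft(), ' ab '.trimStart());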
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index bc9d69ff33..d31a189ba2 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -25,11 +25,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from collections import OrderedDict
+import itertools
import os
import re
+from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.objects import testcase
+from testrunner.outproc import base as outproc
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
@@ -37,9 +41,22 @@ SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
NO_HARNESS_PATTERN = re.compile(r"^// NO HARNESS$", flags=re.MULTILINE)
+# Flags known to misbehave when combining arbitrary mjsunit tests. Entries
+# may also be compiled regular expressions.
+COMBINE_TESTS_FLAGS_BLACKLIST = [
+ '--check-handle-count',
+ '--enable-tracing',
+ re.compile('--experimental.*'),
+ '--expose-trigger-failure',
+ re.compile('--harmony.*'),
+ '--mock-arraybuffer-allocator',
+ '--print-ast',
+ re.compile('--trace.*'),
+ '--wasm-lazy-compilation',
+]
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.root, followlinks=True):
for dotted in [x for x in dirs if x.startswith('.')]:
@@ -47,7 +64,9 @@ class TestSuite(testsuite.TestSuite):
dirs.sort()
files.sort()
for filename in files:
- if filename.endswith(".js") and filename != "mjsunit.js":
+ if (filename.endswith(".js") and
+ filename != "mjsunit.js" and
+ filename != "mjsunit_suppressions.js"):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
@@ -55,9 +74,15 @@ class TestSuite(testsuite.TestSuite):
tests.append(test)
return tests
+ def _test_combiner_class(self):
+ return TestCombiner
+
def _test_class(self):
return TestCase
+ def _suppressed_test_class(self):
+ return SuppressedTestCase
+
class TestCase(testcase.TestCase):
def __init__(self, *args, **kwargs):
@@ -111,12 +136,12 @@ class TestCase(testcase.TestCase):
def _get_source_flags(self):
return self._source_flags
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
files = list(self._source_files)
- if not ctx.no_harness:
+ if not self._test_config.no_harness:
files += self._mjsunit_files
files += self._files_suffix
- if ctx.isolates:
+ if self._test_config.isolates:
files += ['--isolate'] + files
return files
@@ -128,5 +153,157 @@ class TestCase(testcase.TestCase):
return os.path.join(self.suite.root, self.path + self._get_suffix())
-def GetSuite(name, root):
- return TestSuite(name, root)
+class TestCombiner(testsuite.TestCombiner):
+ def get_group_key(self, test):
+ """Combine tests with the same set of flags.
+ Ignore:
+ 1. Some special cases where it's not obvious what to pass in the command.
+    2. Tests with flags that can cause failure even inside the try-catch wrapper.
+ 3. Tests that use async functions. Async functions can be scheduled after
+    exiting from the try-catch wrapper and cause failure.
+ """
+ if (len(test._files_suffix) > 1 or
+ test._env or
+ not test._mjsunit_files or
+ test._source_files):
+ return None
+
+ source_flags = test._get_source_flags()
+ if ('--expose-trigger-failure' in source_flags or
+ '--throws' in source_flags):
+ return None
+
+ source_code = test.get_source()
+ # Maybe we could just update the tests to await all async functions they
+ # call?
+ if 'async' in source_code:
+ return None
+
+ # TODO(machenbach): Remove grouping if combining tests in a flag-independent
+ # way works well.
+ return 1
+
+ def _combined_test_class(self):
+ return CombinedTest
+
+
+class CombinedTest(testcase.TestCase):
+ """Behaves like normal mjsunit tests except:
+ 1. Expected outcome is always PASS
+ 2. Instead of one file there is a try-catch wrapper with all combined tests
+ passed as arguments.
+ """
+ def __init__(self, name, tests):
+ super(CombinedTest, self).__init__(tests[0].suite, '', name,
+ tests[0]._test_config)
+ self._tests = tests
+
+ def _prepare_outcomes(self, force_update=True):
+ self._statusfile_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+ self.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+
+ def _get_shell_with_flags(self):
+ """In addition to standard set of shell flags it appends:
+ --disable-abortjs: %AbortJS can abort the test even inside
+ trycatch-wrapper, so we disable it.
+ --es-staging: We blacklist all harmony flags due to false positives,
+ but always pass the staging flag to cover the mature features.
+ --omit-quit: Calling quit() in JS would otherwise early terminate.
+ --quiet-load: suppress any stdout from load() function used by
+ trycatch-wrapper.
+ """
+ shell = 'd8'
+ shell_flags = [
+ '--test',
+ '--disable-abortjs',
+ '--es-staging',
+ '--omit-quit',
+ '--quiet-load',
+ ]
+ return shell, shell_flags
+
+ def _get_cmd_params(self):
+ return (
+ super(CombinedTest, self)._get_cmd_params() +
+ ['tools/testrunner/trycatch_loader.js', '--'] +
+ self._tests[0]._mjsunit_files +
+ ['--'] +
+ [t._files_suffix[0] for t in self._tests]
+ )
+
+ def _merge_flags(self, flags):
+ """Merges flags from a list of flags.
+
+    Flag values not starting with '-' are merged with the preceding flag,
+ e.g. --foo 1 will become --foo=1. All other flags remain the same.
+
+ Returns: A generator of flags.
+ """
+ if not flags:
+ return
+ # Iterate over flag pairs. ['-'] is a sentinel value for the last iteration.
+ for flag1, flag2 in itertools.izip(flags, flags[1:] + ['-']):
+ if not flag2.startswith('-'):
+ assert '=' not in flag1
+ yield flag1 + '=' + flag2
+ elif flag1.startswith('-'):
+ yield flag1
+
+ def _is_flag_blacklisted(self, flag):
+ for item in COMBINE_TESTS_FLAGS_BLACKLIST:
+ if isinstance(item, basestring):
+ if item == flag:
+ return True
+ elif item.match(flag):
+ return True
+ return False
+
+ def _get_combined_flags(self, flags_gen):
+ """Combines all flags - dedupes, keeps order and filters some flags.
+
+ Args:
+ flags_gen: Generator for flag lists.
+ Returns: A list of flags.
+ """
+ merged_flags = self._merge_flags(list(itertools.chain(*flags_gen)))
+ unique_flags = OrderedDict((flag, True) for flag in merged_flags).keys()
+ return [
+ flag for flag in unique_flags
+ if not self._is_flag_blacklisted(flag)
+ ]
+
+ def _get_source_flags(self):
+ # Combine flags from all source files.
+ return self._get_combined_flags(
+ test._get_source_flags() for test in self._tests)
+
+ def _get_statusfile_flags(self):
+ # Combine flags from all status file entries.
+ return self._get_combined_flags(
+ test._get_statusfile_flags() for test in self._tests)
+
+
+class SuppressedTestCase(TestCase):
+ """The same as a standard mjsunit test case with all asserts as no-ops."""
+ def __init__(self, *args, **kwargs):
+ super(SuppressedTestCase, self).__init__(*args, **kwargs)
+ self._mjsunit_files.append(
+ os.path.join(self.suite.root, "mjsunit_suppressions.js"))
+
+ def _prepare_outcomes(self, *args, **kwargs):
+ super(SuppressedTestCase, self)._prepare_outcomes(*args, **kwargs)
+    # Skip tests expected to fail. We suppress all asserts anyway, but some
+ # tests are expected to fail with type errors or even dchecks, and we
+ # can't differentiate that.
+ if statusfile.FAIL in self._statusfile_outcomes:
+ self._statusfile_outcomes = [statusfile.SKIP]
+
+ def _get_extra_flags(self, *args, **kwargs):
+ return (
+ super(SuppressedTestCase, self)._get_extra_flags(*args, **kwargs) +
+ ['--disable-abortjs']
+ )
+
+
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
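The merge rule documented in _merge_flags above turns a flag followed by a bare value into a single '='-joined flag, so '--foo 1' becomes '--foo=1' while other flags pass through unchanged. A stand-alone sketch of the same pairwise rule (written in JavaScript for consistency with the test files in this patch; the authoritative version is the Python generator above):

function mergeFlags(flags) {
  const merged = [];
  const padded = flags.concat(['-']);    // '-' is the sentinel for the last pair
  for (let i = 0; i < flags.length; i++) {
    const flag1 = padded[i], flag2 = padded[i + 1];
    if (!flag2.startsWith('-')) {
      merged.push(flag1 + '=' + flag2);  // '--foo 1' -> '--foo=1'
    } else if (flag1.startsWith('-')) {
      merged.push(flag1);                // plain flag, kept as-is
    }                                    // else: a value consumed by the previous pair
  }
  return merged;
}
// mergeFlags(['--foo', '1', '--bar']) -> ['--foo=1', '--bar']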
diff --git a/deps/v8/test/mjsunit/wasm/OWNERS b/deps/v8/test/mjsunit/wasm/OWNERS
index 5bd472f49d..d9195d8e54 100644
--- a/deps/v8/test/mjsunit/wasm/OWNERS
+++ b/deps/v8/test/mjsunit/wasm/OWNERS
@@ -2,6 +2,4 @@ ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
eholk@chromium.org
-mtrofin@chromium.org
-rossberg@chromium.org
titzer@chromium.org
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
index a1af7a4393..1a89c3ef1a 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
@@ -200,23 +200,8 @@ var inputs = [
];
var funcs = [
- f32_add,
- f32_sub,
- f32_mul,
- f32_div,
- f32_ceil,
- f32_floor,
-// TODO(bradnelson) f32_sqrt,
-// TODO(bradnelson) f32_abs,
-// TODO(bradnelson) f32_min is wrong for -0
-// TODO(bradnelson) f32_max is wrong for -0
- f32_eq,
- f32_ne,
- f32_lt,
- f32_lteq,
- f32_gt,
- f32_gteq,
- f32_neg,
+ f32_add, f32_sub, f32_mul, f32_div, f32_ceil, f32_floor, f32_sqrt, f32_abs,
+ f32_min, f32_max, f32_eq, f32_ne, f32_lt, f32_lteq, f32_gt, f32_gteq, f32_neg
];
(function () {
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
index c7b439fede..a07de98558 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
@@ -262,32 +262,10 @@ var inputs = [
];
var funcs = [
- f64_add,
- f64_sub,
- f64_mul,
- f64_div,
- f64_eq,
- f64_ne,
- f64_lt,
- f64_lteq,
- f64_gt,
- f64_gteq,
- f64_ceil,
- f64_floor,
-// TODO(bradnelson) f64_sqrt,
- f64_abs,
- f64_neg,
-// TODO(bradnelson) f64_min is wrong for -0
-// TODO(bradnelson) f64_max is wrong for -0
- f64_acos,
- f64_asin,
- f64_atan,
- f64_cos,
- f64_sin,
- f64_tan,
- f64_exp,
- f64_log,
- f64_atan2,
+ f64_add, f64_sub, f64_mul, f64_div, f64_eq, f64_ne, f64_lt,
+ f64_lteq, f64_gt, f64_gteq, f64_ceil, f64_floor, f64_sqrt, f64_abs,
+ f64_neg, f64_min, f64_max, f64_acos, f64_asin, f64_atan, f64_cos,
+ f64_sin, f64_tan, f64_exp, f64_log, f64_atan2,
];
(function () {
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index 0ec3296b03..d254c9e620 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -98,6 +98,26 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(clone.constructor, compiled_module.constructor);
})();
+(function SerializeWrappersWithSameSignature() {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_i_v)
+ .addBody([kExprI32Const, 42])
+ .exportFunc();
+ builder.addFunction("main_same_signature", kSig_i_v)
+ .addBody([kExprI32Const, 23])
+ .exportFunc();
+
+ var wire_bytes = builder.toBuffer();
+ var compiled_module = new WebAssembly.Module(wire_bytes);
+ var serialized = %SerializeWasmModule(compiled_module);
+ var clone = %DeserializeWasmModule(serialized, wire_bytes);
+
+ assertNotNull(clone);
+ assertFalse(clone == undefined);
+ assertFalse(clone == compiled_module);
+ assertEquals(clone.constructor, compiled_module.constructor);
+})();
+
(function SerializeAfterInstantiation() {
let builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_v)
diff --git a/deps/v8/test/mjsunit/wasm/errors.js b/deps/v8/test/mjsunit/wasm/errors.js
index a90236459f..0d4893c18a 100644
--- a/deps/v8/test/mjsunit/wasm/errors.js
+++ b/deps/v8/test/mjsunit/wasm/errors.js
@@ -160,15 +160,22 @@ function assertConversionError(bytes, imports, msg) {
(function TestConversionError() {
let b = builder();
- b.addImport("foo", "bar", kSig_v_l);
- assertConversionError(b.addFunction("run", kSig_v_v).addBody([
- kExprI64Const, 0, kExprCallFunction, 0
- ]).exportFunc().end().toBuffer(), {foo:{bar: (l)=>{}}}, "invalid type");
-
- b = builder()
- assertConversionError(builder().addFunction("run", kSig_l_v).addBody([
- kExprI64Const, 0
- ]).exportFunc().end().toBuffer(), {}, "invalid type");
+ b.addImport('foo', 'bar', kSig_v_l);
+ let buffer = b.addFunction('run', kSig_v_v)
+ .addBody([kExprI64Const, 0, kExprCallFunction, 0])
+ .exportFunc()
+ .end()
+ .toBuffer();
+ assertConversionError(
+ buffer, {foo: {bar: (l) => {}}}, kTrapMsgs[kTrapTypeError]);
+
+ buffer = builder()
+ .addFunction('run', kSig_l_v)
+ .addBody([kExprI64Const, 0])
+ .exportFunc()
+ .end()
+ .toBuffer();
+ assertConversionError(buffer, {}, kTrapMsgs[kTrapTypeError]);
})();
@@ -178,7 +185,7 @@ function assertConversionError(bytes, imports, msg) {
builder.addImport("mod", "func", sig);
builder.addFunction("main", sig)
.addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0])
- .exportAs("main")
+ .exportAs("main");
var main = builder.instantiate({
mod: {
func: ()=>{%DebugTrace();}
diff --git a/deps/v8/test/mjsunit/wasm/function-prototype.js b/deps/v8/test/mjsunit/wasm/function-prototype.js
index d3356cec80..c2f1edd5c4 100644
--- a/deps/v8/test/mjsunit/wasm/function-prototype.js
+++ b/deps/v8/test/mjsunit/wasm/function-prototype.js
@@ -17,8 +17,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var func = builder.instantiate().exports.nine;
// Check type and existence of prototype
- assertEquals("function", typeof func.apply);
- assertTrue(func.prototype != undefined);
+ assertEquals('function', typeof func);
+ assertEquals('function', typeof func.apply);
+ assertEquals('prototype' in func, false);
assertEquals(String(f.index), func.name);
assertEquals(undefined, func.displayName);
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-function.js
index 2c89e45c3e..2c89e45c3e 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-function.js
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index 88d1bb719a..9176ec4932 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -33,7 +33,7 @@ function AddFunctions(builder) {
function js_div(a, b) { return (a / b) | 0; }
(function ExportedTableTest() {
- print("ExportedTableTest...");
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
@@ -102,9 +102,9 @@ function js_div(a, b) { return (a / b) | 0; }
})();
-(function ImportedTableTest() {
+(function ImportedTableTest1() {
let kTableSize = 10;
- print("ImportedTableTest...");
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
let d = builder.addImport("q", "js_div", kSig_i_ii);
@@ -172,9 +172,9 @@ function js_div(a, b) { return (a / b) | 0; }
}
})();
-(function ImportedTableTest() {
+(function ImportedTableTest2() {
let kTableSize = 10;
- print("ManualTableTest...");
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
@@ -240,7 +240,7 @@ function js_div(a, b) { return (a / b) | 0; }
(function CumulativeTest() {
- print("CumulativeTest...");
+ print(arguments.callee.name);
let kTableSize = 10;
let table = new WebAssembly.Table(
@@ -251,7 +251,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addImportedTable("x", "table", kTableSize, kTableSize);
let g = builder.addImportedGlobal("x", "base", kWasmI32);
let sig_index = builder.addType(kSig_i_v);
- builder.addFunction("g", sig_index)
+ let f = builder.addFunction("f", sig_index)
.addBody([
kExprGetGlobal, g
]);
@@ -260,7 +260,7 @@ function js_div(a, b) { return (a / b) | 0; }
kExprGetLocal, 0,
kExprCallIndirect, sig_index, kTableZero]) // --
.exportAs("main");
- builder.addFunctionTableInit(g, true, [g]);
+ builder.addFunctionTableInit(g, true, [f.index]);
let module = new WebAssembly.Module(builder.toBuffer());
@@ -283,7 +283,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function TwoWayTest() {
- print("TwoWayTest...");
+ print(arguments.callee.name);
let kTableSize = 3;
// Module {m1} defines the table and exports it.
@@ -342,7 +342,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function MismatchedTableSize() {
- print("MismatchedTableSize...");
+ print(arguments.callee.name);
let kTableSize = 5;
for (var expsize = 1; expsize < 4; expsize++) {
@@ -374,7 +374,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function TableGrowBoundsCheck() {
- print("TableGrowBoundsCheck");
+ print(arguments.callee.name);
var kMaxSize = 30, kInitSize = 5;
let table = new WebAssembly.Table({element: "anyfunc",
initial: kInitSize, maximum: kMaxSize});
@@ -398,7 +398,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function CumulativeGrowTest() {
- print("CumulativeGrowTest...");
+ print(arguments.callee.name);
let table = new WebAssembly.Table({
element: "anyfunc", initial: 10, maximum: 30});
var builder = new WasmModuleBuilder();
@@ -460,7 +460,7 @@ function js_div(a, b) { return (a / b) | 0; }
(function TestImportTooLarge() {
- print("TestImportTooLarge...");
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addImportedTable("t", "t", 1, 2);
@@ -478,7 +478,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function TableImportLargerThanCompiled() {
- print("TableImportLargerThanCompiled...");
+ print(arguments.callee.name);
var kMaxSize = 30, kInitSize = 5;
var builder = new WasmModuleBuilder();
builder.addImportedTable("x", "table", 1, 35);
@@ -492,7 +492,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function ModulesShareTableAndGrow() {
- print("ModulesShareTableAndGrow...");
+ print(arguments.callee.name);
let module1 = (() => {
let builder = new WasmModuleBuilder();
builder.addImportedTable("x", "table", 1, 35);
@@ -525,7 +525,7 @@ function js_div(a, b) { return (a / b) | 0; }
(function MultipleElementSegments() {
let kTableSize = 10;
- print("MultipleElementSegments...");
+ print(arguments.callee.name);
let mul = (a, b) => a * b;
let add = (a, b) => a + b;
@@ -603,7 +603,8 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function IndirectCallIntoOtherInstance() {
- print("IndirectCallIntoOtherInstance...");
+ print(arguments.callee.name);
+
var mem_1 = new WebAssembly.Memory({initial: 1});
var mem_2 = new WebAssembly.Memory({initial: 1});
var view_1 = new Int32Array(mem_1.buffer);
@@ -644,7 +645,7 @@ function js_div(a, b) { return (a / b) | 0; }
(function ImportedFreestandingTable() {
- print("ImportedFreestandingTable...");
+ print(arguments.callee.name);
function forceGc() {
gc();
@@ -709,7 +710,8 @@ function js_div(a, b) { return (a / b) | 0; }
// Remove this test when v8:7232 is addressed comprehensively.
(function TablesAreImmutableInWasmCallstacks() {
- print('TablesAreImmutableInWasmCallstacks...');
+ print(arguments.callee.name);
+
let table = new WebAssembly.Table({initial:2, element:'anyfunc'});
let builder = new WasmModuleBuilder();
@@ -743,3 +745,93 @@ function js_div(a, b) { return (a / b) | 0; }
table.set(0, null);
assertEquals(null, table.get(0));
})();
+
+(function ImportedWasmFunctionPutIntoTable() {
+ print(arguments.callee.name);
+
+ let wasm_mul = (() => {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("mul", kSig_i_ii)
+ .addBody(
+ [kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprI32Mul])
+ .exportFunc();
+ return builder.instantiate().exports.mul;
+ })();
+
+ let builder = new WasmModuleBuilder();
+
+ let j = builder.addImport("q", "js_div", kSig_i_ii);
+ let w = builder.addImport("q", "wasm_mul", kSig_i_ii);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 33, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
+
+ builder.setFunctionTableBounds(10, 10);
+ let g = builder.addImportedGlobal("q", "base", kWasmI32);
+ builder.addFunctionTableInit(g, true, [j, w]);
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ for (var i = 0; i < 5; i++) {
+ let instance = new WebAssembly.Instance(module, {q: {base: i, js_div: js_div, wasm_mul: wasm_mul}});
+ let j = i + 1;
+
+ assertThrows(() => {instance.exports.main(j, i-1)});
+ assertEquals((33/j)|0, instance.exports.main(j, i+0));
+ assertEquals((33*j)|0, instance.exports.main(j, i+1));
+ assertThrows(() => {instance.exports.main(j, i+2)});
+ }
+
+})();
+
+(function ImportedWasmFunctionPutIntoImportedTable() {
+ print(arguments.callee.name);
+
+ let kTableSize = 10;
+
+ let wasm_mul = (() => {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("mul", kSig_i_ii)
+ .addBody(
+ [kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprI32Mul])
+ .exportFunc();
+ return builder.instantiate().exports.mul;
+ })();
+
+ let table = new WebAssembly.Table({element: "anyfunc",
+ initial: kTableSize,
+ maximum: kTableSize});
+
+ let builder = new WasmModuleBuilder();
+
+ let j = builder.addImport("q", "js_div", kSig_i_ii);
+ let w = builder.addImport("q", "wasm_mul", kSig_i_ii);
+ builder.addImportedTable("q", "table", kTableSize, kTableSize);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 44, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
+
+ let g = builder.addImportedGlobal("q", "base", kWasmI32);
+ builder.addFunctionTableInit(g, true, [j, w]);
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+ for (var i = 0; i < 5; i++) {
+ let instance = new WebAssembly.Instance(module, {q: {base: i, js_div: js_div, wasm_mul: wasm_mul, table: table}});
+ let j = i + 1;
+
+ assertEquals((44/j)|0, instance.exports.main(j, i+0));
+ assertEquals((44*j)|0, instance.exports.main(j, i+1));
+ assertThrows(() => {instance.exports.main(j, i+2)});
+ }
+})();
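Both new tests above dispatch through the table with call_indirect: the constant (33 or 44) and the first parameter become the callee's arguments, while the second parameter selects the table slot. The element segment writes js_div at slot base and wasm_mul at slot base + 1, which explains the assertion pattern:

// Slot layout per instance (base = i), for the first test's constant 33:
//   table[i]     = js_div   -> main(j, i)     === (33 / j) | 0
//   table[i + 1] = wasm_mul -> main(j, i + 1) === (33 * j) | 0
//   other slots  = uninitialized             -> RuntimeError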
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index f5697eb00f..a57498b9a8 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -296,7 +296,7 @@ function checkStack(stack, expected_lines) {
} catch (e) {
if (!(e instanceof TypeError)) throw e;
checkStack(stripPath(e.stack), [
- 'TypeError: invalid type', // -
+ 'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
' at direct (wasm-function[1]:1)', // -
' at main (wasm-function[3]:3)', // -
/^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
@@ -309,7 +309,7 @@ function checkStack(stack, expected_lines) {
} catch (e) {
if (!(e instanceof TypeError)) throw e;
checkStack(stripPath(e.stack), [
- 'TypeError: invalid type', // -
+ 'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
' at indirect (wasm-function[2]:1)', // -
' at main (wasm-function[3]:3)', // -
/^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
@@ -493,3 +493,30 @@ function checkStack(stack, expected_lines) {
tab.set(0, instance1.exports.exp);
instance2.exports.call2();
})();
+
+(function testTableCall3() {
+ // See crbug.com/814562.
+ print(arguments.callee.name);
+ const builder0 = new WasmModuleBuilder();
+ const sig_index = builder0.addType(kSig_i_v);
+ builder0.addFunction('main', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprCallIndirect, sig_index, kTableZero
+ ]) // --
+ .exportAs('main');
+ builder0.setFunctionTableBounds(3, 3);
+ builder0.addExportOfKind('table', kExternalTable);
+ const module0 = new WebAssembly.Module(builder0.toBuffer());
+ const instance0 = new WebAssembly.Instance(module0);
+
+ const builder1 = new WasmModuleBuilder();
+ builder1.addFunction('main', kSig_i_v).addBody([kExprUnreachable]);
+ builder1.addImportedTable('z', 'table');
+ builder1.addFunctionTableInit(0, false, [0], true);
+ const module1 = new WebAssembly.Module(builder1.toBuffer());
+ const instance1 =
+ new WebAssembly.Instance(module1, {z: {table: instance0.exports.table}});
+ assertThrows(
+ () => instance0.exports.main(0), WebAssembly.RuntimeError, 'unreachable');
+})();
diff --git a/deps/v8/test/mjsunit/wasm/liftoff.js b/deps/v8/test/mjsunit/wasm/liftoff.js
index b65f83f9ec..c57cbfc090 100644
--- a/deps/v8/test/mjsunit/wasm/liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/liftoff.js
@@ -7,6 +7,22 @@
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
+(function testLiftoffFlag() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('i32_add', kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .exportFunc();
+
+ const module = new WebAssembly.Module(builder.toBuffer());
+ const instance = new WebAssembly.Instance(module);
+ const instance2 = new WebAssembly.Instance(module);
+
+ assertEquals(%IsLiftoffFunction(instance.exports.i32_add),
+ %IsLiftoffFunction(instance2.exports.i32_add));
+})();
+
+
(function testLiftoffSync() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/memory-size.js b/deps/v8/test/mjsunit/wasm/memory-size.js
index 0c96efb798..f803df2e3d 100644
--- a/deps/v8/test/mjsunit/wasm/memory-size.js
+++ b/deps/v8/test/mjsunit/wasm/memory-size.js
@@ -7,8 +7,11 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
+var kV8MaxWasmMemoryPages = 32767; // ~ 2 GiB
+var kSpecMaxWasmMemoryPages = 65536; // 4 GiB
+
(function testMemorySizeZero() {
- print("testMemorySizeZero()");
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
builder.addMemory(0, 0, false);
builder.addFunction("memory_size", kSig_i_v)
@@ -19,7 +22,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
})();
(function testMemorySizeNonZero() {
- print("testMemorySizeNonZero()");
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
var size = 11;
builder.addMemory(size, size, false);
@@ -29,3 +32,42 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var module = builder.instantiate();
assertEquals(size, module.exports.memory_size());
})();
+
+(function testMemorySizeSpecMaxOk() {
+ print(arguments.callee.name);
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1, kSpecMaxWasmMemoryPages, true);
+ builder.addFunction("memory_size", kSig_i_v)
+ .addBody([kExprMemorySize, kMemoryZero])
+ .exportFunc();
+ var module = builder.instantiate();
+ assertEquals(1, module.exports.memory_size());
+})();
+
+(function testMemorySizeV8MaxPlus1Throws() {
+ print(arguments.callee.name);
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(kV8MaxWasmMemoryPages + 1,
+ kV8MaxWasmMemoryPages + 1, false);
+ builder.addFunction("memory_size", kSig_i_v)
+ .addBody([kExprMemorySize, kMemoryZero])
+ .exportFunc();
+ assertThrows(() => builder.instantiate());
+})();
+
+(function testMemorySpecMaxOk() {
+ print(arguments.callee.name);
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1, kSpecMaxWasmMemoryPages, false);
+ builder.addFunction("memory_size", kSig_i_v)
+ .addBody([kExprMemorySize, kMemoryZero])
+ .exportFunc();
+ var module = builder.instantiate();
+ assertEquals(1, module.exports.memory_size());
+})();
+
+(function testMemoryInitialMaxPlus1Throws() {
+ print(arguments.callee.name);
+ assertThrows(() => new WebAssembly.Memory(
+      {initial: kV8MaxWasmMemoryPages + 1}));
+})();
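With 64 KiB per wasm page, the two limits declared at the top of this file work out as follows; the arithmetic can be checked directly:

const kPageSize = 64 * 1024;
// V8's per-module limit at this version: 64 KiB short of 2 GiB.
console.assert(32767 * kPageSize === 2 ** 31 - 2 ** 16);
// The spec maximum: exactly 4 GiB.
console.assert(65536 * kPageSize === 2 ** 32);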
diff --git a/deps/v8/test/mjsunit/wasm/table-grow.js b/deps/v8/test/mjsunit/wasm/table-grow.js
index a9a4ba298c..c78d272139 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow.js
@@ -286,7 +286,7 @@ let id = (() => { // identity exported function
assertInvalidFunction = function(s) {
assertThrows(
() => instances[i].exports.main(s), WebAssembly.RuntimeError,
- /invalid function/);
+ kTrapMsgs[kTrapFuncInvalid]);
}
assertInvalidFunction(size);
assertInvalidFunction(size + 1);
diff --git a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
deleted file mode 100644
index e298468350..0000000000
--- a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --allow-natives-syntax
-
-load("test/mjsunit/wasm/wasm-constants.js");
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-var expect_elison = 0;
-var expect_no_elison = 1;
-// function calls stack: first_export -> first_func -> first_import ->
-// second_export -> second_import
-// In this case, first_import and second_export have same signature,
-// So that wrappers will be removed
-(function TestWasmWrapperElision() {
- var imported = function (a) {
- return a;
- };
-
- var second_module = new WasmModuleBuilder();
- var sig_index = second_module.addType(kSig_i_i);
- second_module
- .addImport("import_module_2", "import_name_2", sig_index);
- second_module
- .addFunction("second_export", sig_index)
- .addBody([
- kExprGetLocal, 0,
- kExprCallFunction, 0,
- kExprReturn
- ])
- .exportFunc();
-
- var first_module = new WasmModuleBuilder();
- var sig_index = first_module.addType(kSig_i_i);
- first_module
- .addImport("import_module_1", "import_name_1", sig_index);
- first_module
- .addFunction("first_export", sig_index)
- .addBody([
- kExprGetLocal, 0,
- kExprCallFunction, 2,
- kExprReturn
- ])
- .exportFunc();
- first_module
- .addFunction("first_func", sig_index)
- .addBody([
- kExprI32Const, 1,
- kExprGetLocal, 0,
- kExprI32Add,
- kExprCallFunction, 0,
- kExprReturn
- ]);
-
- var f = second_module
- .instantiate({import_module_2: {import_name_2: imported}})
- .exports.second_export;
- var the_export = first_module
- .instantiate({import_module_1: {import_name_1: f}})
- .exports.first_export;
- assertEquals(the_export(2), 3);
- assertEquals(the_export(-1), 0);
- assertEquals(the_export(0), 1);
- assertEquals(the_export(5.5), 6);
- assertEquals(%CheckWasmWrapperElision(the_export, expect_elison), true);
-})();
-
-// Function calls stack: first_export -> first_func -> first_import ->
-// second_export -> second_import
-// In this test, first_import and second_export have the same signature, and
-// therefore the wrappers will be removed. If the wrappers are not removed, then
-// the test crashes because of the int64 parameter, which is not allowed in the
-// wrappers.
-(function TestWasmWrapperElisionInt64() {
- var imported = function (a) {
- return a;
- };
-
- var second_module = new WasmModuleBuilder();
- var sig_index1 = second_module.addType(kSig_i_i);
- var sig_index_ll = second_module.addType(kSig_l_l);
- second_module
- .addImport("import_module_2", "import_name_2", sig_index1);
- second_module
- .addFunction("second_export", sig_index_ll)
- .addBody([
- kExprGetLocal, 0,
- kExprI32ConvertI64,
- kExprCallFunction, 0,
- kExprI64SConvertI32,
- kExprReturn
- ])
- .exportFunc();
-
- var first_module = new WasmModuleBuilder();
- var sig_index = first_module.addType(kSig_i_v);
- var sig_index_ll = first_module.addType(kSig_l_l);
- first_module
- .addImport("import_module_1", "import_name_1", sig_index_ll);
- first_module
- .addFunction("first_export", sig_index)
- .addBody([
- kExprI64Const, 2,
- kExprCallFunction, 2,
- kExprI32ConvertI64,
- kExprReturn
- ])
- .exportFunc();
- first_module
- .addFunction("first_func", sig_index_ll)
- .addBody([
- kExprI64Const, 1,
- kExprGetLocal, 0,
- kExprI64Add,
- kExprCallFunction, 0,
- kExprReturn
- ]);
-
- var f = second_module
- .instantiate({import_module_2: {import_name_2: imported}})
- .exports.second_export;
- var the_export = first_module
- .instantiate({import_module_1: {import_name_1: f}})
- .exports.first_export;
- assertEquals(the_export(), 3);
-})();
-
-// function calls stack: first_export -> first_func -> first_import ->
-// second_export -> second_import
-// In this case, second_export has fewer params than first_import,
-// so instantiation should fail.
-assertThrows(function TestWasmWrapperNoElisionLessParams() {
- var imported = function (a) {
- return a;
- };
-
- var second_module = new WasmModuleBuilder();
- var sig_index_1 = second_module.addType(kSig_i_i);
- second_module
- .addImport("import_module_2", "import_name_2", sig_index_1);
- second_module
- .addFunction("second_export", sig_index_1)
- .addBody([
- kExprGetLocal, 0,
- kExprCallFunction, 0,
- kExprReturn
- ])
- .exportFunc();
-
- var first_module = new WasmModuleBuilder();
- var sig_index_2 = first_module.addType(kSig_i_ii);
- first_module
- .addImport("import_module_1", "import_name_1", sig_index_2);
- first_module
- .addFunction("first_export", sig_index_2)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprCallFunction, 2,
- kExprReturn
- ])
- .exportFunc();
- first_module
- .addFunction("first_func", sig_index_2)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprCallFunction, 0,
- kExprReturn
- ]);
-
- var f = second_module
- .instantiate({import_module_2: {import_name_2: imported}})
- .exports.second_export;
- var the_export = first_module
- .instantiate({import_module_1: {import_name_1: f}})
- .exports.first_export;
- assertEquals(the_export(4, 5), 4);
- assertEquals(the_export(-1, 4), -1);
- assertEquals(the_export(0, 2), 0);
- assertEquals(the_export(9.9, 4.3), 9);
- assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elison), true);
-});
-
-// function calls stack: first_export -> first_func -> first_import ->
-// second_export -> second_import
-// In this case, second_export has more params than first_import,
-// so instantiation should fail.
-assertThrows(function TestWasmWrapperNoElisionMoreParams() {
- var imported = function (a, b, c) {
- return a+b+c;
- };
-
- var second_module = new WasmModuleBuilder();
- var sig_index_3 = second_module.addType(kSig_i_iii);
- second_module
- .addImport("import_module_2", "import_name_2", sig_index_3);
- second_module
- .addFunction("second_export", sig_index_3)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprGetLocal, 2,
- kExprCallFunction, 0,
- kExprReturn
- ])
- .exportFunc();
-
- var first_module = new WasmModuleBuilder();
- var sig_index_2 = first_module.addType(kSig_i_ii);
- first_module
- .addImport("import_module_1", "import_name_1", sig_index_2);
- first_module
- .addFunction("first_export", sig_index_2)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprCallFunction, 2,
- kExprReturn
- ])
- .exportFunc();
- first_module
- .addFunction("first_func", sig_index_2)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprCallFunction, 0,
- kExprReturn
- ]);
-
- var f = second_module
- .instantiate({import_module_2: {import_name_2: imported}})
- .exports.second_export;
- var the_export = first_module
- .instantiate({import_module_1: {import_name_1: f}})
- .exports.first_export;
- assertEquals(the_export(5, 6), 11);
- assertEquals(the_export(-1, -4), -5);
- assertEquals(the_export(0, 0), 0);
- assertEquals(the_export(1.1, 2.7), 3);
- assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elison), true);
-});
-
-// function calls stack: first_export -> first_func -> first_import ->
-// second_export -> second_import
-// In this case, second_export has different params type with first_import,
-// so instantiation should fail.
-assertThrows(function TestWasmWrapperNoElisionTypeMismatch() {
- var imported = function (a, b) {
- return a+b;
- };
-
- var second_module = new WasmModuleBuilder();
- var sig_index_2 = second_module.addType(kSig_d_dd);
- second_module
- .addImport("import_module_2", "import_name_2", sig_index_2);
- second_module
- .addFunction("second_export", sig_index_2)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprCallFunction, 0,
- kExprReturn
- ])
- .exportFunc();
-
- var first_module = new WasmModuleBuilder();
- var sig_index_2 = first_module.addType(kSig_i_ii);
- first_module
- .addImport("import_module_1", "import_name_1", sig_index_2);
- first_module
- .addFunction("first_export", sig_index_2)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprCallFunction, 2,
- kExprReturn
- ])
- .exportFunc();
- first_module
- .addFunction("first_func", sig_index_2)
- .addBody([
- kExprGetLocal, 0,
- kExprGetLocal, 1,
- kExprCallFunction, 0,
- kExprReturn
- ]);
-
- var f = second_module
- .instantiate({import_module_2: {import_name_2: imported}})
- .exports.second_export;
- var the_export = first_module
- .instantiate({import_module_1: {import_name_1: f}})
- .exports.first_export;
- assertEquals(the_export(2.8, 9.1), 11);
- assertEquals(the_export(-1.7, -2.5), -3);
- assertEquals(the_export(0.0, 0.0), 0);
- assertEquals(the_export(2, -2), 0);
- assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elison), true);
-});
-
-
-(function TestSimpleI64Ret() {
- var builder = new WasmModuleBuilder();
- builder.addFunction("exp", kSig_l_v)
- .addBody([
- kExprI64Const, 23
- ])
- .exportFunc();
- var exported = builder.instantiate().exports.exp;
-
- var builder = new WasmModuleBuilder();
- builder.addImport("imp", "func", kSig_l_v);
- builder.addFunction("main", kSig_i_v)
- .addBody([
- kExprCallFunction, 0,
- kExprI32ConvertI64
- ])
- .exportFunc();
-
- var instance = builder.instantiate({imp: {func: exported}});
-
- assertEquals(23, instance.exports.main());
-
-})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index 4c86065b89..4a303c77ac 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -114,7 +114,7 @@ let kSig_i_dd = makeSig([kWasmF64, kWasmF64], [kWasmI32]);
let kSig_v_v = makeSig([], []);
let kSig_i_v = makeSig([], [kWasmI32]);
let kSig_l_v = makeSig([], [kWasmI64]);
-let kSig_f_v = makeSig([], [kWasmF64]);
+let kSig_f_v = makeSig([], [kWasmF32]);
let kSig_d_v = makeSig([], [kWasmF64]);
let kSig_v_i = makeSig([kWasmI32], []);
let kSig_v_ii = makeSig([kWasmI32, kWasmI32], []);
@@ -374,7 +374,7 @@ let kTrapRemByZero = 4;
let kTrapFloatUnrepresentable = 5;
let kTrapFuncInvalid = 6;
let kTrapFuncSigMismatch = 7;
-let kTrapInvalidIndex = 8;
+let kTrapTypeError = 8;
let kTrapMsgs = [
"unreachable",
@@ -382,10 +382,10 @@ let kTrapMsgs = [
"divide by zero",
"divide result unrepresentable",
"remainder by zero",
- "integer result unrepresentable",
- "invalid function",
+ "float unrepresentable in integer range",
+ "invalid index into function table",
"function signature mismatch",
- "invalid index into function table"
+ "wasm function signature contains illegal type"
];
function assertTraps(trap, code) {
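After this change the trap-message table lines up with the renumbered constants as follows (index 1 is assumed to be the unchanged memory-out-of-bounds entry, which falls outside the hunks shown):

// 0                            "unreachable"
// 1                            "memory access out of bounds"  (unchanged)
// 2                            "divide by zero"
// 3                            "divide result unrepresentable"
// 4 kTrapRemByZero          -> "remainder by zero"
// 5 kTrapFloatUnrepresentable -> "float unrepresentable in integer range"
// 6 kTrapFuncInvalid        -> "invalid index into function table"
// 7 kTrapFuncSigMismatch    -> "function signature mismatch"
// 8 kTrapTypeError          -> "wasm function signature contains illegal type"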
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.gyp b/deps/v8/test/mkgrokdump/mkgrokdump.gyp
deleted file mode 100644
index 56f9ad14e0..0000000000
--- a/deps/v8/test/mkgrokdump/mkgrokdump.gyp
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'v8_code': 1,
- },
- 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
- 'targets': [
- {
- 'target_name': 'mkgrokdump',
- 'type': 'executable',
- 'dependencies': [
- '../../src/v8.gyp:v8',
- '../../src/v8.gyp:v8_libbase',
- '../../src/v8.gyp:v8_libplatform',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- 'mkgrokdump.cc',
- ],
- },
- ],
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'mkgrokdump_run',
- 'type': 'none',
- 'dependencies': [
- 'mkgrokdump',
- ],
- 'includes': [
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'mkgrokdump.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/mkgrokdump/testcfg.py b/deps/v8/test/mkgrokdump/testcfg.py
index de8e71f7ea..d8f0380e75 100644
--- a/deps/v8/test/mkgrokdump/testcfg.py
+++ b/deps/v8/test/mkgrokdump/testcfg.py
@@ -18,7 +18,7 @@ class TestSuite(testsuite.TestSuite):
v8_path = os.path.dirname(os.path.dirname(os.path.abspath(self.root)))
self.expected_path = os.path.join(v8_path, 'tools', 'v8heapconst.py')
- def ListTests(self, context):
+ def ListTests(self):
test = self._create_test(SHELL)
return [test]
@@ -33,7 +33,7 @@ class TestCase(testcase.TestCase):
def _get_statusfile_flags(self):
return []
- def _get_mode_flags(self, ctx):
+ def _get_mode_flags(self):
return []
def get_shell(self):
@@ -44,5 +44,5 @@ class TestCase(testcase.TestCase):
return mkgrokdump.OutProc(self.expected_outcomes, self.suite.expected_path)
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/mozilla/mozilla.gyp b/deps/v8/test/mozilla/mozilla.gyp
deleted file mode 100644
index 0327dd8dab..0000000000
--- a/deps/v8/test/mozilla/mozilla.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'mozilla_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'mozilla.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 40334cdcd8..2437821722 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -118,6 +118,10 @@
# Invalid according to ES2015 syntax. (Details: crbug.com/726625)
'ecma_3/extensions/regress-368516': [FAIL],
+ # Function.prototype.toString revision.
+ # (https://chromium-review.googlesource.com/c/546941#message-5dc0f5efe277388501e8870e39ec70383dfc7db8)
+ 'js1_5/Scope/regress-185485': [FAIL],
+
##################### SKIPPED TESTS #####################
# This test checks that we behave properly in an out-of-memory
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 8e69b7d9b7..52ba9dcbcb 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -56,11 +56,11 @@ TEST_DIRS = """
class TestSuite(testsuite.TestSuite):
- def __init__(self, name, root):
- super(TestSuite, self).__init__(name, root)
- self.testroot = os.path.join(root, "data")
+ def __init__(self, *args, **kwargs):
+ super(TestSuite, self).__init__(*args, **kwargs)
+ self.testroot = os.path.join(self.root, "data")
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for testdir in TEST_DIRS:
current_root = os.path.join(self.testroot, testdir)
@@ -86,7 +86,7 @@ class TestSuite(testsuite.TestSuite):
class TestCase(testcase.TestCase):
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
files = [os.path.join(self.suite.root, "mozilla-shell-emulation.js")]
testfilename = self.path + ".js"
testfilepath = testfilename.split("/")
@@ -100,7 +100,7 @@ class TestCase(testcase.TestCase):
files.append(os.path.join(self.suite.testroot, testfilename))
return files
- def _get_suite_flags(self, ctx):
+ def _get_suite_flags(self):
return ['--expose-gc']
def _get_source_path(self):
@@ -118,5 +118,5 @@ class TestCase(testcase.TestCase):
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/optimize_for_size.gyp b/deps/v8/test/optimize_for_size.gyp
deleted file mode 100644
index 8728479c23..0000000000
--- a/deps/v8/test/optimize_for_size.gyp
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'optimize_for_size_run',
- 'type': 'none',
- 'dependencies': [
- 'cctest/cctest.gyp:cctest_run',
- 'debugger/debugger.gyp:debugger_run',
- 'intl/intl.gyp:intl_run',
- 'mjsunit/mjsunit.gyp:mjsunit_run',
- 'webkit/webkit.gyp:webkit_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'optimize_for_size.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/perf.gyp b/deps/v8/test/perf.gyp
deleted file mode 100644
index 4f024f774f..0000000000
--- a/deps/v8/test/perf.gyp
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'perf_run',
- 'type': 'none',
- 'dependencies': [
- 'cctest/cctest.gyp:cctest_exe_run',
- '../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'perf.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/preparser/preparser.gyp b/deps/v8/test/preparser/preparser.gyp
deleted file mode 100644
index 8e944ed009..0000000000
--- a/deps/v8/test/preparser/preparser.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'preparser_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'preparser.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index f6814e756f..0ffde419d4 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -60,7 +60,7 @@ class TestSuite(testsuite.TestSuite):
return MkTest
execfile(pathname, {"Test": Test, "Template": Template})
- def ListTests(self, context):
+ def ListTests(self):
result = []
# Find all .pyt files in this directory.
@@ -77,33 +77,30 @@ class TestSuite(testsuite.TestSuite):
def _test_class(self):
return TestCase
- def _LegacyVariantsGeneratorFactory(self):
- return testsuite.StandardLegacyVariantsGenerator
-
def _variants_gen_class(self):
return VariantsGenerator
class TestCase(testcase.TestCase):
- def __init__(self, suite, path, name, source, template_flags):
- super(TestCase, self).__init__(suite, path, name)
+ def __init__(self, suite, path, name, test_config, source, template_flags):
+ super(TestCase, self).__init__(suite, path, name, test_config)
self._source = source
self._template_flags = template_flags
- def _get_cmd_params(self, ctx):
+ def _get_cmd_params(self):
return (
- self._get_files_params(ctx) +
- self._get_extra_flags(ctx) +
+ self._get_files_params() +
+ self._get_extra_flags() +
['-e', self._source] +
self._template_flags +
self._get_variant_flags() +
self._get_statusfile_flags() +
- self._get_mode_flags(ctx) +
+ self._get_mode_flags() +
self._get_source_flags()
)
- def _get_mode_flags(self, ctx):
+ def _get_mode_flags(self):
return []
def is_source_available(self):
@@ -113,5 +110,5 @@ class TestCase(testcase.TestCase):
return self._source
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/test262/local-tests/test/intl402/DateTimeFormat/12.1.1_1.js b/deps/v8/test/test262/local-tests/test/intl402/DateTimeFormat/12.1.1_1.js
index ef843d4bb4..9cabc6e50b 100644
--- a/deps/v8/test/test262/local-tests/test/intl402/DateTimeFormat/12.1.1_1.js
+++ b/deps/v8/test/test262/local-tests/test/intl402/DateTimeFormat/12.1.1_1.js
@@ -31,12 +31,5 @@ testWithIntlConstructors(function (Constructor) {
$ERROR("DateTimeFormat object created with \"new\" was not ignored as this-value.");
}
- // variant 2: use constructor as a function
- obj = Constructor();
- newObj = Intl.DateTimeFormat.call(obj);
- if (obj === newObj) {
- $ERROR("DateTimeFormat object created with constructor as function was not ignored as this-value.");
- }
-
return true;
});
diff --git a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/11.1.1_1.js b/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/11.1.1_1.js
index 2d42609acb..b492a41183 100644
--- a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/11.1.1_1.js
+++ b/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/11.1.1_1.js
@@ -31,12 +31,5 @@ testWithIntlConstructors(function (Constructor) {
$ERROR("NumberFormat object created with \"new\" was not ignored as this-value.");
}
- // variant 2: use constructor as a function
- obj = Constructor();
- newObj = Intl.NumberFormat.call(obj);
- if (obj === newObj) {
- $ERROR("NumberFormat object created with constructor as function was not ignored as this-value.");
- }
-
return true;
});
diff --git a/deps/v8/test/test262/test262.gyp b/deps/v8/test/test262/test262.gyp
deleted file mode 100644
index eb14da4010..0000000000
--- a/deps/v8/test/test262/test262.gyp
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'test262_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'test262.isolate',
- ],
- 'actions': [
- {
- 'action_name': 'archive_test262',
- 'inputs': ['archive.py', '<!@(python list.py --quoted)'],
- 'outputs': ['data.tar'],
- 'action': ['python', 'archive.py'],
- },
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index bd4b667a09..ff15074215 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -87,28 +87,29 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=4895
'built-ins/TypedArrays/internals/DefineOwnProperty/detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/DefineOwnProperty/detached-buffer-realm': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/tonumber-value-detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/Get/detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/Get/detached-buffer-realm': [FAIL],
+ 'built-ins/TypedArrays/internals/Get/infinity-detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/GetOwnProperty/detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/GetOwnProperty/detached-buffer-realm': [FAIL],
+ 'built-ins/TypedArrays/internals/GetOwnProperty/enumerate-detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/HasProperty/detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/HasProperty/detached-buffer-realm': [FAIL],
+ 'built-ins/TypedArrays/internals/HasProperty/infinity-with-detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/Set/detached-buffer': [FAIL],
'built-ins/TypedArrays/internals/Set/detached-buffer-realm': [FAIL],
'built-ins/TypedArrays/internals/Set/tonumber-value-detached-buffer': [FAIL],
- 'built-ins/TypedArrays/internals/DefineOwnProperty/tonumber-value-detached-buffer': [FAIL],
+ 'built-ins/TypedArrays/typedarray-arg-detached-when-species-retrieved-different-type': [FAIL],
+ 'built-ins/TypedArrays/typedarray-arg-detached-when-species-retrieved-same-type': [FAIL],
# Some TypedArray methods throw due to the same bug, from Get
'built-ins/TypedArray/prototype/every/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/filter/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/find/predicate-may-detach-buffer': [FAIL],
'built-ins/TypedArray/prototype/findIndex/predicate-may-detach-buffer': [FAIL],
'built-ins/TypedArray/prototype/forEach/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/map/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/reduce/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/reduceRight/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/slice/detached-buffer-custom-ctor-other-targettype': [FAIL],
- 'built-ins/TypedArray/prototype/slice/detached-buffer-custom-ctor-same-targettype': [FAIL],
- 'built-ins/TypedArray/prototype/slice/detached-buffer-get-ctor': [FAIL],
'built-ins/TypedArray/prototype/some/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/sort/detached-buffer-comparefn': [FAIL],
# DataView functions should also throw on detached buffers
@@ -267,7 +268,15 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=4698
'language/expressions/call/tco-call-args': [SKIP],
+ 'language/expressions/call/tco-cross-realm-class-construct': [SKIP],
+ 'language/expressions/call/tco-cross-realm-class-derived-construct': [SKIP],
+ 'language/expressions/call/tco-cross-realm-fun-call': [SKIP],
+ 'language/expressions/call/tco-cross-realm-fun-construct': [SKIP],
'language/expressions/call/tco-member-args': [SKIP],
+ 'language/expressions/call/tco-non-eval-function': [SKIP],
+ 'language/expressions/call/tco-non-eval-function-dynamic': [SKIP],
+ 'language/expressions/call/tco-non-eval-global': [SKIP],
+ 'language/expressions/call/tco-non-eval-with': [SKIP],
'language/expressions/comma/tco-final': [SKIP],
'language/expressions/conditional/tco-cond': [SKIP],
'language/expressions/conditional/tco-pos': [SKIP],
@@ -410,10 +419,6 @@
'built-ins/TypedArrays/buffer-arg-use-default-proto-if-custom-proto-is-not-object-sab': ['--harmony-sharedarraybuffer'],
'built-ins/TypedArrays/internals/Get/indexed-value-sab': ['--harmony-sharedarraybuffer'],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6045
- 'intl402/NumberFormat/prototype/format/11.3.2_TRF': [FAIL],
- 'intl402/NumberFormat/prototype/format/11.3.2_TRP': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=6049
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-caller': [FAIL_SLOPPY],
'built-ins/Object/internals/DefineOwnProperty/consistent-value-function-arguments': [FAIL_SLOPPY],
@@ -422,16 +427,6 @@
'built-ins/Proxy/ownKeys/return-duplicate-entries-throws': [FAIL],
'built-ins/Proxy/ownKeys/return-duplicate-symbol-entries-throws': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=6861
- 'language/expressions/object/method-definition/async-gen-yield-star-sync-next': [FAIL],
- 'language/expressions/class/async-gen-method-static-yield-star-sync-next': [FAIL],
- 'language/expressions/async-generator/yield-star-sync-next': [FAIL],
- 'language/statements/class/async-gen-method-static-yield-star-sync-next': [FAIL],
- 'language/expressions/async-generator/named-yield-star-sync-next': [FAIL],
- 'language/expressions/class/async-gen-method-yield-star-sync-next': [FAIL],
- 'language/statements/class/async-gen-method-yield-star-sync-next': [FAIL],
- 'language/statements/async-generator/yield-star-sync-next': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=6791
'built-ins/BigInt/prototype/Symbol.toStringTag': [SKIP],
'built-ins/DataView/prototype/getBigInt64/*': [SKIP],
@@ -440,6 +435,8 @@
'built-ins/DataView/prototype/setBigUint64/*': [SKIP],
'built-ins/TypedArrays/BigInt64Array/*': [SKIP],
'built-ins/TypedArrays/BigUint64Array/*': [SKIP],
+ # The current version of test262 still uses parseInt.
+ 'built-ins/BigInt/parseInt/*': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=7184
'annexB/language/expressions/yield/star-iterable-return-emulates-undefined-throws-when-called': [FAIL],
@@ -450,6 +447,59 @@
'language/statements/class/fields-indirect-eval-err-contains-arguments': [FAIL],
'language/expressions/class/fields-indirect-eval-err-contains-arguments': [FAIL],
+ # https://github.com/tc39/test262/pull/1452
+ 'built-ins/Object/setPrototypeOf/bigint': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7468
+ 'language/statements/class/privatename-not-valid-earlyerr-script-8': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5690
+ 'language/expressions/call/eval-spread': [FAIL],
+ 'language/expressions/call/eval-spread-empty-leading': [FAIL],
+ 'language/expressions/call/eval-spread-empty-trailing': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7470
+ 'language/module-code/namespace/internals/enumerate-binding-uninit': [FAIL],
+ 'language/module-code/namespace/internals/object-keys-binding-uninit': [FAIL],
+ 'language/module-code/namespace/internals/object-propertyIsEnumerable-binding-uninit': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7471
+ 'intl402/DateTimeFormat/prototype/format/time-clip-near-time-boundaries': [FAIL],
+ 'intl402/DateTimeFormat/prototype/format/time-clip-to-integer': [FAIL],
+ 'intl402/DateTimeFormat/prototype/formatToParts/time-clip-near-time-boundaries': [FAIL],
+ 'intl402/DateTimeFormat/prototype/formatToParts/time-clip-to-integer': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7472
+ 'intl402/NumberFormat/currency-digits': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7473
+ 'intl402/language-tags-canonicalized': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7474
+ 'intl402/NumberFormat/prototype/format/format-fraction-digits': [FAIL],
+ 'intl402/NumberFormat/prototype/format/format-significant-digits': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7475
+ 'built-ins/Date/UTC/non-integer-values': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7480
+ 'intl402/Collator/unicode-ext-seq-in-private-tag': [FAIL],
+ 'intl402/Collator/unicode-ext-seq-with-attribute': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7481
+ 'intl402/NumberFormat/ignore-invalid-unicode-ext-values': [FAIL],
+ 'intl402/DateTimeFormat/ignore-invalid-unicode-ext-values': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7482
+ 'intl402/DateTimeFormat/prototype/resolvedOptions/resolved-locale-with-hc-unicode': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7483
+ 'annexB/built-ins/Function/createdynfn-html-close-comment-params': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5012
+ 'intl402/Intl/getCanonicalLocales/canonicalized-tags': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/preferred-grandfathered': [FAIL],
+ 'intl402/Intl/getCanonicalLocales/preferred-variant': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -457,17 +507,7 @@
# to be either marked as bugs with issues filed for them or as deliberate
# incompatibilities if the test cases turn out to be broken or ambiguous.
# Some of these are related to v8:4361 in being visible side effects from Intl.
- 'intl402/6.2.3_a': [FAIL],
- 'intl402/Collator/10.1.2_a': [PASS, FAIL],
- 'intl402/Collator/10.2.3_b': [PASS, FAIL],
'intl402/DateTimeFormat/prototype/resolvedOptions/hourCycle': [FAIL],
- 'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
- 'intl402/DateTimeFormat/12.2.3_b': [FAIL],
- 'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
- 'intl402/NumberFormat/11.1.1_20_c': [FAIL],
- 'intl402/NumberFormat/11.1.2': [PASS, FAIL],
- 'intl402/NumberFormat/11.2.3_b': [FAIL],
- 'intl402/String/prototype/localeCompare/13.1.1_7': [PASS, FAIL],
##################### DELIBERATE INCOMPATIBILITIES #####################
@@ -514,6 +554,10 @@
# Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
'built-ins/Array/prototype/sort/bug_596_1': [PASS, FAIL_OK],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7418
+ 'language/line-terminators/S7.3_A2.3': [FAIL],
+ 'language/line-terminators/S7.3_A2.4': [FAIL],
+
# https://github.com/tc39/test262/pull/688#pullrequestreview-14025354
'built-ins/Function/internals/Construct/derived-this-uninitialized-realm': [FAIL],
@@ -548,7 +592,6 @@
'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T1': [SKIP],
'built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1': [SKIP],
'built-ins/RegExp/S15.10.2.12_A3_T1': [SKIP],
- 'intl402/9.2.6_4_b': [SKIP],
'language/literals/regexp/S7.8.5_A1.1_T2': [SKIP],
'language/literals/regexp/S7.8.5_A1.4_T2': [SKIP],
'language/literals/regexp/S7.8.5_A2.1_T2': [SKIP],
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 2f23fb67fb..602794f0c4 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -47,11 +47,14 @@ FEATURE_FLAGS = {
'regexp-named-groups': '--harmony-regexp-named-captures',
'regexp-unicode-property-escapes': '--harmony-regexp-property',
'Promise.prototype.finally': '--harmony-promise-finally',
- 'class-fields-public': '--harmony-class-fields',
+ 'class-fields-public': '--harmony-public-fields',
'optional-catch-binding': '--harmony-optional-catch-binding',
+ 'class-fields-private': '--harmony-private-fields',
}
-SKIPPED_FEATURES = set(['class-fields-private'])
+SKIPPED_FEATURES = set(['Array.prototype.flatten',
+ 'Array.prototype.flatMap',
+ 'numeric-separator-literal'])
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
ARCHIVE = DATA + ".tar"
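In the hunk above, FEATURE_FLAGS maps a test262 `features:` tag to the d8 flag that enables it, while SKIPPED_FEATURES lists tags with no implementation to exercise yet. A hedged sketch of how a harness might consult such tables when deciding whether and how to run a test; the get_flags_for helper is illustrative only, not part of testcfg.py:

```python
# Illustrative only: how FEATURE_FLAGS / SKIPPED_FEATURES tables like the
# ones above could drive flag selection. get_flags_for is a hypothetical
# helper, not part of the actual testcfg.py.
FEATURE_FLAGS = {
    'class-fields-public': '--harmony-public-fields',
    'class-fields-private': '--harmony-private-fields',
}
SKIPPED_FEATURES = set(['numeric-separator-literal'])


def get_flags_for(test_features):
    """Return d8 flags for a test's `features:` list, or None to skip it."""
    flags = []
    for feature in test_features:
        if feature in SKIPPED_FEATURES:
            return None  # unimplemented feature: drop the test entirely
        if feature in FEATURE_FLAGS:
            flags.append(FEATURE_FLAGS[feature])
    return flags


print(get_flags_for(['class-fields-public']))        # ['--harmony-public-fields']
print(get_flags_for(['numeric-separator-literal']))  # None
```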
@@ -70,32 +73,6 @@ TEST_262_RELPATH_REGEXP = re.compile(
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
*TEST_262_TOOLS_PATH))
-ALL_VARIANT_FLAGS_STRICT = dict(
- (v, [flags + ["--use-strict"] for flags in flag_sets])
- for v, flag_sets in testsuite.ALL_VARIANT_FLAGS.iteritems()
-)
-
-ALL_VARIANT_FLAGS_BOTH = dict(
- (v, [flags for flags in testsuite.ALL_VARIANT_FLAGS[v] +
- ALL_VARIANT_FLAGS_STRICT[v]])
- for v in testsuite.ALL_VARIANT_FLAGS
-)
-
-ALL_VARIANTS = {
- 'nostrict': testsuite.ALL_VARIANT_FLAGS,
- 'strict': ALL_VARIANT_FLAGS_STRICT,
- 'both': ALL_VARIANT_FLAGS_BOTH,
-}
-
-class LegacyVariantsGenerator(testsuite.LegacyVariantsGenerator):
- def GetFlagSets(self, test, variant):
- test_record = test.test_record
- if "noStrict" in test_record:
- return ALL_VARIANTS["nostrict"][variant]
- if "onlyStrict" in test_record:
- return ALL_VARIANTS["strict"][variant]
- return ALL_VARIANTS["both"][variant]
-
class VariantsGenerator(testsuite.VariantsGenerator):
def gen(self, test):
@@ -116,8 +93,8 @@ class TestSuite(testsuite.TestSuite):
# Match the (...) in '/path/to/v8/test/test262/subdir/test/(...).js'
# In practice, subdir is data or local-tests
- def __init__(self, name, root):
- super(TestSuite, self).__init__(name, root)
+ def __init__(self, *args, **kwargs):
+ super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
self.harness = [os.path.join(self.harnesspath, f)
@@ -158,13 +135,13 @@ class TestSuite(testsuite.TestSuite):
if f:
f.close()
- def ListTests(self, context):
+ def ListTests(self):
testnames = set()
for dirname, dirs, files in itertools.chain(os.walk(self.testroot),
os.walk(self.localtestroot)):
for dotted in [x for x in dirs if x.startswith(".")]:
dirs.remove(dotted)
- if context.noi18n and "intl402" in dirs:
+ if self.test_config.noi18n and "intl402" in dirs:
dirs.remove("intl402")
dirs.sort()
files.sort()
@@ -184,9 +161,6 @@ class TestSuite(testsuite.TestSuite):
def _test_class(self):
return TestCase
- def _LegacyVariantsGeneratorFactory(self):
- return LegacyVariantsGenerator
-
def _variants_gen_class(self):
return VariantsGenerator
@@ -203,7 +177,7 @@ class TestCase(testcase.TestCase):
.get('type', None)
)
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return (
list(self.suite.harness) +
([os.path.join(self.suite.root, "harness-agent.js")]
@@ -213,7 +187,7 @@ class TestCase(testcase.TestCase):
[self._get_source_path()]
)
- def _get_suite_flags(self, ctx):
+ def _get_suite_flags(self):
return (
(["--throws"] if "negative" in self.test_record else []) +
(["--allow-natives-syntax"]
@@ -250,5 +224,5 @@ class TestCase(testcase.TestCase):
return test262.NoExceptionOutProc(self.expected_outcomes)
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
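The ListTests change above prunes dot-directories and, under the no-i18n configuration, the whole intl402 tree by mutating the `dirs` list during os.walk; removing an entry in place is what stops os.walk from descending into that subtree. A self-contained sketch of the idiom, assuming only the standard library:

```python
# Self-contained sketch of the os.walk pruning idiom used in ListTests above:
# removing entries from `dirs` in place prevents os.walk from descending
# into those subtrees at all.
import os


def list_tests(testroot, noi18n):
    testnames = set()
    for dirname, dirs, files in os.walk(testroot):
        for dotted in [d for d in dirs if d.startswith(".")]:
            dirs.remove(dotted)          # never descend into .git and friends
        if noi18n and "intl402" in dirs:
            dirs.remove("intl402")       # skip Intl tests without i18n support
        for f in sorted(files):
            if f.endswith(".js"):
                testnames.add(os.path.join(dirname, f))
    return sorted(testnames)
```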
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 7f70a5c959..4b1447d34b 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -98,6 +98,7 @@ v8_source_set("unittests_sources") {
"compiler/instruction-unittest.cc",
"compiler/int64-lowering-unittest.cc",
"compiler/js-builtin-reducer-unittest.cc",
+ "compiler/js-call-reducer-unittest.cc",
"compiler/js-create-lowering-unittest.cc",
"compiler/js-intrinsic-lowering-unittest.cc",
"compiler/js-operator-unittest.cc",
@@ -143,6 +144,7 @@ v8_source_set("unittests_sources") {
"heap/item-parallel-job-unittest.cc",
"heap/marking-unittest.cc",
"heap/memory-reducer-unittest.cc",
+ "heap/object-stats-unittest.cc",
"heap/scavenge-job-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/spaces-unittest.cc",
diff --git a/deps/v8/test/unittests/allocation-unittest.cc b/deps/v8/test/unittests/allocation-unittest.cc
index 42904da149..68cd8a77e0 100644
--- a/deps/v8/test/unittests/allocation-unittest.cc
+++ b/deps/v8/test/unittests/allocation-unittest.cc
@@ -112,6 +112,7 @@ TEST_F(MemoryAllocationPermissionsTest, DoTest) {
TestPermissions(PageAllocator::Permission::kNoAccess, false, false);
TestPermissions(PageAllocator::Permission::kReadWrite, true, true);
TestPermissions(PageAllocator::Permission::kReadWriteExecute, true, true);
+ TestPermissions(PageAllocator::Permission::kReadExecute, true, false);
}
#endif // V8_OS_POSIX
@@ -134,8 +135,8 @@ TEST(AllocationTest, AllocateAndFree) {
// A large allocation, aligned significantly beyond native granularity.
const size_t kBigAlignment = 64 * v8::internal::MB;
void* aligned_mem_addr = v8::internal::AllocatePages(
- v8::internal::GetRandomMmapAddr(), kAllocationSize, kBigAlignment,
- PageAllocator::Permission::kReadWrite);
+ AlignedAddress(v8::internal::GetRandomMmapAddr(), kBigAlignment),
+ kAllocationSize, kBigAlignment, PageAllocator::Permission::kReadWrite);
CHECK_NOT_NULL(aligned_mem_addr);
CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
CHECK(v8::internal::FreePages(aligned_mem_addr, kAllocationSize));
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
index 7417e1c70b..cbd5d9e81b 100644
--- a/deps/v8/test/unittests/base/platform/time-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -24,6 +24,163 @@
namespace v8 {
namespace base {
+TEST(TimeDelta, ZeroMinMax) {
+ constexpr TimeDelta kZero;
+ static_assert(kZero.IsZero(), "");
+
+ constexpr TimeDelta kMax = TimeDelta::Max();
+ static_assert(kMax.IsMax(), "");
+ static_assert(kMax == TimeDelta::Max(), "");
+ EXPECT_GT(kMax, TimeDelta::FromDays(100 * 365));
+ static_assert(kMax > kZero, "");
+
+ constexpr TimeDelta kMin = TimeDelta::Min();
+ static_assert(kMin.IsMin(), "");
+ static_assert(kMin == TimeDelta::Min(), "");
+ EXPECT_LT(kMin, TimeDelta::FromDays(-100 * 365));
+ static_assert(kMin < kZero, "");
+}
+
+TEST(TimeDelta, MaxConversions) {
+ // static_assert also confirms constexpr works as intended.
+ constexpr TimeDelta kMax = TimeDelta::Max();
+ EXPECT_EQ(kMax.InDays(), std::numeric_limits<int>::max());
+ EXPECT_EQ(kMax.InHours(), std::numeric_limits<int>::max());
+ EXPECT_EQ(kMax.InMinutes(), std::numeric_limits<int>::max());
+ EXPECT_EQ(kMax.InSecondsF(), std::numeric_limits<double>::infinity());
+ EXPECT_EQ(kMax.InSeconds(), std::numeric_limits<int64_t>::max());
+ EXPECT_EQ(kMax.InMillisecondsF(), std::numeric_limits<double>::infinity());
+ EXPECT_EQ(kMax.InMilliseconds(), std::numeric_limits<int64_t>::max());
+ EXPECT_EQ(kMax.InMillisecondsRoundedUp(),
+ std::numeric_limits<int64_t>::max());
+
+ // TODO(v8-team): Import overflow support from Chromium's base.
+
+ // EXPECT_TRUE(TimeDelta::FromDays(std::numeric_limits<int>::max()).IsMax());
+
+ // EXPECT_TRUE(
+ // TimeDelta::FromHours(std::numeric_limits<int>::max()).IsMax());
+
+ // EXPECT_TRUE(
+ // TimeDelta::FromMinutes(std::numeric_limits<int>::max()).IsMax());
+
+ // constexpr int64_t max_int = std::numeric_limits<int64_t>::max();
+ // constexpr int64_t min_int = std::numeric_limits<int64_t>::min();
+
+ // EXPECT_TRUE(
+ // TimeDelta::FromSeconds(max_int / Time::kMicrosecondsPerSecond + 1)
+ // .IsMax());
+
+ // EXPECT_TRUE(TimeDelta::FromMilliseconds(
+ // max_int / Time::kMillisecondsPerSecond + 1)
+ // .IsMax());
+
+ // EXPECT_TRUE(TimeDelta::FromMicroseconds(max_int).IsMax());
+
+ // EXPECT_TRUE(
+ // TimeDelta::FromSeconds(min_int / Time::kMicrosecondsPerSecond - 1)
+ // .IsMin());
+
+ // EXPECT_TRUE(TimeDelta::FromMilliseconds(
+ // min_int / Time::kMillisecondsPerSecond - 1)
+ // .IsMin());
+
+ // EXPECT_TRUE(TimeDelta::FromMicroseconds(min_int).IsMin());
+
+ // EXPECT_TRUE(
+ // TimeDelta::FromMicroseconds(std::numeric_limits<int64_t>::min())
+ // .IsMin());
+}
+
+TEST(TimeDelta, NumericOperators) {
+ constexpr int i = 2;
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ (TimeDelta::FromMilliseconds(1000) * i));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ (TimeDelta::FromMilliseconds(1000) / i));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ (TimeDelta::FromMilliseconds(1000) *= i));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ (TimeDelta::FromMilliseconds(1000) /= i));
+
+ constexpr int64_t i64 = 2;
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ (TimeDelta::FromMilliseconds(1000) * i64));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ (TimeDelta::FromMilliseconds(1000) / i64));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ (TimeDelta::FromMilliseconds(1000) *= i64));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ (TimeDelta::FromMilliseconds(1000) /= i64));
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ (TimeDelta::FromMilliseconds(1000) * 2));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ (TimeDelta::FromMilliseconds(1000) / 2));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ (TimeDelta::FromMilliseconds(1000) *= 2));
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ (TimeDelta::FromMilliseconds(1000) /= 2));
+}
+
+// TODO(v8-team): Import support for overflow from Chromium's base.
+TEST(TimeDelta, DISABLED_Overflows) {
+ // Some sanity checks. static_asserts are used where possible to verify
+ // constexpr evaluation at the same time.
+ static_assert(TimeDelta::Max().IsMax(), "");
+ static_assert(-TimeDelta::Max() < TimeDelta(), "");
+ static_assert(-TimeDelta::Max() > TimeDelta::Min(), "");
+ static_assert(TimeDelta() > -TimeDelta::Max(), "");
+
+ TimeDelta large_delta = TimeDelta::Max() - TimeDelta::FromMilliseconds(1);
+ TimeDelta large_negative = -large_delta;
+ EXPECT_GT(TimeDelta(), large_negative);
+ EXPECT_FALSE(large_delta.IsMax());
+ EXPECT_FALSE((-large_negative).IsMin());
+ const TimeDelta kOneSecond = TimeDelta::FromSeconds(1);
+
+ // Test +, -, * and / operators.
+ EXPECT_TRUE((large_delta + kOneSecond).IsMax());
+ EXPECT_TRUE((large_negative + (-kOneSecond)).IsMin());
+ EXPECT_TRUE((large_negative - kOneSecond).IsMin());
+ EXPECT_TRUE((large_delta - (-kOneSecond)).IsMax());
+ EXPECT_TRUE((large_delta * 2).IsMax());
+ EXPECT_TRUE((large_delta * -2).IsMin());
+
+ // Test +=, -=, *= and /= operators.
+ TimeDelta delta = large_delta;
+ delta += kOneSecond;
+ EXPECT_TRUE(delta.IsMax());
+ delta = large_negative;
+ delta += -kOneSecond;
+ EXPECT_TRUE((delta).IsMin());
+
+ delta = large_negative;
+ delta -= kOneSecond;
+ EXPECT_TRUE((delta).IsMin());
+ delta = large_delta;
+ delta -= -kOneSecond;
+ EXPECT_TRUE(delta.IsMax());
+
+ delta = large_delta;
+ delta *= 2;
+ EXPECT_TRUE(delta.IsMax());
+
+ // Test operations with Time and TimeTicks.
+ EXPECT_TRUE((large_delta + Time::Now()).IsMax());
+ EXPECT_TRUE((large_delta + TimeTicks::Now()).IsMax());
+ EXPECT_TRUE((Time::Now() + large_delta).IsMax());
+ EXPECT_TRUE((TimeTicks::Now() + large_delta).IsMax());
+
+ Time time_now = Time::Now();
+ EXPECT_EQ(kOneSecond, (time_now + kOneSecond) - time_now);
+ EXPECT_EQ(-kOneSecond, (time_now - kOneSecond) - time_now);
+
+ TimeTicks ticks_now = TimeTicks::Now();
+ EXPECT_EQ(-kOneSecond, (ticks_now - kOneSecond) - ticks_now);
+ EXPECT_EQ(kOneSecond, (ticks_now + kOneSecond) - ticks_now);
+}
+
TEST(TimeDelta, FromAndIn) {
EXPECT_EQ(TimeDelta::FromDays(2), TimeDelta::FromHours(48));
EXPECT_EQ(TimeDelta::FromHours(3), TimeDelta::FromMinutes(180));
@@ -54,6 +211,47 @@ TEST(TimeDelta, MachTimespec) {
}
#endif
+TEST(Time, Max) {
+ Time max = Time::Max();
+ EXPECT_TRUE(max.IsMax());
+ EXPECT_EQ(max, Time::Max());
+ EXPECT_GT(max, Time::Now());
+ EXPECT_GT(max, Time());
+}
+
+TEST(Time, MaxConversions) {
+ Time t = Time::Max();
+ EXPECT_EQ(std::numeric_limits<int64_t>::max(), t.ToInternalValue());
+
+// TODO(v8-team): Time::FromJsTime() overflows with infinity. Import support
+// from Chromium's base.
+// t = Time::FromJsTime(std::numeric_limits<double>::infinity());
+// EXPECT_TRUE(t.IsMax());
+// EXPECT_EQ(std::numeric_limits<double>::infinity(), t.ToJsTime());
+
+#if defined(OS_POSIX)
+ struct timeval tval;
+ tval.tv_sec = std::numeric_limits<time_t>::max();
+ tval.tv_usec = static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1;
+ t = Time::FromTimeVal(tval);
+ EXPECT_TRUE(t.IsMax());
+ tval = t.ToTimeVal();
+ EXPECT_EQ(std::numeric_limits<time_t>::max(), tval.tv_sec);
+ EXPECT_EQ(static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1,
+ tval.tv_usec);
+#endif
+
+#if defined(OS_WIN)
+ FILETIME ftime;
+ ftime.dwHighDateTime = std::numeric_limits<DWORD>::max();
+ ftime.dwLowDateTime = std::numeric_limits<DWORD>::max();
+ t = Time::FromFileTime(ftime);
+ EXPECT_TRUE(t.IsMax());
+ ftime = t.ToFileTime();
+ EXPECT_EQ(std::numeric_limits<DWORD>::max(), ftime.dwHighDateTime);
+ EXPECT_EQ(std::numeric_limits<DWORD>::max(), ftime.dwLowDateTime);
+#endif
+}
TEST(Time, JsTime) {
Time t = Time::FromJsTime(700000.3);
@@ -153,21 +351,15 @@ TEST(Time, NowResolution) {
TEST(TimeTicks, NowResolution) {
- // We assume that TimeTicks::Now() has at least 16ms resolution.
- static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+ // TimeTicks::Now() is documented as having "no worse than one microsecond"
+ // resolution, unless !TimeTicks::IsHighResolution(), in which case the
+ // clock could be as coarse as ~15.6ms.
+ const TimeDelta kTargetGranularity = TimeTicks::IsHighResolution()
+ ? TimeDelta::FromMicroseconds(1)
+ : TimeDelta::FromMilliseconds(16);
ResolutionTest<TimeTicks>(&TimeTicks::Now, kTargetGranularity);
}
-
-TEST(TimeTicks, HighResolutionNowResolution) {
- if (!TimeTicks::IsHighResolutionClockWorking()) return;
-
- // We assume that TimeTicks::HighResolutionNow() has sub-ms resolution.
- static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(1);
- ResolutionTest<TimeTicks>(&TimeTicks::HighResolutionNow, kTargetGranularity);
-}
-
-
TEST(TimeTicks, IsMonotonic) {
TimeTicks previous_normal_ticks;
TimeTicks previous_highres_ticks;
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index a0ddd1e5e4..fe97cb30d8 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -336,7 +336,7 @@ TEST_F(CompilerDispatcherTest, IsEnqueued) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(dispatcher.Enqueue(shared));
ASSERT_TRUE(dispatcher.IsEnqueued(shared));
- dispatcher.AbortAll(CompilerDispatcher::BlockingBehavior::kBlock);
+ dispatcher.AbortAll(BlockingBehavior::kBlock);
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(platform.IdleTaskPending());
platform.ClearIdleTask();
@@ -640,7 +640,7 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllPendingBackgroundTask) {
ASSERT_TRUE(platform.BackgroundTasksPending());
// The background task hasn't yet started, so we can just cancel it.
- dispatcher.AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
+ dispatcher.AbortAll(BlockingBehavior::kDontBlock);
ASSERT_FALSE(platform.ForegroundTasksPending());
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
@@ -692,7 +692,7 @@ TEST_F(CompilerDispatcherTest, AsyncAbortAllRunningBackgroundTask) {
// Busy loop until the background task started running.
while (dispatcher.block_for_testing_.Value()) {
}
- dispatcher.AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
+ dispatcher.AbortAll(BlockingBehavior::kDontBlock);
ASSERT_TRUE(platform.ForegroundTasksPending());
// We can't schedule new tasks while we're aborting.
@@ -768,7 +768,7 @@ TEST_F(CompilerDispatcherTest, FinishNowDuringAbortAll) {
// Busy loop until the background task started running.
while (dispatcher.block_for_testing_.Value()) {
}
- dispatcher.AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
+ dispatcher.AbortAll(BlockingBehavior::kDontBlock);
ASSERT_TRUE(platform.ForegroundTasksPending());
// Run the first AbortTask. Since the background job is still pending, it
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index d5c37264a5..529b518070 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -84,7 +84,7 @@ TEST_F(OptimizingCompileDispatcherTest, NonBlockingFlush) {
}
// Should not block.
- dispatcher.Flush(OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
+ dispatcher.Flush(BlockingBehavior::kDontBlock);
// Unblock the job & finish.
job->Signal();
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index ab1c57d9c5..becc1d8405 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -3257,6 +3257,15 @@ TEST_F(InstructionSelectorTest, Float64Neg) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, SpeculationFence) {
+ StreamBuilder m(this, MachineType::Int32());
+ m.SpeculationFence();
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmDsbIsb, s[0]->arch_opcode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 013d96f26d..f8b0a478e2 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -4417,6 +4417,15 @@ TEST_F(InstructionSelectorTest, CompareFloat64HighGreaterThanOrEqualZero64) {
EXPECT_EQ(63, s.ToInt32(s[1]->InputAt(1)));
}
+TEST_F(InstructionSelectorTest, SpeculationFence) {
+ StreamBuilder m(this, MachineType::Int32());
+ m.SpeculationFence();
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64DsbIsb, s[0]->arch_opcode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index dd8b661fcf..48c15934df 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -11,6 +11,7 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects-inl.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -452,6 +453,53 @@ TEST_F(BytecodeAnalysisTest, KillingLoopInsideLoop) {
EnsureLivenessMatches(bytecode, expected_liveness);
}
+TEST_F(BytecodeAnalysisTest, SuspendPoint) {
+ interpreter::BytecodeArrayBuilder builder(zone(), 3, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+ interpreter::Register reg_1(1);
+ interpreter::Register reg_gen(2);
+ interpreter::BytecodeJumpTable* gen_jump_table =
+ builder.AllocateJumpTable(1, 0);
+
+ builder.StoreAccumulatorInRegister(reg_gen);
+ expected_liveness.emplace_back("L..L", "L.LL");
+
+ // Note: technically, r0 should be dead here since the resume will write it,
+ // but in practice the bytecode analysis doesn't bother to special-case it,
+ // since the generator switch is close to the top of the function anyway.
+ builder.SwitchOnGeneratorState(reg_gen, gen_jump_table);
+ expected_liveness.emplace_back("L.LL", "L.LL");
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back("..LL", "L.LL");
+
+ // Reg 1 is never read, so should be dead.
+ builder.StoreAccumulatorInRegister(reg_1);
+ expected_liveness.emplace_back("L.LL", "L.LL");
+
+ builder.SuspendGenerator(
+ reg_gen, interpreter::BytecodeUtils::NewRegisterList(0, 3), 0);
+ expected_liveness.emplace_back("L.LL", "L.L.");
+
+ builder.Bind(gen_jump_table, 0);
+
+ builder.ResumeGenerator(reg_gen,
+ interpreter::BytecodeUtils::NewRegisterList(0, 1));
+ expected_liveness.emplace_back("L.L.", "L...");
+
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("L...", "...L");
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index 6618dfb452..3cae7e60d7 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -31,7 +31,7 @@ class CommonOperatorReducerTest : public GraphTest {
MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags) {
MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
flags);
- CommonOperatorReducer reducer(editor, graph(), common(), &machine);
+ CommonOperatorReducer reducer(editor, graph(), common(), &machine, zone());
return reducer.Reduce(node);
}
@@ -492,6 +492,52 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
}
+// -----------------------------------------------------------------------------
+// Switch
+
+TEST_F(CommonOperatorReducerTest, SwitchInputMatchesCaseWithDefault) {
+ Node* const control = graph()->start();
+
+ Node* sw = graph()->NewNode(common()->Switch(2), Int32Constant(1), control);
+ Node* const if_1 = graph()->NewNode(common()->IfValue(1), sw);
+ graph()->NewNode(common()->IfDefault(), sw);
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_1, control));
+ Reduction r = Reduce(&editor, sw);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+}
+
+TEST_F(CommonOperatorReducerTest, SwitchInputMatchesDefaultWithCase) {
+ Node* const control = graph()->start();
+
+ Node* sw = graph()->NewNode(common()->Switch(2), Int32Constant(0), control);
+ graph()->NewNode(common()->IfValue(1), sw);
+ Node* const if_default = graph()->NewNode(common()->IfDefault(), sw);
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_default, control));
+ Reduction r = Reduce(&editor, sw);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+}
+
+TEST_F(CommonOperatorReducerTest, SwitchInputMatchesCaseExtraCaseWithDefault) {
+ Node* const control = graph()->start();
+
+ Node* sw = graph()->NewNode(common()->Switch(3), Int32Constant(0), control);
+ Node* const if_0 = graph()->NewNode(common()->IfValue(0), sw);
+ graph()->NewNode(common()->IfValue(1), sw);
+ graph()->NewNode(common()->IfDefault(), sw);
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_0, control));
+ Reduction r = Reduce(&editor, sw);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+}
+
} // namespace common_operator_reducer_unittest
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index f2767a0bb8..ecf76e6146 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -830,26 +830,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
// -----------------------------------------------------------------------------
// Miscellaneous.
-
-TEST_F(InstructionSelectorTest, Uint32LessThanWithLoadAndLoadStackPointer) {
- StreamBuilder m(this, MachineType::Bool());
- Node* const sl = m.Load(
- MachineType::Pointer(),
- m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
- Node* const sp = m.LoadStackPointer();
- Node* const n = m.Uint32LessThan(sl, sp);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kIA32StackCheck, s[0]->arch_opcode());
- ASSERT_EQ(0U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_set, s[0]->flags_mode());
- EXPECT_EQ(kUnsignedGreaterThan, s[0]->flags_condition());
-}
-
-
TEST_F(InstructionSelectorTest, Word32Clz) {
StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index 3444e0542a..1c2213d138 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -43,6 +43,8 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
SourcePositionTable source_position_table(graph());
InstructionSelector selector(test_->zone(), node_count, &linkage, &sequence,
schedule, &source_position_table, nullptr,
+ InstructionSelector::kEnableSwitchJumpTable,
+ InstructionSelector::kEnableSpeculationPoison,
source_position_mode, features,
InstructionSelector::kDisableScheduling);
selector.SelectInstructions();
@@ -361,7 +363,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
ZoneVector<MachineType> empty_types(zone());
- CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
+ auto call_descriptor = Linkage::GetJSCallDescriptor(
zone(), false, 1, CallDescriptor::kNeedsFrameState);
// Build frame state for the state before the call.
@@ -382,7 +384,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
// Build the call.
Node* nodes[] = {function_node, receiver, m.UndefinedConstant(),
m.Int32Constant(1), context, state_node};
- Node* call = m.CallNWithFrameState(descriptor, arraysize(nodes), nodes);
+ Node* call = m.CallNWithFrameState(call_descriptor, arraysize(nodes), nodes);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -418,7 +420,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
ZoneVector<MachineType> tagged_type(1, MachineType::AnyTagged(), zone());
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 1,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
@@ -443,7 +445,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
// Build the call.
Node* stub_code = m.HeapConstant(callable.code());
Node* nodes[] = {stub_code, function_node, receiver, context, state_node};
- Node* call = m.CallNWithFrameState(descriptor, arraysize(nodes), nodes);
+ Node* call = m.CallNWithFrameState(call_descriptor, arraysize(nodes), nodes);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -513,7 +515,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
ZoneVector<MachineType> float64_type(1, MachineType::Float64(), zone());
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 1,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
@@ -552,7 +554,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
// Build the call.
Node* stub_code = m.HeapConstant(callable.code());
Node* nodes[] = {stub_code, function_node, receiver, context2, state_node};
- Node* call = m.CallNWithFrameState(descriptor, arraysize(nodes), nodes);
+ Node* call = m.CallNWithFrameState(call_descriptor, arraysize(nodes), nodes);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index ddb8408e5f..2098249bd6 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -337,12 +337,13 @@ TEST_F(Int64LoweringTest, CallI64Return) {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
sig_builder.AddReturn(MachineRepresentation::kWord64);
- compiler::CallDescriptor* desc =
+ auto call_descriptor =
compiler::GetWasmCallDescriptor(zone(), sig_builder.Build());
- LowerGraph(graph()->NewNode(common()->Call(desc), Int32Constant(function),
- context_address, start(), start()),
- MachineRepresentation::kWord64);
+ LowerGraph(
+ graph()->NewNode(common()->Call(call_descriptor), Int32Constant(function),
+ context_address, start(), start()),
+ MachineRepresentation::kWord64);
Capture<Node*> call;
Matcher<Node*> call_matcher =
@@ -356,7 +357,7 @@ TEST_F(Int64LoweringTest, CallI64Return) {
CompareCallDescriptors(
OpParameter<const CallDescriptor*>(
graph()->end()->InputAt(1)->InputAt(1)->InputAt(0)),
- compiler::GetI32WasmCallDescriptor(zone(), desc));
+ compiler::GetI32WasmCallDescriptor(zone(), call_descriptor));
}
TEST_F(Int64LoweringTest, CallI64Parameter) {
@@ -369,14 +370,15 @@ TEST_F(Int64LoweringTest, CallI64Parameter) {
sig_builder.AddParam(MachineRepresentation::kWord32);
sig_builder.AddParam(MachineRepresentation::kWord64);
- compiler::CallDescriptor* desc =
+ auto call_descriptor =
compiler::GetWasmCallDescriptor(zone(), sig_builder.Build());
- LowerGraph(graph()->NewNode(common()->Call(desc), Int32Constant(function),
- context_address, Int64Constant(value(0)),
- Int32Constant(low_word_value(1)),
- Int64Constant(value(2)), start(), start()),
- MachineRepresentation::kWord32);
+ LowerGraph(
+ graph()->NewNode(common()->Call(call_descriptor), Int32Constant(function),
+ context_address, Int64Constant(value(0)),
+ Int32Constant(low_word_value(1)),
+ Int64Constant(value(2)), start(), start()),
+ MachineRepresentation::kWord32);
EXPECT_THAT(
graph()->end()->InputAt(1),
@@ -388,9 +390,10 @@ TEST_F(Int64LoweringTest, CallI64Parameter) {
IsInt32Constant(high_word_value(2)), start(), start()),
start(), start()));
- CompareCallDescriptors(OpParameter<const CallDescriptor*>(
- graph()->end()->InputAt(1)->InputAt(1)),
- compiler::GetI32WasmCallDescriptor(zone(), desc));
+ CompareCallDescriptors(
+ OpParameter<const CallDescriptor*>(
+ graph()->end()->InputAt(1)->InputAt(1)),
+ compiler::GetI32WasmCallDescriptor(zone(), call_descriptor));
}
TEST_F(Int64LoweringTest, Int64Add) {
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index a0ea6f376e..4ded3fd667 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -196,1220 +196,6 @@ TEST_F(JSBuiltinReducerTest, GlobalIsNaNWithPlainPrimitive) {
}
// -----------------------------------------------------------------------------
-// Math.abs
-
-TEST_F(JSBuiltinReducerTest, MathAbsWithNumber) {
- Node* function = MathFunction("abs");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAbs(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathAbsWithPlainPrimitive) {
- Node* function = MathFunction("abs");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAbs(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.acos
-
-TEST_F(JSBuiltinReducerTest, MathAcosWithNumber) {
- Node* function = MathFunction("acos");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAcos(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathAcosWithPlainPrimitive) {
- Node* function = MathFunction("acos");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAcos(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.acosh
-
-TEST_F(JSBuiltinReducerTest, MathAcoshWithNumber) {
- Node* function = MathFunction("acosh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAcosh(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathAcoshWithPlainPrimitive) {
- Node* function = MathFunction("acosh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAcosh(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.asin
-
-TEST_F(JSBuiltinReducerTest, MathAsinWithNumber) {
- Node* function = MathFunction("asin");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAsin(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathAsinWithPlainPrimitive) {
- Node* function = MathFunction("asin");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAsin(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.asinh
-
-TEST_F(JSBuiltinReducerTest, MathAsinhWithNumber) {
- Node* function = MathFunction("asinh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAsinh(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathAsinhWithPlainPrimitive) {
- Node* function = MathFunction("asinh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAsinh(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.atan
-
-TEST_F(JSBuiltinReducerTest, MathAtanWithNumber) {
- Node* function = MathFunction("atan");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAtan(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathAtanWithPlainPrimitive) {
- Node* function = MathFunction("atan");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAtan(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.atanh
-
-TEST_F(JSBuiltinReducerTest, MathAtanhWithNumber) {
- Node* function = MathFunction("atanh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAtanh(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathAtanhWithPlainPrimitive) {
- Node* function = MathFunction("atanh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAtanh(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.atan2
-
-TEST_F(JSBuiltinReducerTest, MathAtan2WithNumber) {
- Node* function = MathFunction("atan2");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- TRACED_FOREACH(Type*, t1, kNumberTypes) {
- Node* p1 = Parameter(t1, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(),
- p0, p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAtan2(p0, p1));
- }
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathAtan2WithPlainPrimitive) {
- Node* function = MathFunction("atan2");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* p1 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(), p0,
- p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberAtan2(IsPlainPrimitiveToNumber(p0),
- IsPlainPrimitiveToNumber(p1)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.ceil
-
-TEST_F(JSBuiltinReducerTest, MathCeilWithNumber) {
- Node* function = MathFunction("ceil");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberCeil(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathCeilWithPlainPrimitive) {
- Node* function = MathFunction("ceil");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberCeil(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.clz32
-
-TEST_F(JSBuiltinReducerTest, MathClz32WithUnsigned32) {
- Node* function = MathFunction("clz32");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::Unsigned32(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberClz32(p0));
-}
-
-TEST_F(JSBuiltinReducerTest, MathClz32WithNumber) {
- Node* function = MathFunction("clz32");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::Number(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberClz32(IsNumberToUint32(p0)));
-}
-
-TEST_F(JSBuiltinReducerTest, MathClz32WithPlainPrimitive) {
- Node* function = MathFunction("clz32");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberClz32(IsNumberToUint32(IsPlainPrimitiveToNumber(p0))));
-}
-
-// -----------------------------------------------------------------------------
-// Math.cos
-
-TEST_F(JSBuiltinReducerTest, MathCosWithNumber) {
- Node* function = MathFunction("cos");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberCos(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathCosWithPlainPrimitive) {
- Node* function = MathFunction("cos");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberCos(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.cosh
-
-TEST_F(JSBuiltinReducerTest, MathCoshWithNumber) {
- Node* function = MathFunction("cosh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberCosh(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathCoshWithPlainPrimitive) {
- Node* function = MathFunction("cosh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberCosh(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.exp
-
-TEST_F(JSBuiltinReducerTest, MathExpWithNumber) {
- Node* function = MathFunction("exp");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberExp(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathExpWithPlainPrimitive) {
- Node* function = MathFunction("exp");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberExp(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.floor
-
-TEST_F(JSBuiltinReducerTest, MathFloorWithNumber) {
- Node* function = MathFunction("floor");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberFloor(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathFloorWithPlainPrimitive) {
- Node* function = MathFunction("floor");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberFloor(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.fround
-
-TEST_F(JSBuiltinReducerTest, MathFroundWithNumber) {
- Node* function = MathFunction("fround");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberFround(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathFroundWithPlainPrimitive) {
- Node* function = MathFunction("fround");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberFround(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.imul
-
-TEST_F(JSBuiltinReducerTest, MathImulWithUnsigned32) {
- Node* function = MathFunction("imul");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::Unsigned32(), 0);
- Node* p1 = Parameter(Type::Unsigned32(), 1);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(), p0,
- p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberImul(p0, p1));
-}
-
-TEST_F(JSBuiltinReducerTest, MathImulWithNumber) {
- Node* function = MathFunction("imul");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::Number(), 0);
- Node* p1 = Parameter(Type::Number(), 1);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(), p0,
- p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberImul(IsNumberToUint32(p0), IsNumberToUint32(p1)));
-}
-
-TEST_F(JSBuiltinReducerTest, MathImulWithPlainPrimitive) {
- Node* function = MathFunction("imul");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* p1 = Parameter(Type::PlainPrimitive(), 1);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(), p0,
- p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberImul(IsNumberToUint32(IsPlainPrimitiveToNumber(p0)),
- IsNumberToUint32(IsPlainPrimitiveToNumber(p1))));
-}
-
-// -----------------------------------------------------------------------------
-// Math.log
-
-TEST_F(JSBuiltinReducerTest, MathLogWithNumber) {
- Node* function = MathFunction("log");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberLog(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathLogWithPlainPrimitive) {
- Node* function = MathFunction("log");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberLog(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.log1p
-
-TEST_F(JSBuiltinReducerTest, MathLog1pWithNumber) {
- Node* function = MathFunction("log1p");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberLog1p(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathLog1pWithPlainPrimitive) {
- Node* function = MathFunction("log1p");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberLog1p(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.max
-
-TEST_F(JSBuiltinReducerTest, MathMaxWithNoArguments) {
- Node* function = MathFunction("max");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* call =
- graph()->NewNode(javascript()->Call(2), function, UndefinedConstant(),
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
-}
-
-TEST_F(JSBuiltinReducerTest, MathMaxWithNumber) {
- Node* function = MathFunction("max");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathMaxWithPlainPrimitive) {
- Node* function = MathFunction("max");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* p1 = Parameter(Type::PlainPrimitive(), 1);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(), p0,
- p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberMax(IsPlainPrimitiveToNumber(p0),
- IsPlainPrimitiveToNumber(p1)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.min
-
-TEST_F(JSBuiltinReducerTest, MathMinWithNoArguments) {
- Node* function = MathFunction("min");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* call =
- graph()->NewNode(javascript()->Call(2), function, UndefinedConstant(),
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(V8_INFINITY));
-}
-
-TEST_F(JSBuiltinReducerTest, MathMinWithNumber) {
- Node* function = MathFunction("min");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathMinWithPlainPrimitive) {
- Node* function = MathFunction("min");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* p1 = Parameter(Type::PlainPrimitive(), 1);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(), p0,
- p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberMin(IsPlainPrimitiveToNumber(p0),
- IsPlainPrimitiveToNumber(p1)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.round
-
-TEST_F(JSBuiltinReducerTest, MathRoundWithNumber) {
- Node* function = MathFunction("round");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberRound(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathRoundWithPlainPrimitive) {
- Node* function = MathFunction("round");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberRound(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.pow
-
-TEST_F(JSBuiltinReducerTest, MathPowWithNumber) {
- Node* function = MathFunction("pow");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- TRACED_FOREACH(Type*, t1, kNumberTypes) {
- Node* p1 = Parameter(t1, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(),
- p0, p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberPow(p0, p1));
- }
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathPowWithPlainPrimitive) {
- Node* function = MathFunction("pow");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* p1 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(4), function, UndefinedConstant(), p0,
- p1, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberPow(IsPlainPrimitiveToNumber(p0),
- IsPlainPrimitiveToNumber(p1)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.sign
-
-TEST_F(JSBuiltinReducerTest, MathSignWithNumber) {
- Node* function = MathFunction("sign");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberSign(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathSignWithPlainPrimitive) {
- Node* function = MathFunction("sign");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberSign(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.sin
-
-TEST_F(JSBuiltinReducerTest, MathSinWithNumber) {
- Node* function = MathFunction("sin");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberSin(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathSinWithPlainPrimitive) {
- Node* function = MathFunction("sin");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberSin(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.sinh
-
-TEST_F(JSBuiltinReducerTest, MathSinhWithNumber) {
- Node* function = MathFunction("sinh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberSinh(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathSinhWithPlainPrimitive) {
- Node* function = MathFunction("sinh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberSinh(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.sqrt
-
-TEST_F(JSBuiltinReducerTest, MathSqrtWithNumber) {
- Node* function = MathFunction("sqrt");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberSqrt(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathSqrtWithPlainPrimitive) {
- Node* function = MathFunction("sqrt");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberSqrt(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.tan
-
-TEST_F(JSBuiltinReducerTest, MathTanWithNumber) {
- Node* function = MathFunction("tan");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberTan(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathTanWithPlainPrimitive) {
- Node* function = MathFunction("tan");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberTan(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.tanh
-
-TEST_F(JSBuiltinReducerTest, MathTanhWithNumber) {
- Node* function = MathFunction("tanh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberTanh(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathTanhWithPlainPrimitive) {
- Node* function = MathFunction("tanh");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberTanh(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
-// Math.trunc
-
-TEST_F(JSBuiltinReducerTest, MathTruncWithNumber) {
- Node* function = MathFunction("trunc");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberTrunc(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, MathTruncWithPlainPrimitive) {
- Node* function = MathFunction("trunc");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberTrunc(IsPlainPrimitiveToNumber(p0)));
-}
-
-// -----------------------------------------------------------------------------
// Number.isFinite
TEST_F(JSBuiltinReducerTest, NumberIsFiniteWithNumber) {
@@ -1542,46 +328,6 @@ TEST_F(JSBuiltinReducerTest, NumberParseIntWithIntegral32AndUndefined) {
}
}
-// -----------------------------------------------------------------------------
-// String.fromCharCode
-
-TEST_F(JSBuiltinReducerTest, StringFromCharCodeWithNumber) {
- Node* function = StringFunction("fromCharCode");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(),
- p0, context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsStringFromCharCode(p0));
- }
-}
-
-TEST_F(JSBuiltinReducerTest, StringFromCharCodeWithPlainPrimitive) {
- Node* function = StringFunction("fromCharCode");
-
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* context = UndefinedConstant();
- Node* frame_state = graph()->start();
- Node* p0 = Parameter(Type::PlainPrimitive(), 0);
- Node* call =
- graph()->NewNode(javascript()->Call(3), function, UndefinedConstant(), p0,
- context, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStringFromCharCode(IsPlainPrimitiveToNumber(p0)));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
new file mode 100644
index 0000000000..2a917d3c36
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -0,0 +1,444 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cctype>
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/js-call-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/factory.h"
+#include "src/feedback-vector.h"
+#include "src/isolate.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSCallReducerTest : public TypedGraphTest {
+ public:
+ JSCallReducerTest()
+ : TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {}
+ ~JSCallReducerTest() override {}
+
+ protected:
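+ // Runs the JSCallReducer over a single node in isolation; a fresh JSGraph
+ // and GraphReducer are constructed for every invocation.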
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine(zone());
+ SimplifiedOperatorBuilder simplified(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
+ &machine);
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+
+ JSCallReducer reducer(&graph_reducer, &jsgraph, JSCallReducer::kNoFlags,
+ native_context(), &deps_);
+ return reducer.Reduce(node);
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
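+ // The lazy (handler) deserialization flags are turned off for the whole
+ // test case, presumably so the builtins the tests look up are fully
+ // deserialized; the original values are restored in TearDownTestCase().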
+ static void SetUpTestCase() {
+ old_flag_lazy_ = i::FLAG_lazy_deserialization;
+ old_flag_lazy_handler_ = i::FLAG_lazy_handler_deserialization;
+ i::FLAG_lazy_deserialization = false;
+ i::FLAG_lazy_handler_deserialization = false;
+ TypedGraphTest::SetUpTestCase();
+ }
+
+ static void TearDownTestCase() {
+ TypedGraphTest::TearDownTestCase();
+ i::FLAG_lazy_deserialization = old_flag_lazy_;
+ i::FLAG_lazy_handler_deserialization = old_flag_lazy_handler_;
+ }
+
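+ // Looks up Math.<name> on the global object and wraps the resulting
+ // JSFunction in a HeapConstant node.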
+ Node* MathFunction(const std::string& name) {
+ Handle<Object> m =
+ JSObject::GetProperty(
+ isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked("Math"))
+ .ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ m, isolate()->factory()->NewStringFromAsciiChecked(name.c_str()))
+ .ToHandleChecked());
+ return HeapConstant(f);
+ }
+
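+ // Same lookup as MathFunction(), but for properties of the String
+ // constructor (e.g. String.fromCharCode).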
+ Node* StringFunction(const char* name) {
+ Handle<Object> m =
+ JSObject::GetProperty(
+ isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked("String"))
+ .ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ m, isolate()->factory()->NewStringFromAsciiChecked(name))
+ .ToHandleChecked());
+ return HeapConstant(f);
+ }
+
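+ // Derives the expected simplified-operator mnemonic from a builtin name,
+ // e.g. "sign" -> "NumberSign".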
+ std::string op_name_for(const char* fnc) {
+ std::string string_fnc(fnc);
+ char initial = std::toupper(fnc[0]);
+ return std::string("Number") + initial +
+ string_fnc.substr(1, std::string::npos);
+ }
+
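+ // Builds a JSCall operator of the given arity whose feedback slot comes
+ // from a freshly allocated one-slot feedback vector.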
+ const Operator* Call(int arity) {
+ FeedbackVectorSpec spec(zone());
+ spec.AddCallICSlot();
+ Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate(), &spec);
+ Handle<SharedFunctionInfo> shared =
+ isolate()->factory()->NewSharedFunctionInfo(
+ isolate()->factory()->empty_string(), MaybeHandle<Code>(), false);
+ shared->set_feedback_metadata(*metadata);
+ Handle<FeedbackVector> vector = FeedbackVector::New(isolate(), shared);
+ VectorSlotPair feedback(vector, FeedbackSlot(0));
+ return javascript()->Call(arity, CallFrequency(), feedback,
+ ConvertReceiverMode::kAny,
+ SpeculationMode::kAllowSpeculation);
+ }
+
+ private:
+ JSOperatorBuilder javascript_;
+ CompilationDependencies deps_;
+
+ static bool old_flag_lazy_;
+ static bool old_flag_lazy_handler_;
+};
+
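+// The Promise constructor tests below check that the call is only inlined
+// (behind --experimental-inline-promise-constructor) when an executor is
+// passed, new_target is the Promise function itself, and the promise hook
+// protector is intact.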
+TEST_F(JSCallReducerTest, PromiseConstructorNoArgs) {
+ Node* promise = HeapConstant(handle(native_context()->promise_function()));
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+
+ Node* construct =
+ graph()->NewNode(javascript()->Construct(2), promise, promise, context,
+ frame_state, effect, control);
+
+ Reduction r = Reduce(construct);
+
+ ASSERT_FALSE(r.Changed());
+}
+
+TEST_F(JSCallReducerTest, PromiseConstructorSubclass) {
+ Node* promise = HeapConstant(handle(native_context()->promise_function()));
+ Node* new_target = HeapConstant(handle(native_context()->array_function()));
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+
+ Node* executor = UndefinedConstant();
+ Node* construct =
+ graph()->NewNode(javascript()->Construct(3), promise, executor,
+ new_target, context, frame_state, effect, control);
+
+ Reduction r = Reduce(construct);
+
+ ASSERT_FALSE(r.Changed());
+}
+
+TEST_F(JSCallReducerTest, PromiseConstructorBasic) {
+ Node* promise = HeapConstant(handle(native_context()->promise_function()));
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+
+ Node* executor = UndefinedConstant();
+ Node* construct =
+ graph()->NewNode(javascript()->Construct(3), promise, executor, promise,
+ context, frame_state, effect, control);
+
+ Reduction r = Reduce(construct);
+
+ if (FLAG_experimental_inline_promise_constructor) {
+ ASSERT_TRUE(r.Changed());
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+}
+
+// Exactly the same as PromiseConstructorBasic, which expects a reduction,
+// except that we invalidate the promise hook protector cell first.
+TEST_F(JSCallReducerTest, PromiseConstructorWithHook) {
+ Node* promise = HeapConstant(handle(native_context()->promise_function()));
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+
+ Node* executor = UndefinedConstant();
+ Node* construct =
+ graph()->NewNode(javascript()->Construct(3), promise, executor, promise,
+ context, frame_state, effect, control);
+
+ isolate()->InvalidatePromiseHookProtector();
+
+ Reduction r = Reduce(construct);
+
+ ASSERT_FALSE(r.Changed());
+}
+
+bool JSCallReducerTest::old_flag_lazy_;
+bool JSCallReducerTest::old_flag_lazy_handler_;
+
+// -----------------------------------------------------------------------------
+// Math unaries
+
+namespace {
+
+const char* kMathUnaries[] = {
+ "abs", "acos", "acosh", "asin", "asinh", "atan", "cbrt",
+ "ceil", "cos", "cosh", "exp", "expm1", "floor", "fround",
+ "log", "log1p", "log10", "log2", "round", "sign", "sin",
+ "sinh", "sqrt", "tan", "tanh", "trunc"};
+
+} // namespace
+
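+// Every unary listed above should reduce to the simplified operator whose
+// mnemonic op_name_for() predicts.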
+TEST_F(JSCallReducerTest, MathUnaryWithNumber) {
+ TRACED_FOREACH(const char*, fnc, kMathUnaries) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* jsfunction = MathFunction(fnc);
+ Node* p0 = Parameter(Type::Any(), 0);
+ Node* call = graph()->NewNode(Call(3), jsfunction, UndefinedConstant(), p0,
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(std::string(IrOpcode::Mnemonic(r.replacement()->opcode())),
+ op_name_for(fnc));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Math binaries
+
+namespace {
+
+const char* kMathBinaries[] = {"atan2", "pow"};
+
+} // namespace
+
+TEST_F(JSCallReducerTest, MathBinaryWithNumber) {
+ TRACED_FOREACH(const char*, fnc, kMathBinaries) {
+ Node* jsfunction = MathFunction(fnc);
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Any(), 0);
+ Node* p1 = Parameter(Type::Any(), 0);
+ Node* call = graph()->NewNode(Call(4), jsfunction, UndefinedConstant(), p0,
+ p1, context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(std::string(IrOpcode::Mnemonic(r.replacement()->opcode())),
+ op_name_for(fnc));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Math.clz32
+
+TEST_F(JSCallReducerTest, MathClz32WithUnsigned32) {
+ Node* jsfunction = MathFunction("clz32");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+
+ Node* p0 = Parameter(Type::Unsigned32(), 0);
+ Node* call = graph()->NewNode(Call(3), jsfunction, UndefinedConstant(), p0,
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberClz32(IsNumberToUint32(IsSpeculativeToNumber(p0))));
+}
+
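+// With no argument, Math.clz32(undefined) folds to the constant 32, since
+// ToUint32(undefined) is 0.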
+TEST_F(JSCallReducerTest, MathClz32WithUnsigned32NoArg) {
+ Node* jsfunction = MathFunction("clz32");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+
+ Node* call = graph()->NewNode(Call(2), jsfunction, UndefinedConstant(),
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(32));
+}
+
+// -----------------------------------------------------------------------------
+// Math.imul
+
+TEST_F(JSCallReducerTest, MathImulWithUnsigned32) {
+ Node* jsfunction = MathFunction("imul");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Unsigned32(), 0);
+ Node* p1 = Parameter(Type::Unsigned32(), 1);
+ Node* call = graph()->NewNode(Call(4), jsfunction, UndefinedConstant(), p0,
+ p1, context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(std::string(IrOpcode::Mnemonic(r.replacement()->opcode())),
+ op_name_for("imul"));
+}
+
+// -----------------------------------------------------------------------------
+// Math.min
+
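+// Math.min() with no arguments is +Infinity, the identity of min.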
+TEST_F(JSCallReducerTest, MathMinWithNoArguments) {
+ Node* jsfunction = MathFunction("min");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* call = graph()->NewNode(Call(2), jsfunction, UndefinedConstant(),
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(V8_INFINITY));
+}
+
+TEST_F(JSCallReducerTest, MathMinWithNumber) {
+ Node* jsfunction = MathFunction("min");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Any(), 0);
+ Node* call = graph()->NewNode(Call(3), jsfunction, UndefinedConstant(), p0,
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeToNumber(p0));
+}
+
+TEST_F(JSCallReducerTest, MathMinWithTwoArguments) {
+ Node* jsfunction = MathFunction("min");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Any(), 0);
+ Node* p1 = Parameter(Type::Any(), 1);
+ Node* call = graph()->NewNode(Call(4), jsfunction, UndefinedConstant(), p0,
+ p1, context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberMin(IsSpeculativeToNumber(p0),
+ IsSpeculativeToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.max
+
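+// Math.max() with no arguments is -Infinity, the identity of max.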
+TEST_F(JSCallReducerTest, MathMaxWithNoArguments) {
+ Node* jsfunction = MathFunction("max");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* call = graph()->NewNode(Call(2), jsfunction, UndefinedConstant(),
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+TEST_F(JSCallReducerTest, MathMaxWithNumber) {
+ Node* jsfunction = MathFunction("max");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Any(), 0);
+ Node* call = graph()->NewNode(Call(3), jsfunction, UndefinedConstant(), p0,
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeToNumber(p0));
+}
+
+TEST_F(JSCallReducerTest, MathMaxWithTwoArguments) {
+ Node* jsfunction = MathFunction("max");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Any(), 0);
+ Node* p1 = Parameter(Type::Any(), 1);
+ Node* call = graph()->NewNode(Call(4), jsfunction, UndefinedConstant(), p0,
+ p1, context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberMax(IsSpeculativeToNumber(p0),
+ IsSpeculativeToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// String.fromCharCode
+
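+// Both the untyped and the PlainPrimitive-typed calls reduce to a
+// StringFromCharCode fed by a speculative number conversion of the argument.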
+TEST_F(JSCallReducerTest, StringFromCharCodeWithNumber) {
+ Node* function = StringFunction("fromCharCode");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Any(), 0);
+ Node* call = graph()->NewNode(Call(3), function, UndefinedConstant(), p0,
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsStringFromCharCode(IsSpeculativeToNumber(p0)));
+}
+
+TEST_F(JSCallReducerTest, StringFromCharCodeWithPlainPrimitive) {
+ Node* function = StringFunction("fromCharCode");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(Call(3), function, UndefinedConstant(), p0,
+ context, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsStringFromCharCode(IsSpeculativeToNumber(p0)));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/mips/OWNERS b/deps/v8/test/unittests/compiler/mips/OWNERS
index 3fce7dd688..4ce9d7f91d 100644
--- a/deps/v8/test/unittests/compiler/mips/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com \ No newline at end of file
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com \ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/mips64/OWNERS b/deps/v8/test/unittests/compiler/mips64/OWNERS
index 978563cab5..4ce9d7f91d 100644
--- a/deps/v8/test/unittests/compiler/mips64/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips64/OWNERS
@@ -1,2 +1,3 @@
ivica.bogosavljevic@mips.com
Miran.Karic@mips.com
+sreten.kovacevic@mips.com \ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 8e7084d1b1..68bf36c6e2 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -2025,6 +2025,9 @@ Matcher<Node*> IsParameter(const Matcher<int> index_matcher) {
return MakeMatcher(new IsParameterMatcher(index_matcher));
}
+Matcher<Node*> IsSpeculationPoison() {
+ return MakeMatcher(new TestNodeMatcher(IrOpcode::kSpeculationPoison));
+}
Matcher<Node*> IsLoadFramePointer() {
return MakeMatcher(new TestNodeMatcher(IrOpcode::kLoadFramePointer));
@@ -2123,9 +2126,7 @@ IS_BINOP_MATCHER(Float64InsertHighWord32)
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
IS_UNOP_MATCHER(BooleanNot)
-IS_UNOP_MATCHER(BitcastTaggedToWord)
IS_UNOP_MATCHER(BitcastWordToTagged)
-IS_UNOP_MATCHER(BitcastWordToTaggedSigned)
IS_UNOP_MATCHER(TruncateFloat64ToWord32)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToUint32)
@@ -2187,8 +2188,30 @@ IS_UNOP_MATCHER(Word32Clz)
IS_UNOP_MATCHER(Word32Ctz)
IS_UNOP_MATCHER(Word32Popcnt)
IS_UNOP_MATCHER(Word32ReverseBytes)
+IS_UNOP_MATCHER(SpeculativeToNumber)
#undef IS_UNOP_MATCHER
+// Special-case the Bitcast operators, which are only emitted when
+// ENABLE_VERIFY_CSA is enabled; otherwise they are no-ops, so the matcher
+// forwards directly to the input.
+Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher) {
+#ifdef ENABLE_VERIFY_CSA
+ return MakeMatcher(
+ new IsUnopMatcher(IrOpcode::kBitcastTaggedToWord, input_matcher));
+#else
+ return input_matcher;
+#endif
+}
+
+Matcher<Node*> IsBitcastWordToTaggedSigned(
+ const Matcher<Node*>& input_matcher) {
+#ifdef ENABLE_VERIFY_CSA
+ return MakeMatcher(
+ new IsUnopMatcher(IrOpcode::kBitcastWordToTaggedSigned, input_matcher));
+#else
+ return input_matcher;
+#endif
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 3ce6aba0f3..e3de90eb1f 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -336,6 +336,7 @@ Matcher<Node*> IsUnalignedStore(
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<StackSlotRepresentation>& rep_matcher);
+Matcher<Node*> IsWord32Popcnt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Or(const Matcher<Node*>& lhs_matcher,
@@ -447,6 +448,7 @@ Matcher<Node*> IsNumberToBoolean(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
+Matcher<Node*> IsSpeculationPoison();
Matcher<Node*> IsLoadFramePointer();
Matcher<Node*> IsLoadParentFramePointer();
Matcher<Node*> IsPlainPrimitiveToNumber(const Matcher<Node*>& input_matcher);
@@ -478,6 +480,8 @@ Matcher<Node*> IsWord32ReverseBytes(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsStackSlot();
+Matcher<Node*> IsSpeculativeToNumber(const Matcher<Node*>& value_matcher);
+
// Helpers
static inline Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
@@ -548,7 +552,7 @@ static inline Matcher<Node*> IsChangeUint32ToWord(
return kPointerSize == 8 ? IsChangeUint32ToUint64(matcher) : matcher;
}
-static inline Matcher<Node*> IsTruncateWordToWord32(
+static inline Matcher<Node*> IsTruncateIntPtrToInt32(
const Matcher<Node*>& matcher) {
return kPointerSize == 8 ? IsTruncateInt64ToInt32(matcher) : matcher;
}
diff --git a/deps/v8/test/unittests/compiler/persistent-unittest.cc b/deps/v8/test/unittests/compiler/persistent-unittest.cc
index d65eda0e88..8454aeaeb1 100644
--- a/deps/v8/test/unittests/compiler/persistent-unittest.cc
+++ b/deps/v8/test/unittests/compiler/persistent-unittest.cc
@@ -83,7 +83,9 @@ TEST(PersistentMap, Zip) {
// Provoke hash collisions to stress the iterator.
struct bad_hash {
- size_t operator()(int key) { return static_cast<size_t>(key) % 1000; }
+ size_t operator()(int key) {
+ return base::hash_value(static_cast<size_t>(key) % 1000);
+ }
};
PersistentMap<int, int, bad_hash> a(&zone);
PersistentMap<int, int, bad_hash> b(&zone);
@@ -116,7 +118,13 @@ TEST(PersistentMap, Zip) {
ASSERT_EQ(0, sum_b);
for (auto triple : a.Zip(b)) {
- sum -= std::get<1>(triple) + std::get<2>(triple);
+ int key = std::get<0>(triple);
+ int value_a = std::get<1>(triple);
+ int value_b = std::get<2>(triple);
+ ASSERT_EQ(value_a, a.Get(key));
+ ASSERT_EQ(value_b, b.Get(key));
+ sum -= value_a;
+ sum -= value_b;
}
ASSERT_EQ(0, sum);
}
diff --git a/deps/v8/test/unittests/compiler/regalloc/OWNERS b/deps/v8/test/unittests/compiler/regalloc/OWNERS
index 88646a1206..bfde831c20 100644
--- a/deps/v8/test/unittests/compiler/regalloc/OWNERS
+++ b/deps/v8/test/unittests/compiler/regalloc/OWNERS
@@ -2,4 +2,3 @@ set noparent
bmeurer@chromium.org
jarin@chromium.org
-mtrofin@chromium.org \ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 44464b9476..fcfbfdb646 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -491,7 +491,6 @@ TEST_MONOTONICITY(ObjectIsString)
TEST_MONOTONICITY(ObjectIsSymbol)
TEST_MONOTONICITY(ObjectIsUndetectable)
TEST_MONOTONICITY(TypeOf)
-TEST_MONOTONICITY(ClassOf)
TEST_MONOTONICITY(ToBoolean)
#undef TEST_MONOTONICITY
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 031217b6b8..4871704d66 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -1463,25 +1463,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
// Miscellaneous.
-TEST_F(InstructionSelectorTest, Uint64LessThanWithLoadAndLoadStackPointer) {
- StreamBuilder m(this, MachineType::Bool());
- Node* const sl = m.Load(
- MachineType::Pointer(),
- m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
- Node* const sp = m.LoadStackPointer();
- Node* const n = m.Uint64LessThan(sl, sp);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kX64StackCheck, s[0]->arch_opcode());
- ASSERT_EQ(0U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_set, s[0]->flags_mode());
- EXPECT_EQ(kUnsignedGreaterThan, s[0]->flags_condition());
-}
-
-
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
diff --git a/deps/v8/test/unittests/counters-unittest.cc b/deps/v8/test/unittests/counters-unittest.cc
index 887ba54e01..d4772934d6 100644
--- a/deps/v8/test/unittests/counters-unittest.cc
+++ b/deps/v8/test/unittests/counters-unittest.cc
@@ -611,7 +611,7 @@ TEST_F(RuntimeCallStatsTest, FunctionLengthGetter) {
EXPECT_EQ(1, getter_counter->count());
EXPECT_EQ(2, js_counter->count());
EXPECT_LE(0, getter_counter->time().InMicroseconds());
- EXPECT_LT(js_time, js_counter->time().InMicroseconds());
+ EXPECT_LE(js_time, js_counter->time().InMicroseconds());
{
NativeTimeScope native_timer_scope;
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
index 23ff94fae9..adeae2b593 100644
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -23,22 +23,32 @@ class ItemParallelJobTest : public TestWithIsolate {
namespace {
-class EmptyTask : public ItemParallelJob::Task {
+class SimpleTask : public ItemParallelJob::Task {
public:
- explicit EmptyTask(Isolate* isolate, bool* did_run)
+ SimpleTask(Isolate* isolate, bool* did_run)
: ItemParallelJob::Task(isolate), did_run_(did_run) {}
- void RunInParallel() override { *did_run_ = true; }
+ void RunInParallel() override {
+ ItemParallelJob::Item* item = nullptr;
+ while ((item = GetItem<ItemParallelJob::Item>()) != nullptr) {
+ item->MarkFinished();
+ }
+ *did_run_ = true;
+ }
private:
bool* did_run_;
};
+// A simple work item which, when processed, sets |was_processed| to true if
+// that pointer is non-null.
class SimpleItem : public ItemParallelJob::Item {
public:
- explicit SimpleItem(bool* was_processed)
+ explicit SimpleItem(bool* was_processed = nullptr)
: ItemParallelJob::Item(), was_processed_(was_processed) {}
- void Process() { *was_processed_ = true; }
+ void Process() {
+ if (was_processed_) *was_processed_ = true;
+ }
private:
bool* was_processed_;
@@ -57,6 +67,9 @@ class EagerTask : public ItemParallelJob::Task {
}
};
+// A OneShotBarrier is shared among |counter| users. Each user should either
+// Signal() or Wait() when done, depending on whether it wants to return
+// immediately or wait until all the others are done as well.
class OneShotBarrier {
public:
explicit OneShotBarrier(size_t counter) : counter_(counter) {
@@ -77,28 +90,61 @@ class OneShotBarrier {
mutex_.Unlock();
}
+ void Signal() {
+ mutex_.Lock();
+ counter_--;
+ if (counter_ == 0) {
+ condition_.NotifyAll();
+ }
+ mutex_.Unlock();
+ }
+
private:
base::Mutex mutex_;
base::ConditionVariable condition_;
size_t counter_;
};
+// A task that processes at most one item. Signals |barrier| when done; if
+// |wait_when_done|, it will block until all other tasks have signaled
+// |barrier|.
+// If |did_process_an_item| is non-null, will set it to true if it does process
+// an item. Otherwise, it will expect to get an item to process (and will report
+// a failure if it doesn't).
class TaskProcessingOneItem : public ItemParallelJob::Task {
public:
- explicit TaskProcessingOneItem(Isolate* isolate, OneShotBarrier* barrier)
- : ItemParallelJob::Task(isolate), barrier_(barrier) {}
+ TaskProcessingOneItem(Isolate* isolate, OneShotBarrier* barrier,
+ bool wait_when_done,
+ bool* did_process_an_item = nullptr)
+ : ItemParallelJob::Task(isolate),
+ barrier_(barrier),
+ wait_when_done_(wait_when_done),
+ did_process_an_item_(did_process_an_item) {}
void RunInParallel() override {
SimpleItem* item = GetItem<SimpleItem>();
- EXPECT_NE(nullptr, item);
- item->Process();
- item->MarkFinished();
- // Avoid canceling the remaining tasks with a simple barrier.
- barrier_->Wait();
+
+ if (did_process_an_item_) {
+ *did_process_an_item_ = item != nullptr;
+ } else {
+ EXPECT_NE(nullptr, item);
+ }
+
+ if (item) {
+ item->Process();
+ item->MarkFinished();
+ }
+
+ if (wait_when_done_) {
+ barrier_->Wait();
+ } else {
+ barrier_->Signal();
+ }
}
private:
OneShotBarrier* barrier_;
+ bool wait_when_done_;
+ bool* did_process_an_item_;
};
class TaskForDifferentItems;
@@ -148,47 +194,95 @@ class ItemB : public BaseItem {
} // namespace
-TEST_F(ItemParallelJobTest, EmptyTaskRuns) {
+// ItemParallelJob runs tasks even without work items (as requested tasks may be
+// responsible for post-processing).
+TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) {
bool did_run = false;
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
- job.AddTask(new EmptyTask(i_isolate(), &did_run));
- job.Run();
+ job.AddTask(new SimpleTask(i_isolate(), &did_run));
+
+ job.Run(i_isolate()->async_counters());
EXPECT_TRUE(did_run);
}
-TEST_F(ItemParallelJobTest, FinishAllItems) {
- const int kItems = 111;
- bool was_processed[kItems];
- for (int i = 0; i < kItems; i++) {
- was_processed[i] = false;
+TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) {
+ bool did_run = false;
+ ItemParallelJob job(i_isolate()->cancelable_task_manager(),
+ parallel_job_semaphore());
+ job.AddTask(new SimpleTask(i_isolate(), &did_run));
+
+ job.AddItem(new ItemParallelJob::Item);
+
+ job.Run(i_isolate()->async_counters());
+ EXPECT_TRUE(did_run);
+}
+
+TEST_F(ItemParallelJobTest, MoreTasksThanItems) {
+ const int kNumTasks = 128;
+ const int kNumItems = kNumTasks - 4;
+
+ TaskProcessingOneItem* tasks[kNumTasks] = {};
+ bool did_process_an_item[kNumTasks] = {};
+
+ ItemParallelJob job(i_isolate()->cancelable_task_manager(),
+ parallel_job_semaphore());
+
+ // The barrier ensures that all tasks run. But only the first kNumItems tasks
+ // should be assigned an item to execute.
+ OneShotBarrier barrier(kNumTasks);
+ for (int i = 0; i < kNumTasks; i++) {
+ // Block the main thread when done to prevent it from returning control to
+ // the job (which could cancel tasks that have yet to be scheduled).
+ const bool wait_when_done = i == 0;
+ tasks[i] = new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done,
+ &did_process_an_item[i]);
+ job.AddTask(tasks[i]);
+ }
+
+ for (int i = 0; i < kNumItems; i++) {
+ job.AddItem(new SimpleItem);
+ }
+
+ job.Run(i_isolate()->async_counters());
+
+ for (int i = 0; i < kNumTasks; i++) {
+ // Only the first kNumItems tasks should have been assigned a work item.
+ EXPECT_EQ(i < kNumItems, did_process_an_item[i]);
}
+}
+
+TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
+ const int kItems = 111;
+ bool was_processed[kItems] = {};
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
job.AddTask(new EagerTask(i_isolate()));
for (int i = 0; i < kItems; i++) {
job.AddItem(new SimpleItem(&was_processed[i]));
}
- job.Run();
+ job.Run(i_isolate()->async_counters());
for (int i = 0; i < kItems; i++) {
EXPECT_TRUE(was_processed[i]);
}
}
TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
- const int kItemsAndTasks = 2; // Main thread + additional task.
- bool was_processed[kItemsAndTasks];
+ const int kItemsAndTasks = 256;
+ bool was_processed[kItemsAndTasks] = {};
OneShotBarrier barrier(kItemsAndTasks);
- for (int i = 0; i < kItemsAndTasks; i++) {
- was_processed[i] = false;
- }
ItemParallelJob job(i_isolate()->cancelable_task_manager(),
parallel_job_semaphore());
for (int i = 0; i < kItemsAndTasks; i++) {
job.AddItem(new SimpleItem(&was_processed[i]));
- job.AddTask(new TaskProcessingOneItem(i_isolate(), &barrier));
+
+ // Block the main thread when done to prevent it from returning control to
+ // the job (which could cancel tasks that have yet to be scheduled).
+ const bool wait_when_done = i == 0;
+ job.AddTask(
+ new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done));
}
- job.Run();
+ job.Run(i_isolate()->async_counters());
for (int i = 0; i < kItemsAndTasks; i++) {
EXPECT_TRUE(was_processed[i]);
}
@@ -202,7 +296,7 @@ TEST_F(ItemParallelJobTest, DifferentItems) {
job.AddItem(new ItemA());
job.AddItem(new ItemB());
job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
- job.Run();
+ job.Run(i_isolate()->async_counters());
EXPECT_TRUE(item_a);
EXPECT_TRUE(item_b);
}
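
For reference, the Signal()/Wait() contract introduced above can be sketched
standalone: Wait() decrements the counter and blocks until it reaches zero,
while Signal() decrements and returns immediately. A minimal sketch using the
C++ standard library in place of v8::base primitives (illustrative only, not
V8's implementation):

    #include <condition_variable>
    #include <cstddef>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    class OneShotBarrier {
     public:
      explicit OneShotBarrier(size_t counter) : counter_(counter) {}

      // Decrement and block until every participant has checked in.
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        if (--counter_ == 0) {
          condition_.notify_all();
        } else {
          condition_.wait(lock, [this] { return counter_ == 0; });
        }
      }

      // Decrement and return immediately; wake waiters once the count is zero.
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        if (--counter_ == 0) condition_.notify_all();
      }

     private:
      std::mutex mutex_;
      std::condition_variable condition_;
      size_t counter_;
    };

    int main() {
      const int kTasks = 4;
      OneShotBarrier barrier(kTasks);
      std::vector<std::thread> workers;
      for (int i = 1; i < kTasks; i++) {
        // Worker tasks signal and return (wait_when_done == false above).
        workers.emplace_back([&barrier] { barrier.Signal(); });
      }
      barrier.Wait();  // The first task blocks (wait_when_done == true above).
      for (auto& t : workers) t.join();
      std::printf("all %d participants done\n", kTasks);
      return 0;
    }

Blocking exactly one task this way keeps the job from regaining control and
canceling tasks that have not yet been scheduled, which is why the tests set
wait_when_done only for i == 0.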
diff --git a/deps/v8/test/unittests/heap/object-stats-unittest.cc b/deps/v8/test/unittests/heap/object-stats-unittest.cc
new file mode 100644
index 0000000000..678fdd2a05
--- /dev/null
+++ b/deps/v8/test/unittests/heap/object-stats-unittest.cc
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <unordered_set>
+
+#include "src/heap/object-stats.h"
+#include "src/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace heap {
+
+namespace {
+
+template <typename T>
+bool Contains(const std::unordered_set<T>& set, T needle) {
+ return set.find(needle) != set.end();
+}
+
+} // namespace
+
+TEST(ObjectStats, NoClashWithInstanceTypes) {
+ std::unordered_set<const char*> virtual_types;
+#define ADD_VIRTUAL_INSTANCE_TYPE(type) virtual_types.insert(#type);
+ VIRTUAL_INSTANCE_TYPE_LIST(ADD_VIRTUAL_INSTANCE_TYPE)
+#undef ADD_VIRTUAL_INSTANCE_TYPE
+#define CHECK_REGULAR_INSTANCE_TYPE(type) \
+  EXPECT_FALSE(Contains(virtual_types, #type));
+  INSTANCE_TYPE_LIST(CHECK_REGULAR_INSTANCE_TYPE)
+#undef CHECK_REGULAR_INSTANCE_TYPE
+}
+
+} // namespace heap
+} // namespace internal
+} // namespace v8
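
The new test uses the classic X-macro pattern: the same list macro is expanded
once to collect names into a set and once to assert that no other list's names
collide. A self-contained sketch of the technique, with invented type lists
standing in for VIRTUAL_INSTANCE_TYPE_LIST and INSTANCE_TYPE_LIST; it stores
std::string so that membership compares text rather than pointer identity:

    #include <cassert>
    #include <string>
    #include <unordered_set>

    // Invented stand-ins for V8's instance-type list macros.
    #define REGULAR_TYPE_LIST(V) V(JS_OBJECT_TYPE) V(HEAP_NUMBER_TYPE)
    #define VIRTUAL_TYPE_LIST(V) V(BOILERPLATE_PROPERTY_ARRAY_TYPE)

    int main() {
      std::unordered_set<std::string> virtual_types;
      // First expansion: record every virtual type name as a string.
    #define ADD_TYPE(type) virtual_types.insert(#type);
      VIRTUAL_TYPE_LIST(ADD_TYPE)
    #undef ADD_TYPE
      // Second expansion: no regular type may reuse a virtual type's name.
    #define CHECK_TYPE(type) assert(virtual_types.count(#type) == 0);
      REGULAR_TYPE_LIST(CHECK_TYPE)
    #undef CHECK_TYPE
      return 0;
    }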
diff --git a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
index 91abbb1f8f..43386ac385 100644
--- a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
@@ -6,6 +6,7 @@
#include "src/globals.h"
#include "src/heap/scavenge-job.h"
+#include "src/utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -31,6 +32,8 @@ TEST(ScavengeJob, AllocationLimitUnknownScavengeSpeed) {
size_t expected_size = ScavengeJob::kInitialScavengeSpeedInBytesPerMs *
ScavengeJob::kAverageIdleTimeMs -
ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ expected_size = Max(expected_size, ScavengeJob::kMinAllocationLimit);
+
EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size - 1,
kNewSpaceCapacity));
EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size,
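
The added clamp mirrors the minimum the scavenge job itself applies: the
expected idle allocation limit is speed * idle_time - already_allocated, but
never below kMinAllocationLimit. A toy computation with invented constants
(V8's real values live in ScavengeJob):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative numbers only.
      const size_t kSpeedBytesPerMs = 256 * 1024;
      const size_t kAverageIdleTimeMs = 5;
      const size_t kBytesAllocatedBeforeNextIdleTask = 1024 * 1024;
      const size_t kMinAllocationLimit = 8 * 1024;

      size_t expected_size = kSpeedBytesPerMs * kAverageIdleTimeMs -
                             kBytesAllocatedBeforeNextIdleTask;
      // Clamp exactly as the test now does, so the expectation matches the
      // limit the job computes.
      expected_size = std::max(expected_size, kMinAllocationLimit);
      std::printf("idle allocation limit: %zu bytes\n", expected_size);
      return 0;
    }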
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 26fcd1937b..8591e55628 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -176,7 +176,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.CreateObjectLiteral(0, 0, 0, reg);
// Emit tagged template operations.
- builder.GetTemplateObject(0);
+ builder.GetTemplateObject(0, 0);
// Call operations.
builder.CallAnyReceiver(reg, reg_list, 1)
@@ -384,9 +384,17 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreModuleVariable(1, 42);
// Emit generator operations.
- builder.SuspendGenerator(reg, reg_list, 0)
- .RestoreGeneratorState(reg)
- .ResumeGenerator(reg, reg, reg_list);
+ {
+  // We have to jump over the suspend because it returns and marks the
+  // remaining bytecode dead.
+ BytecodeLabel after_suspend;
+ builder.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &after_suspend)
+ .SuspendGenerator(reg, reg_list, 0)
+ .Bind(&after_suspend)
+ .ResumeGenerator(reg, reg_list);
+ }
+ BytecodeJumpTable* gen_jump_table = builder.AllocateJumpTable(1, 0);
+ builder.SwitchOnGeneratorState(reg, gen_jump_table).Bind(gen_jump_table, 0);
// Intrinsics handled by the interpreter.
builder.CallRuntime(Runtime::kInlineIsArray, reg_list);
@@ -438,6 +446,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
operand_scale = Bytecodes::PrefixBytecodeToOperandScale(final_bytecode);
prefix_offset = 1;
code = the_array->get(i + 1);
+ scorecard[code] += 1;
final_bytecode = Bytecodes::FromByte(code);
}
i += prefix_offset + Bytecodes::Size(final_bytecode, operand_scale);
@@ -457,7 +466,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
#define CHECK_BYTECODE_PRESENT(Name, ...) \
/* Check Bytecode is marked in scorecard, unless it's a debug break */ \
if (!Bytecodes::IsDebugBreak(Bytecode::k##Name)) { \
- CHECK_GE(scorecard[Bytecodes::ToByte(Bytecode::k##Name)], 1); \
+ EXPECT_GE(scorecard[Bytecodes::ToByte(Bytecode::k##Name)], 1); \
}
BYTECODE_LIST(CHECK_BYTECODE_PRESENT)
#undef CHECK_BYTECODE_PRESENT
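
The scorecard fix above matters for wide/extra-wide bytecodes: when a prefix
byte is seen, both the prefix and the bytecode that follows it must be
counted, or the prefixed bytecode would be reported missing. A toy version of
the counting loop with an invented one-byte "wide" prefix:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t kWidePrefix = 0xFE;  // invented prefix opcode
      uint8_t stream[] = {0x01, kWidePrefix, 0x02, 0x03};
      int scorecard[256] = {};
      for (size_t i = 0; i < sizeof(stream); i++) {
        uint8_t code = stream[i];
        scorecard[code]++;
        if (code == kWidePrefix) {
          code = stream[++i];
          scorecard[code]++;  // the fix: also count the prefixed bytecode
        }
      }
      std::printf("op 0x02 counted %d time(s)\n", scorecard[0x02]);  // 1
      return 0;
    }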
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index e4a956a0ce..9f8f2db953 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -143,8 +143,8 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
CHECK_EQ(bytecodes()->at(i), expected_bytes[i]);
}
- Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
- isolate(), 0, 0, factory()->empty_fixed_array());
+ Handle<BytecodeArray> bytecode_array =
+ writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
PositionTableEntry expected_positions[] = {
@@ -232,8 +232,8 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
static_cast<int>(expected_bytes[i]));
}
- Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
- isolate(), 0, 0, factory()->empty_fixed_array());
+ Handle<BytecodeArray> bytecode_array =
+ writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -284,8 +284,8 @@ TEST_F(BytecodeArrayWriterUnittest, ElideNoneffectfulBytecodes) {
static_cast<int>(expected_bytes[i]));
}
- Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
- isolate(), 0, 0, factory()->empty_fixed_array());
+ Handle<BytecodeArray> bytecode_array =
+ writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -350,8 +350,8 @@ TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
static_cast<int>(expected_bytes[i]));
}
- Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
- isolate(), 0, 0, factory()->empty_fixed_array());
+ Handle<BytecodeArray> bytecode_array =
+ writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 03d9397c7f..1d48b65d25 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -63,6 +63,12 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
value_matcher, _, _);
}
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
+ const Matcher<Node*>& value_matcher) {
+ return kPointerSize == 8 ? IsWord64Xor(value_matcher, c::IsInt64Constant(-1))
+ : IsWord32Xor(value_matcher, c::IsInt32Constant(-1));
+}
+
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
int offset) {
@@ -231,9 +237,8 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
}
}
-Matcher<Node*>
-InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
- int offset, OperandSize operand_size) {
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
+ IsUnpoisonedSignedOperand(int offset, OperandSize operand_size) {
switch (operand_size) {
case OperandSize::kByte:
return IsSignedByteOperand(offset);
@@ -247,9 +252,8 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
return nullptr;
}
-Matcher<Node*>
-InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
- int offset, OperandSize operand_size) {
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
+ IsUnpoisonedUnsignedOperand(int offset, OperandSize operand_size) {
switch (operand_size) {
case OperandSize::kByte:
return IsUnsignedByteOperand(offset);
@@ -263,6 +267,50 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
return nullptr;
}
+Matcher<compiler::Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonTagged(
+ const Matcher<compiler::Node*> value_matcher) {
+ return IsBitcastWordToTagged(IsWordAnd(c::IsSpeculationPoison(),
+ IsBitcastTaggedToWord(value_matcher)));
+}
+
+Matcher<compiler::Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonWord(
+ const Matcher<compiler::Node*> value_matcher) {
+ return IsWordAnd(c::IsSpeculationPoison(), value_matcher);
+}
+
+Matcher<compiler::Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsPoisonInt32(
+ const Matcher<compiler::Node*> value_matcher) {
+ Matcher<compiler::Node*> truncated_speculation_poison =
+ Is64() ? c::IsTruncateInt64ToInt32(c::IsSpeculationPoison())
+ : c::IsSpeculationPoison();
+ return IsWord32And(truncated_speculation_poison, value_matcher);
+}
+
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
+ int offset, OperandSize operand_size) {
+ return IsPoisonInt32(IsUnpoisonedSignedOperand(offset, operand_size));
+}
+
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
+ int offset, OperandSize operand_size) {
+ return IsPoisonInt32(IsUnpoisonedUnsignedOperand(offset, operand_size));
+}
+
+Matcher<compiler::Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadRegisterOperand(
+ int offset, OperandSize operand_size) {
+ Matcher<compiler::Node*> reg_operand =
+ IsChangeInt32ToIntPtr(IsUnpoisonedSignedOperand(offset, operand_size));
+ return IsPoisonTagged(
+ IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
+ c::IsWordShl(reg_operand, c::IsIntPtrConstant(kPointerSizeLog2))));
+}
+
TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
// If debug code is enabled we emit extra code in Jump.
if (FLAG_debug_code) return;
@@ -345,17 +393,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
m.IsSignedOperand(offset, operand_size));
break;
}
- case interpreter::OperandType::kRegList:
- case interpreter::OperandType::kReg:
- case interpreter::OperandType::kRegOut:
- case interpreter::OperandType::kRegOutList:
- case interpreter::OperandType::kRegOutPair:
- case interpreter::OperandType::kRegOutTriple:
- case interpreter::OperandType::kRegPair:
- EXPECT_THAT(m.BytecodeOperandReg(i),
- c::IsChangeInt32ToIntPtr(
- m.IsSignedOperand(offset, operand_size)));
- break;
case interpreter::OperandType::kRuntimeId:
EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
m.IsUnsignedOperand(offset, operand_size));
@@ -364,6 +401,16 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
EXPECT_THAT(m.BytecodeOperandIntrinsicId(i),
m.IsUnsignedOperand(offset, operand_size));
break;
+ case interpreter::OperandType::kRegList:
+ case interpreter::OperandType::kReg:
+ case interpreter::OperandType::kRegPair:
+ case interpreter::OperandType::kRegOut:
+ case interpreter::OperandType::kRegOutList:
+ case interpreter::OperandType::kRegOutPair:
+ case interpreter::OperandType::kRegOutTriple:
+ EXPECT_THAT(m.LoadRegisterAtOperandIndex(i),
+ m.IsLoadRegisterOperand(offset, operand_size));
+ break;
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
@@ -385,51 +432,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetContext) {
}
}
-TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerTestState state(this, bytecode);
- InterpreterAssemblerForTest m(&state, bytecode);
- Node* reg_index_node = m.Parameter(0);
- Node* reg_location_node = m.RegisterLocation(reg_index_node);
- EXPECT_THAT(
- reg_location_node,
- c::IsIntPtrAdd(c::IsLoadParentFramePointer(),
- c::IsWordShl(reg_index_node,
- c::IsIntPtrConstant(kPointerSizeLog2))));
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerTestState state(this, bytecode);
- InterpreterAssemblerForTest m(&state, bytecode);
- Node* reg_index_node = m.Parameter(0);
- Node* load_reg_node = m.LoadRegister(reg_index_node);
- EXPECT_THAT(
- load_reg_node,
- m.IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
- c::IsWordShl(reg_index_node,
- c::IsIntPtrConstant(kPointerSizeLog2))));
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerTestState state(this, bytecode);
- InterpreterAssemblerForTest m(&state, bytecode);
- Node* store_value = m.Int32Constant(0xDEADBEEF);
- Node* reg_index_node = m.Parameter(0);
- Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
- EXPECT_THAT(store_reg_node,
- m.IsStore(c::StoreRepresentation(MachineRepresentation::kTagged,
- kNoWriteBarrier),
- c::IsLoadParentFramePointer(),
- c::IsWordShl(reg_index_node,
- c::IsIntPtrConstant(kPointerSizeLog2)),
- store_value));
- }
-}
-
TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
@@ -442,11 +444,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
kHeapObjectTag));
- EXPECT_THAT(
- load_constant,
- m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
- c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
- kHeapObjectTag)));
+ EXPECT_THAT(load_constant,
+ m.IsPoisonTagged(m.IsLoad(
+ MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
+ kHeapObjectTag))));
}
{
Node* index = m.Parameter(2);
@@ -458,11 +460,12 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
kHeapObjectTag));
EXPECT_THAT(
load_constant,
- m.IsLoad(
+ m.IsPoisonTagged(m.IsLoad(
MachineType::AnyTagged(), constant_pool_matcher,
c::IsIntPtrAdd(
c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- c::IsWordShl(index, c::IsIntPtrConstant(kPointerSizeLog2)))));
+ c::IsWordShl(index,
+ c::IsIntPtrConstant(kPointerSizeLog2))))));
}
}
}
@@ -504,8 +507,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
CodeFactory::InterpreterCEntry(isolate(), result_size);
Node* function_id = m.Int32Constant(0);
- Node* first_arg = m.IntPtrConstant(1);
- Node* arg_count = m.Int32Constant(2);
+ InterpreterAssembler::RegListNodePair registers(m.IntPtrConstant(1),
+ m.Int32Constant(2));
Node* context = m.IntPtrConstant(4);
Matcher<Node*> function_table = c::IsExternalConstant(
@@ -518,11 +521,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
m.IsLoad(MachineType::Pointer(), function,
c::IsIntPtrConstant(offsetof(Runtime::Function, entry)));
- Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
- arg_count, result_size);
- EXPECT_THAT(call_runtime,
- c::IsCall(_, c::IsHeapConstant(builtin.code()), arg_count,
- first_arg, function_entry, context, _, _));
+ Node* call_runtime =
+ m.CallRuntimeN(function_id, context, registers, result_size);
+ EXPECT_THAT(
+ call_runtime,
+ c::IsCall(_, c::IsHeapConstant(builtin.code()),
+ registers.reg_count(), registers.base_reg_location(),
+ function_entry, context, _, _));
}
}
}
@@ -538,10 +543,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
m.IsLoad(MachineType::AnyTagged(), c::IsLoadParentFramePointer(),
c::IsIntPtrConstant(Register::function_closure().ToOperand()
<< kPointerSizeLog2));
- Matcher<Node*> load_vector_cell_matcher =
- m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
- c::IsIntPtrConstant(JSFunction::kFeedbackVectorOffset -
- kHeapObjectTag));
+ Matcher<Node*> load_vector_cell_matcher = m.IsLoad(
+ MachineType::AnyTagged(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
EXPECT_THAT(
feedback_vector,
m.IsLoad(MachineType::AnyTagged(), load_vector_cell_matcher,
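
The poison matchers added in this file encode V8's Spectre mitigation: a
"speculation poison" word that is all-ones on the architecturally taken path
and all-zeroes under misspeculation is ANDed into loaded values (with bitcasts
around the AND for tagged values), so speculatively loaded data is zeroed
before it can flow into later operations. A standalone sketch of the masking
idea; the branch-free bounds mask here is illustrative and not how V8 derives
its poison:

    #include <cstdint>
    #include <cstdio>

    // Mask is all-ones when |index| is in bounds, all-zeroes otherwise.
    // Table size is assumed to be a power of two in this toy.
    uint64_t PoisonedLoad(const uint64_t* table, uint64_t index,
                          uint64_t size) {
      uint64_t poison =
          uint64_t{0} - static_cast<uint64_t>(index < size);  // 0 or ~0
      uint64_t value = table[index & (size - 1)];
      return value & poison;  // mirrors IsWordAnd(IsSpeculationPoison(), v)
    }

    int main() {
      uint64_t table[4] = {11, 22, 33, 44};
      std::printf("%llu\n",
                  (unsigned long long)PoisonedLoad(table, 2, 4));  // 33
      std::printf("%llu\n",
                  (unsigned long long)PoisonedLoad(table, 9, 4));  // 0
      return 0;
    }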
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 589d0c8df5..2e768be5c4 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -49,6 +49,15 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<compiler::Node*>& index_matcher,
const Matcher<compiler::Node*>& value_matcher);
+ Matcher<Node*> IsWordNot(const Matcher<Node*>& value_matcher);
+
+ Matcher<compiler::Node*> IsPoisonTagged(
+ const Matcher<compiler::Node*> value_matcher);
+ Matcher<compiler::Node*> IsPoisonInt32(
+ const Matcher<compiler::Node*> value_matcher);
+ Matcher<compiler::Node*> IsPoisonWord(
+ const Matcher<compiler::Node*> value_matcher);
+
Matcher<compiler::Node*> IsUnsignedByteOperand(int offset);
Matcher<compiler::Node*> IsSignedByteOperand(int offset);
Matcher<compiler::Node*> IsUnsignedShortOperand(int offset);
@@ -56,11 +65,19 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
Matcher<compiler::Node*> IsUnsignedQuadOperand(int offset);
Matcher<compiler::Node*> IsSignedQuadOperand(int offset);
+ Matcher<compiler::Node*> IsUnpoisonedSignedOperand(
+ int offset, OperandSize operand_size);
+ Matcher<compiler::Node*> IsUnpoisonedUnsignedOperand(
+ int offset, OperandSize operand_size);
+
Matcher<compiler::Node*> IsSignedOperand(int offset,
OperandSize operand_size);
Matcher<compiler::Node*> IsUnsignedOperand(int offset,
OperandSize operand_size);
+ Matcher<compiler::Node*> IsLoadRegisterOperand(int offset,
+ OperandSize operand_size);
+
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
};
diff --git a/deps/v8/test/unittests/object-unittest.cc b/deps/v8/test/unittests/object-unittest.cc
index 4cb113a644..c88e04a89b 100644
--- a/deps/v8/test/unittests/object-unittest.cc
+++ b/deps/v8/test/unittests/object-unittest.cc
@@ -19,10 +19,11 @@ namespace {
bool IsInStringInstanceTypeList(InstanceType instance_type) {
switch (instance_type) {
-#define TEST_INSTANCE_TYPE(type, ...) \
- case InstanceType::type: \
- STATIC_ASSERT(InstanceType::type < InstanceType::FIRST_NONSTRING_TYPE);
-
+#define ASSERT_INSTANCE_TYPE(type, ...) \
+ STATIC_ASSERT(InstanceType::type < InstanceType::FIRST_NONSTRING_TYPE);
+ STRING_TYPE_LIST(ASSERT_INSTANCE_TYPE)
+#undef ASSERT_INSTANCE_TYPE
+#define TEST_INSTANCE_TYPE(type, ...) case InstanceType::type:
STRING_TYPE_LIST(TEST_INSTANCE_TYPE)
#undef TEST_INSTANCE_TYPE
return true;
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 354d1b7d2d..d19c337239 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -32,6 +32,8 @@ TestWithIsolate::~TestWithIsolate() {}
void TestWithIsolate::SetUpTestCase() {
Test::SetUpTestCase();
EXPECT_EQ(NULL, isolate_);
+ // Make BigInt64Array / BigUint64Array available for testing.
+ i::FLAG_harmony_bigint = true;
v8::Isolate::CreateParams create_params;
array_buffer_allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
create_params.array_buffer_allocator = array_buffer_allocator_;
diff --git a/deps/v8/test/unittests/testcfg.py b/deps/v8/test/unittests/testcfg.py
index 9b18743566..05fdd85809 100644
--- a/deps/v8/test/unittests/testcfg.py
+++ b/deps/v8/test/unittests/testcfg.py
@@ -10,18 +10,23 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
+class VariantsGenerator(testsuite.VariantsGenerator):
+ def _get_variants(self, test):
+ return self._standard_variant
+
+
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
- shell = os.path.abspath(os.path.join(context.shell_dir, self.name))
+ def ListTests(self):
+ shell = os.path.abspath(os.path.join(self.test_config.shell_dir, self.name))
if utils.IsWindows():
shell += ".exe"
output = None
for i in xrange(3): # Try 3 times in case of errors.
cmd = command.Command(
- cmd_prefix=context.command_prefix,
+ cmd_prefix=self.test_config.command_prefix,
shell=shell,
- args=['--gtest_list_tests'] + context.extra_flags)
+ args=['--gtest_list_tests'] + self.test_config.extra_flags)
output = cmd.execute()
if output.exit_code == 0:
break
@@ -50,15 +55,15 @@ class TestSuite(testsuite.TestSuite):
def _test_class(self):
return TestCase
- def _LegacyVariantsGeneratorFactory(self):
- return testsuite.StandardLegacyVariantsGenerator
+ def _variants_gen_class(self):
+ return VariantsGenerator
class TestCase(testcase.TestCase):
- def _get_suite_flags(self, ctx):
+ def _get_suite_flags(self):
return (
["--gtest_filter=" + self.path] +
- ["--gtest_random_seed=%s" % ctx.random_seed] +
+ ["--gtest_random_seed=%s" % self.random_seed] +
["--gtest_print_time=0"]
)
@@ -66,5 +71,5 @@ class TestCase(testcase.TestCase):
return self.suite.name
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/unittests/unicode-unittest.cc b/deps/v8/test/unittests/unicode-unittest.cc
index e5ccaca7b1..068052a3fc 100644
--- a/deps/v8/test/unittests/unicode-unittest.cc
+++ b/deps/v8/test/unittests/unicode-unittest.cc
@@ -8,6 +8,7 @@
#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
+#include "src/vector.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -15,16 +16,6 @@ namespace internal {
namespace {
-using Utf8Decoder = unibrow::Utf8Decoder<512>;
-
-void Decode(Utf8Decoder* decoder, const std::string& str) {
- // Put the string in its own buffer on the heap to make sure that
- // AddressSanitizer's heap-buffer-overflow logic can see what's going on.
- std::unique_ptr<char[]> buffer(new char[str.length()]);
- memcpy(buffer.get(), str.data(), str.length());
- decoder->Reset(buffer.get(), str.length());
-}
-
void DecodeNormally(const std::vector<byte>& bytes,
std::vector<unibrow::uchar>* output) {
size_t cursor = 0;
@@ -34,6 +25,28 @@ void DecodeNormally(const std::vector<byte>& bytes,
}
}
+template <size_t kBufferSize>
+void DecodeUtf16(unibrow::Utf8Decoder<kBufferSize>* decoder,
+ const std::vector<byte>& bytes,
+ std::vector<unibrow::uchar>* output) {
+ const char* bytes_begin = reinterpret_cast<const char*>(&(*bytes.begin()));
+ auto vector = Vector<const char>(bytes_begin, bytes.size());
+ decoder->Reset(vector);
+
+ std::vector<uint16_t> utf16(decoder->Utf16Length());
+ decoder->WriteUtf16(&(*utf16.begin()), decoder->Utf16Length(), vector);
+
+ // Decode back into code points
+ for (size_t i = 0; i < utf16.size(); i++) {
+ uint16_t b = utf16[i];
+ if (unibrow::Utf16::IsLeadSurrogate(b)) {
+ output->push_back(unibrow::Utf16::CombineSurrogatePair(b, utf16[++i]));
+ } else {
+ output->push_back(b);
+ }
+ }
+}
+
void DecodeIncrementally(const std::vector<byte>& bytes,
std::vector<unibrow::uchar>* output) {
unibrow::Utf8::Utf8IncrementalBuffer buffer = 0;
@@ -53,14 +66,52 @@ void DecodeIncrementally(const std::vector<byte>& bytes,
} // namespace
-TEST(UnicodeTest, ReadOffEndOfUtf8String) {
- Utf8Decoder decoder;
+TEST(UnicodeTest, Utf16BufferReuse) {
+ unibrow::Utf8Decoder<4> utf16_decoder;
+
+  // UTF-8 sequences of increasing, then decreasing, byte length, so that
+  // successive decodes reuse (and must correctly reset) the small buffer.
+  struct TestCase {
+    std::vector<byte> bytes;
+    std::vector<unibrow::uchar> unicode_expected;
+  };
+
+ TestCase data[] = {
+ {{0x00}, {0x0}},
+ {{0xC2, 0x80}, {0x80}},
+ {{0xE0, 0xA0, 0x80}, {0x800}},
+ {{0xF0, 0x90, 0x80, 0x80}, {0x10000}},
+ {{0xE0, 0xA0, 0x80}, {0x800}},
+ {{0xC2, 0x80}, {0x80}},
+ {{0x00}, {0x0}},
+ };
+ for (auto test : data) {
+ // For figuring out which test fails:
+ fprintf(stderr, "test: ");
+ for (auto b : test.bytes) {
+ fprintf(stderr, "%x ", b);
+ }
+ fprintf(stderr, "\n");
+
+ std::vector<unibrow::uchar> output_utf16;
+ DecodeUtf16(&utf16_decoder, test.bytes, &output_utf16);
+
+ CHECK_EQ(output_utf16.size(), test.unicode_expected.size());
+ for (size_t i = 0; i < output_utf16.size(); ++i) {
+ CHECK_EQ(output_utf16[i], test.unicode_expected[i]);
+ }
+ }
+}
+
+TEST(UnicodeTest, SurrogateOverrunsBuffer) {
+ unibrow::Utf8Decoder<2> utf16_decoder;
+ std::vector<unibrow::uchar> output_utf16;
// Not enough continuation bytes before string ends.
- Decode(&decoder, "\xE0");
- Decode(&decoder, "\xED");
- Decode(&decoder, "\xF0");
- Decode(&decoder, "\xF4");
+ DecodeUtf16(&utf16_decoder, {0x00, 0xF0, 0x90, 0x80, 0x80, 0x00},
+ &output_utf16);
+ CHECK_EQ(output_utf16[0], 0x00);
+ CHECK_EQ(output_utf16[1], 0x10000);
+  CHECK_EQ(output_utf16[2], 0x00);
}
TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
@@ -414,6 +465,8 @@ TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
0x8FFFF}},
};
+ unibrow::Utf8Decoder<50> utf16_decoder;
+
for (auto test : data) {
// For figuring out which test fails:
fprintf(stderr, "test: ");
@@ -437,6 +490,14 @@ TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
for (size_t i = 0; i < output_incremental.size(); ++i) {
CHECK_EQ(output_incremental[i], test.unicode_expected[i]);
}
+
+ std::vector<unibrow::uchar> output_utf16;
+ DecodeUtf16(&utf16_decoder, test.bytes, &output_utf16);
+
+ CHECK_EQ(output_utf16.size(), test.unicode_expected.size());
+ for (size_t i = 0; i < output_utf16.size(); ++i) {
+ CHECK_EQ(output_utf16[i], test.unicode_expected[i]);
+ }
}
}
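
DecodeUtf16 above folds surrogate pairs back into code points. The arithmetic
is the standard UTF-16 formula, shown standalone here (unibrow's helpers do
the equivalent):

    #include <cstdint>
    #include <cstdio>

    bool IsLeadSurrogate(uint16_t u) { return (u & 0xFC00) == 0xD800; }
    bool IsTrailSurrogate(uint16_t u) { return (u & 0xFC00) == 0xDC00; }

    // Combine a lead/trail surrogate pair into the code point it encodes.
    uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
      return 0x10000u + ((uint32_t{lead} - 0xD800u) << 10) +
             (uint32_t{trail} - 0xDC00u);
    }

    int main() {
      // U+10000 (the first supplementary code point) encodes as D800 DC00,
      // matching the {0xF0, 0x90, 0x80, 0x80} UTF-8 sequence in the tests.
      uint16_t lead = 0xD800, trail = 0xDC00;
      if (IsLeadSurrogate(lead) && IsTrailSurrogate(trail)) {
        std::printf("U+%X\n", (unsigned)CombineSurrogatePair(lead, trail));
      }
      return 0;
    }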
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
deleted file mode 100644
index 50e820e5f1..0000000000
--- a/deps/v8/test/unittests/unittests.gyp
+++ /dev/null
@@ -1,305 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# The sources are kept automatically in sync with BUILD.gn.
-
-{
- 'variables': {
- 'v8_code': 1,
- 'unittests_sources': [ ### gcmole(all) ###
- 'allocation-unittest.cc',
- 'api/access-check-unittest.cc',
- 'api/exception-unittest.cc',
- 'api/interceptor-unittest.cc',
- 'api/isolate-unittest.cc',
- 'api/remote-object-unittest.cc',
- 'api/v8-object-unittest.cc',
- 'asmjs/asm-scanner-unittest.cc',
- 'asmjs/asm-types-unittest.cc',
- 'asmjs/switch-logic-unittest.cc',
- 'base/atomic-utils-unittest.cc',
- 'base/bits-unittest.cc',
- 'base/cpu-unittest.cc',
- 'base/division-by-constant-unittest.cc',
- 'base/flags-unittest.cc',
- 'base/functional-unittest.cc',
- 'base/ieee754-unittest.cc',
- 'base/logging-unittest.cc',
- 'base/macros-unittest.cc',
- 'base/iterator-unittest.cc',
- 'base/ostreams-unittest.cc',
- 'base/platform/condition-variable-unittest.cc',
- 'base/platform/mutex-unittest.cc',
- 'base/platform/platform-unittest.cc',
- 'base/platform/semaphore-unittest.cc',
- 'base/platform/time-unittest.cc',
- 'base/sys-info-unittest.cc',
- 'base/template-utils-unittest.cc',
- 'base/utils/random-number-generator-unittest.cc',
- 'bigint-unittest.cc',
- 'cancelable-tasks-unittest.cc',
- 'char-predicates-unittest.cc',
- "code-stub-assembler-unittest.cc",
- "code-stub-assembler-unittest.h",
- 'compiler/branch-elimination-unittest.cc',
- 'compiler/bytecode-analysis-unittest.cc',
- 'compiler/checkpoint-elimination-unittest.cc',
- "compiler/code-assembler-unittest.cc",
- "compiler/code-assembler-unittest.h",
- 'compiler/common-operator-reducer-unittest.cc',
- 'compiler/common-operator-unittest.cc',
- 'compiler/compiler-test-utils.h',
- 'compiler/control-equivalence-unittest.cc',
- 'compiler/control-flow-optimizer-unittest.cc',
- 'compiler/dead-code-elimination-unittest.cc',
- 'compiler/diamond-unittest.cc',
- 'compiler/effect-control-linearizer-unittest.cc',
- 'compiler/graph-reducer-unittest.cc',
- 'compiler/graph-reducer-unittest.h',
- 'compiler/graph-trimmer-unittest.cc',
- 'compiler/graph-unittest.cc',
- 'compiler/graph-unittest.h',
- 'compiler/instruction-unittest.cc',
- 'compiler/instruction-selector-unittest.cc',
- 'compiler/instruction-selector-unittest.h',
- 'compiler/instruction-sequence-unittest.cc',
- 'compiler/instruction-sequence-unittest.h',
- 'compiler/int64-lowering-unittest.cc',
- 'compiler/js-builtin-reducer-unittest.cc',
- 'compiler/js-create-lowering-unittest.cc',
- 'compiler/js-intrinsic-lowering-unittest.cc',
- 'compiler/js-operator-unittest.cc',
- 'compiler/js-typed-lowering-unittest.cc',
- 'compiler/linkage-tail-call-unittest.cc',
- 'compiler/live-range-builder.h',
- 'compiler/regalloc/live-range-unittest.cc',
- 'compiler/load-elimination-unittest.cc',
- 'compiler/loop-peeling-unittest.cc',
- 'compiler/machine-operator-reducer-unittest.cc',
- 'compiler/machine-operator-unittest.cc',
- 'compiler/regalloc/move-optimizer-unittest.cc',
- 'compiler/node-cache-unittest.cc',
- 'compiler/node-matchers-unittest.cc',
- 'compiler/node-properties-unittest.cc',
- 'compiler/node-test-utils.cc',
- 'compiler/node-test-utils.h',
- 'compiler/node-unittest.cc',
- 'compiler/opcodes-unittest.cc',
- 'compiler/persistent-unittest.cc',
- 'compiler/regalloc/register-allocator-unittest.cc',
- 'compiler/schedule-unittest.cc',
- 'compiler/scheduler-unittest.cc',
- 'compiler/scheduler-rpo-unittest.cc',
- 'compiler/simplified-lowering-unittest.cc',
- 'compiler/simplified-operator-reducer-unittest.cc',
- 'compiler/simplified-operator-unittest.cc',
- 'compiler/state-values-utils-unittest.cc',
- 'compiler/typed-optimization-unittest.cc',
- 'compiler/typer-unittest.cc',
- 'compiler/value-numbering-reducer-unittest.cc',
- 'compiler/zone-stats-unittest.cc',
- 'compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc',
- 'compiler-dispatcher/compiler-dispatcher-unittest.cc',
- 'compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc',
- 'compiler-dispatcher/unoptimized-compile-job-unittest.cc',
- 'counters-unittest.cc',
- 'detachable-vector-unittest.cc',
- 'eh-frame-iterator-unittest.cc',
- 'eh-frame-writer-unittest.cc',
- 'heap/barrier-unittest.cc',
- 'heap/bitmap-unittest.cc',
- 'heap/embedder-tracing-unittest.cc',
- 'heap/gc-idle-time-handler-unittest.cc',
- 'heap/gc-tracer-unittest.cc',
- 'heap/item-parallel-job-unittest.cc',
- 'heap/marking-unittest.cc',
- 'heap/memory-reducer-unittest.cc',
- 'heap/heap-unittest.cc',
- 'heap/scavenge-job-unittest.cc',
- 'heap/slot-set-unittest.cc',
- 'heap/spaces-unittest.cc',
- 'heap/unmapper-unittest.cc',
- 'heap/worklist-unittest.cc',
- 'interpreter/bytecodes-unittest.cc',
- 'interpreter/bytecode-array-builder-unittest.cc',
- 'interpreter/bytecode-array-iterator-unittest.cc',
- 'interpreter/bytecode-array-random-iterator-unittest.cc',
- 'interpreter/bytecode-array-writer-unittest.cc',
- 'interpreter/bytecode-decoder-unittest.cc',
- 'interpreter/bytecode-node-unittest.cc',
- 'interpreter/bytecode-operands-unittest.cc',
- 'interpreter/bytecode-register-allocator-unittest.cc',
- 'interpreter/bytecode-register-optimizer-unittest.cc',
- 'interpreter/bytecode-source-info-unittest.cc',
- 'interpreter/bytecode-utils.h',
- 'interpreter/constant-array-builder-unittest.cc',
- 'interpreter/interpreter-assembler-unittest.cc',
- 'interpreter/interpreter-assembler-unittest.h',
- 'libplatform/default-platform-unittest.cc',
- 'libplatform/task-queue-unittest.cc',
- 'libplatform/worker-thread-unittest.cc',
- 'locked-queue-unittest.cc',
- 'object-unittest.cc',
- 'parser/ast-value-unittest.cc',
- 'parser/preparser-unittest.cc',
- 'register-configuration-unittest.cc',
- 'run-all-unittests.cc',
- 'source-position-table-unittest.cc',
- 'test-helpers.cc',
- 'test-helpers.h',
- 'test-utils.h',
- 'test-utils.cc',
- 'unicode-unittest.cc',
- 'utils-unittest.cc',
- 'value-serializer-unittest.cc',
- 'zone/segmentpool-unittest.cc',
- 'zone/zone-allocator-unittest.cc',
- 'zone/zone-chunk-list-unittest.cc',
- 'zone/zone-unittest.cc',
- 'wasm/control-transfer-unittest.cc',
- 'wasm/decoder-unittest.cc',
- 'wasm/function-body-decoder-unittest.cc',
- 'wasm/wasm-code-manager-unittest.cc',
- 'wasm/leb-helper-unittest.cc',
- 'wasm/loop-assignment-analysis-unittest.cc',
- 'wasm/module-decoder-unittest.cc',
- 'wasm/streaming-decoder-unittest.cc',
- 'wasm/trap-handler-unittest.cc',
- 'wasm/wasm-macro-gen-unittest.cc',
- 'wasm/wasm-module-builder-unittest.cc',
- 'wasm/wasm-opcodes-unittest.cc',
- ],
- 'unittests_sources_arm': [ ### gcmole(arch:arm) ###
- 'compiler/arm/instruction-selector-arm-unittest.cc',
- ],
- 'unittests_sources_arm64': [ ### gcmole(arch:arm64) ###
- 'compiler/arm64/instruction-selector-arm64-unittest.cc',
- ],
- 'unittests_sources_ia32': [ ### gcmole(arch:ia32) ###
- 'compiler/ia32/instruction-selector-ia32-unittest.cc',
- ],
- 'unittests_sources_mips': [ ### gcmole(arch:mips) ###
- 'compiler/mips/instruction-selector-mips-unittest.cc',
- ],
- 'unittests_sources_mips64': [ ### gcmole(arch:mips64) ###
- 'compiler/mips64/instruction-selector-mips64-unittest.cc',
- ],
- 'unittests_sources_x64': [ ### gcmole(arch:x64) ###
- 'compiler/x64/instruction-selector-x64-unittest.cc',
- ],
- 'unittests_sources_ppc': [ ### gcmole(arch:ppc) ###
- 'compiler/ppc/instruction-selector-ppc-unittest.cc',
- ],
- 'unittests_sources_s390': [ ### gcmole(arch:s390) ###
- 'compiler/s390/instruction-selector-s390-unittest.cc',
- ],
- },
- 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
- 'targets': [
- {
- 'target_name': 'unittests',
- 'type': 'executable',
- 'variables': {
- 'optimize': 'max',
- },
- 'dependencies': [
- '../../testing/gmock.gyp:gmock',
- '../../testing/gtest.gyp:gtest',
- '../../src/v8.gyp:v8',
- '../../src/v8.gyp:v8_libbase',
- '../../src/v8.gyp:v8_libplatform',
- '../../src/v8.gyp:v8_maybe_snapshot',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- '<@(unittests_sources)',
- ],
- 'conditions': [
- ['v8_target_arch=="arm"', {
- 'sources': [
- '<@(unittests_sources_arm)',
- ],
- }],
- ['v8_target_arch=="arm64"', {
- 'sources': [
- '<@(unittests_sources_arm64)',
- ],
- }],
- ['v8_target_arch=="ia32"', {
- 'sources': [
- '<@(unittests_sources_ia32)',
- ],
- }],
- ['v8_target_arch=="mips"', {
- 'sources': [
- '<@(unittests_sources_mips)',
- ],
- }],
- ['v8_target_arch=="mipsel"', {
- 'sources': [
- '<@(unittests_sources_mips)',
- ],
- }],
- ['v8_target_arch=="mips64"', {
- 'sources': [
- '<@(unittests_sources_mips64)',
- ],
- }],
- ['v8_target_arch=="mips64el"', {
- 'sources': [
- '<@(unittests_sources_mips64)',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'sources': [
- '<@(unittests_sources_x64)',
- ],
- }],
- ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
- 'sources': [
- '<@(unittests_sources_ppc)',
- ],
- }],
- ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
- 'sources': [
- '<@(unittests_sources_s390)',
- ],
- }],
- ['OS=="aix"', {
- 'ldflags': [ '-Wl,-bbigtoc' ],
- }],
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ],
- }],
- ['v8_use_snapshot=="true"', {
- 'dependencies': ['../../src/v8.gyp:v8_initializers'],
- }],
- ],
- },
- ],
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'unittests_run',
- 'type': 'none',
- 'dependencies': [
- 'unittests',
- ],
- 'includes': [
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'unittests.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index d439913ccf..972d7e11f1 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -3,4 +3,8 @@
# found in the LICENSE file.
[
+['system == windows', {
+ # BUG(7492).
+ 'ThreadTicks.ThreadNow': [SKIP],
+}], # 'system == windows'
]
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index c143b58090..3a997b62ec 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -87,36 +87,27 @@ class ValueSerializerTest : public TestWithIsolate {
}
virtual void BeforeDecode(ValueDeserializer*) {}
- template <typename InputFunctor, typename OutputFunctor>
- void RoundTripTest(const InputFunctor& input_functor,
- const OutputFunctor& output_functor) {
- EncodeTest(input_functor,
- [this, &output_functor](const std::vector<uint8_t>& data) {
- DecodeTest(data, output_functor);
- });
+ Local<Value> RoundTripTest(Local<Value> input_value) {
+ std::vector<uint8_t> encoded = EncodeTest(input_value);
+ return DecodeTest(encoded);
}
// Variant for the common case where a script is used to build the original
// value.
- template <typename OutputFunctor>
- void RoundTripTest(const char* source, const OutputFunctor& output_functor) {
- RoundTripTest([this, source]() { return EvaluateScriptForInput(source); },
- output_functor);
+ Local<Value> RoundTripTest(const char* source) {
+ return RoundTripTest(EvaluateScriptForInput(source));
}
// Variant which uses JSON.parse/stringify to check the result.
void RoundTripJSON(const char* source) {
- RoundTripTest(
- [this, source]() {
- return JSON::Parse(serialization_context_, StringFromUtf8(source))
- .ToLocalChecked();
- },
- [this, source](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_EQ(source, Utf8Value(JSON::Stringify(deserialization_context_,
- value.As<Object>())
- .ToLocalChecked()));
- });
+ Local<Value> input_value =
+ JSON::Parse(serialization_context_, StringFromUtf8(source))
+ .ToLocalChecked();
+ Local<Value> result = RoundTripTest(input_value);
+ ASSERT_TRUE(result->IsObject());
+ EXPECT_EQ(source, Utf8Value(JSON::Stringify(deserialization_context_,
+ result.As<Object>())
+ .ToLocalChecked()));
}
Maybe<std::vector<uint8_t>> DoEncode(Local<Value> value) {
@@ -134,41 +125,33 @@ class ValueSerializerTest : public TestWithIsolate {
return Just(std::move(result));
}
- template <typename InputFunctor, typename EncodedDataFunctor>
- void EncodeTest(const InputFunctor& input_functor,
- const EncodedDataFunctor& encoded_data_functor) {
+ std::vector<uint8_t> EncodeTest(Local<Value> input_value) {
Context::Scope scope(serialization_context());
TryCatch try_catch(isolate());
- Local<Value> input_value = input_functor();
std::vector<uint8_t> buffer;
- ASSERT_TRUE(DoEncode(input_value).To(&buffer));
- ASSERT_FALSE(try_catch.HasCaught());
- encoded_data_functor(buffer);
+ // Ideally we would use GTest's ASSERT_* macros here and below. However,
+ // those only work in functions returning {void}, and they only terminate
+ // the current function, but not the entire current test (so we would need
+ // additional manual checks whether it is okay to proceed). Given that our
+ // test driver starts a new process for each test anyway, it is acceptable
+ // to just use a CHECK (which would kill the process on failure) instead.
+ CHECK(DoEncode(input_value).To(&buffer));
+ CHECK(!try_catch.HasCaught());
+ return buffer;
}
- template <typename InputFunctor, typename MessageFunctor>
- void InvalidEncodeTest(const InputFunctor& input_functor,
- const MessageFunctor& functor) {
+ v8::Local<v8::Message> InvalidEncodeTest(Local<Value> input_value) {
Context::Scope scope(serialization_context());
TryCatch try_catch(isolate());
- Local<Value> input_value = input_functor();
- ASSERT_TRUE(DoEncode(input_value).IsNothing());
- functor(try_catch.Message());
- }
-
- template <typename MessageFunctor>
- void InvalidEncodeTest(const char* source, const MessageFunctor& functor) {
- InvalidEncodeTest(
- [this, source]() { return EvaluateScriptForInput(source); }, functor);
+ CHECK(DoEncode(input_value).IsNothing());
+ return try_catch.Message();
}
- void InvalidEncodeTest(const char* source) {
- InvalidEncodeTest(source, [](Local<Message>) {});
+ v8::Local<v8::Message> InvalidEncodeTest(const char* source) {
+ return InvalidEncodeTest(EvaluateScriptForInput(source));
}
- template <typename OutputFunctor>
- void DecodeTest(const std::vector<uint8_t>& data,
- const OutputFunctor& output_functor) {
+ Local<Value> DecodeTest(const std::vector<uint8_t>& data) {
Local<Context> context = deserialization_context();
Context::Scope scope(context);
TryCatch try_catch(isolate());
@@ -178,22 +161,19 @@ class ValueSerializerTest : public TestWithIsolate {
deserializer.SetSupportsLegacyWireFormat(true);
deserializer.SetExpectInlineWasm(ExpectInlineWasm());
BeforeDecode(&deserializer);
- ASSERT_TRUE(deserializer.ReadHeader(context).FromMaybe(false));
+ CHECK(deserializer.ReadHeader(context).FromMaybe(false));
Local<Value> result;
- ASSERT_TRUE(deserializer.ReadValue(context).ToLocal(&result));
- ASSERT_FALSE(result.IsEmpty());
- ASSERT_FALSE(try_catch.HasCaught());
- ASSERT_TRUE(
- context->Global()
- ->CreateDataProperty(context, StringFromUtf8("result"), result)
- .FromMaybe(false));
- output_functor(result);
- ASSERT_FALSE(try_catch.HasCaught());
+ CHECK(deserializer.ReadValue(context).ToLocal(&result));
+ CHECK(!result.IsEmpty());
+ CHECK(!try_catch.HasCaught());
+ CHECK(context->Global()
+ ->CreateDataProperty(context, StringFromUtf8("result"), result)
+ .FromMaybe(false));
+ CHECK(!try_catch.HasCaught());
+ return result;
}
- template <typename OutputFunctor>
- void DecodeTestForVersion0(const std::vector<uint8_t>& data,
- const OutputFunctor& output_functor) {
+ Local<Value> DecodeTestForVersion0(const std::vector<uint8_t>& data) {
Local<Context> context = deserialization_context();
Context::Scope scope(context);
TryCatch try_catch(isolate());
@@ -203,18 +183,17 @@ class ValueSerializerTest : public TestWithIsolate {
deserializer.SetSupportsLegacyWireFormat(true);
deserializer.SetExpectInlineWasm(ExpectInlineWasm());
BeforeDecode(&deserializer);
- ASSERT_TRUE(deserializer.ReadHeader(context).FromMaybe(false));
- ASSERT_EQ(0u, deserializer.GetWireFormatVersion());
+ CHECK(deserializer.ReadHeader(context).FromMaybe(false));
+ CHECK_EQ(0u, deserializer.GetWireFormatVersion());
Local<Value> result;
- ASSERT_TRUE(deserializer.ReadValue(context).ToLocal(&result));
- ASSERT_FALSE(result.IsEmpty());
- ASSERT_FALSE(try_catch.HasCaught());
- ASSERT_TRUE(
- context->Global()
- ->CreateDataProperty(context, StringFromUtf8("result"), result)
- .FromMaybe(false));
- output_functor(result);
- ASSERT_FALSE(try_catch.HasCaught());
+ CHECK(deserializer.ReadValue(context).ToLocal(&result));
+ CHECK(!result.IsEmpty());
+ CHECK(!try_catch.HasCaught());
+ CHECK(context->Global()
+ ->CreateDataProperty(context, StringFromUtf8("result"), result)
+ .FromMaybe(false));
+ CHECK(!try_catch.HasCaught());
+ return result;
}
void InvalidDecodeTest(const std::vector<uint8_t>& data) {
@@ -232,24 +211,26 @@ class ValueSerializerTest : public TestWithIsolate {
EXPECT_TRUE(try_catch.HasCaught());
return;
}
- ASSERT_TRUE(header_result.ToChecked());
- ASSERT_TRUE(deserializer.ReadValue(context).IsEmpty());
+ CHECK(header_result.ToChecked());
+ CHECK(deserializer.ReadValue(context).IsEmpty());
EXPECT_TRUE(try_catch.HasCaught());
}
Local<Value> EvaluateScriptForInput(const char* utf8_source) {
+ Context::Scope scope(serialization_context_);
Local<String> source = StringFromUtf8(utf8_source);
Local<Script> script =
Script::Compile(serialization_context_, source).ToLocalChecked();
return script->Run(serialization_context_).ToLocalChecked();
}
- bool EvaluateScriptForResultBool(const char* utf8_source) {
+ void ExpectScriptTrue(const char* utf8_source) {
+ Context::Scope scope(deserialization_context_);
Local<String> source = StringFromUtf8(utf8_source);
Local<Script> script =
Script::Compile(deserialization_context_, source).ToLocalChecked();
Local<Value> value = script->Run(deserialization_context_).ToLocalChecked();
- return value->BooleanValue(deserialization_context_).FromJust();
+ EXPECT_TRUE(value->BooleanValue(deserialization_context_).FromJust());
}
Local<String> StringFromUtf8(const char* source) {
@@ -297,123 +278,111 @@ TEST_F(ValueSerializerTest, DecodeInvalid) {
}
TEST_F(ValueSerializerTest, RoundTripOddball) {
- RoundTripTest([this]() { return Undefined(isolate()); },
- [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
- RoundTripTest([this]() { return True(isolate()); },
- [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
- RoundTripTest([this]() { return False(isolate()); },
- [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
- RoundTripTest([this]() { return Null(isolate()); },
- [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+ Local<Value> value = RoundTripTest(Undefined(isolate()));
+ EXPECT_TRUE(value->IsUndefined());
+ value = RoundTripTest(True(isolate()));
+ EXPECT_TRUE(value->IsTrue());
+ value = RoundTripTest(False(isolate()));
+ EXPECT_TRUE(value->IsFalse());
+ value = RoundTripTest(Null(isolate()));
+ EXPECT_TRUE(value->IsNull());
}
TEST_F(ValueSerializerTest, DecodeOddball) {
// What this code is expected to generate.
- DecodeTest({0xFF, 0x09, 0x5F},
- [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
- DecodeTest({0xFF, 0x09, 0x54},
- [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
- DecodeTest({0xFF, 0x09, 0x46},
- [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
- DecodeTest({0xFF, 0x09, 0x30},
- [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+ Local<Value> value = DecodeTest({0xFF, 0x09, 0x5F});
+ EXPECT_TRUE(value->IsUndefined());
+ value = DecodeTest({0xFF, 0x09, 0x54});
+ EXPECT_TRUE(value->IsTrue());
+ value = DecodeTest({0xFF, 0x09, 0x46});
+ EXPECT_TRUE(value->IsFalse());
+ value = DecodeTest({0xFF, 0x09, 0x30});
+ EXPECT_TRUE(value->IsNull());
// What v9 of the Blink code generates.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x5F, 0x00},
- [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x54, 0x00},
- [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x46, 0x00},
- [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x30, 0x00},
- [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x5F, 0x00});
+ EXPECT_TRUE(value->IsUndefined());
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x54, 0x00});
+ EXPECT_TRUE(value->IsTrue());
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x46, 0x00});
+ EXPECT_TRUE(value->IsFalse());
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x30, 0x00});
+ EXPECT_TRUE(value->IsNull());
// v0 (with no explicit version).
- DecodeTest({0x5F, 0x00},
- [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
- DecodeTest({0x54, 0x00},
- [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
- DecodeTest({0x46, 0x00},
- [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
- DecodeTest({0x30, 0x00},
- [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+ value = DecodeTest({0x5F, 0x00});
+ EXPECT_TRUE(value->IsUndefined());
+ value = DecodeTest({0x54, 0x00});
+ EXPECT_TRUE(value->IsTrue());
+ value = DecodeTest({0x46, 0x00});
+ EXPECT_TRUE(value->IsFalse());
+ value = DecodeTest({0x30, 0x00});
+ EXPECT_TRUE(value->IsNull());
}
TEST_F(ValueSerializerTest, RoundTripNumber) {
- RoundTripTest([this]() { return Integer::New(isolate(), 42); },
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(42, Int32::Cast(*value)->Value());
- });
- RoundTripTest([this]() { return Integer::New(isolate(), -31337); },
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(-31337, Int32::Cast(*value)->Value());
- });
- RoundTripTest(
- [this]() {
- return Integer::New(isolate(), std::numeric_limits<int32_t>::min());
- },
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(std::numeric_limits<int32_t>::min(),
- Int32::Cast(*value)->Value());
- });
- RoundTripTest([this]() { return Number::New(isolate(), -0.25); },
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsNumber());
- EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
- });
- RoundTripTest(
- [this]() {
- return Number::New(isolate(), std::numeric_limits<double>::quiet_NaN());
- },
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsNumber());
- EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
- });
+ Local<Value> value = RoundTripTest(Integer::New(isolate(), 42));
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+
+ value = RoundTripTest(Integer::New(isolate(), -31337));
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(-31337, Int32::Cast(*value)->Value());
+
+ value = RoundTripTest(
+ Integer::New(isolate(), std::numeric_limits<int32_t>::min()));
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), Int32::Cast(*value)->Value());
+
+ value = RoundTripTest(Number::New(isolate(), -0.25));
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
+
+ value = RoundTripTest(
+ Number::New(isolate(), std::numeric_limits<double>::quiet_NaN()));
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
}
TEST_F(ValueSerializerTest, DecodeNumber) {
// 42 zig-zag encoded (signed)
- DecodeTest({0xFF, 0x09, 0x49, 0x54}, [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(42, Int32::Cast(*value)->Value());
- });
+ Local<Value> value = DecodeTest({0xFF, 0x09, 0x49, 0x54});
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+
// 42 varint encoded (unsigned)
- DecodeTest({0xFF, 0x09, 0x55, 0x2A}, [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- EXPECT_EQ(42, Int32::Cast(*value)->Value());
- });
+ value = DecodeTest({0xFF, 0x09, 0x55, 0x2A});
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+
// 160 zig-zag encoded (signed)
- DecodeTest({0xFF, 0x09, 0x49, 0xC0, 0x02}, [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- ASSERT_EQ(160, Int32::Cast(*value)->Value());
- });
+ value = DecodeTest({0xFF, 0x09, 0x49, 0xC0, 0x02});
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+
// 160 varint encoded (unsigned)
- DecodeTest({0xFF, 0x09, 0x55, 0xA0, 0x01}, [](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32());
- ASSERT_EQ(160, Int32::Cast(*value)->Value());
- });
+ value = DecodeTest({0xFF, 0x09, 0x55, 0xA0, 0x01});
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
// IEEE 754 doubles, little-endian byte order
- DecodeTest({0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xBF},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsNumber());
- EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
- });
+ value = DecodeTest(
+ {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xBF});
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
+
// quiet NaN
- DecodeTest({0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x7F},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsNumber());
- EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
- });
+ value = DecodeTest(
+ {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x7F});
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+
// signaling NaN
- DecodeTest({0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x7F},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsNumber());
- EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
- });
+ value = DecodeTest(
+ {0xFF, 0x09, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF4, 0x7F});
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
#endif
// TODO(jbroman): Equivalent test for big-endian machines.
}
@@ -424,103 +393,88 @@ static const char kQuebecString[] = "\x51\x75\xC3\xA9\x62\x65\x63";
static const char kEmojiString[] = "\xF0\x9F\x91\x8A";
TEST_F(ValueSerializerTest, RoundTripString) {
- RoundTripTest([this]() { return String::Empty(isolate()); },
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
- });
+ Local<Value> value = RoundTripTest(String::Empty(isolate()));
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+
// Inside ASCII.
- RoundTripTest([this]() { return StringFromUtf8(kHelloString); },
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(5, String::Cast(*value)->Length());
- EXPECT_EQ(kHelloString, Utf8Value(value));
- });
+ value = RoundTripTest(StringFromUtf8(kHelloString));
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+
// Inside Latin-1 (i.e. one-byte string), but not ASCII.
- RoundTripTest([this]() { return StringFromUtf8(kQuebecString); },
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(6, String::Cast(*value)->Length());
- EXPECT_EQ(kQuebecString, Utf8Value(value));
- });
+ value = RoundTripTest(StringFromUtf8(kQuebecString));
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+
// An emoji (decodes to two 16-bit chars).
- RoundTripTest([this]() { return StringFromUtf8(kEmojiString); },
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(2, String::Cast(*value)->Length());
- EXPECT_EQ(kEmojiString, Utf8Value(value));
- });
+ value = RoundTripTest(StringFromUtf8(kEmojiString));
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
}
TEST_F(ValueSerializerTest, DecodeString) {
// Decoding the strings above from UTF-8.
- DecodeTest({0xFF, 0x09, 0x53, 0x00}, [](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
- });
- DecodeTest({0xFF, 0x09, 0x53, 0x05, 'H', 'e', 'l', 'l', 'o'},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(5, String::Cast(*value)->Length());
- EXPECT_EQ(kHelloString, Utf8Value(value));
- });
- DecodeTest({0xFF, 0x09, 0x53, 0x07, 'Q', 'u', 0xC3, 0xA9, 'b', 'e', 'c'},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(6, String::Cast(*value)->Length());
- EXPECT_EQ(kQuebecString, Utf8Value(value));
- });
- DecodeTest({0xFF, 0x09, 0x53, 0x04, 0xF0, 0x9F, 0x91, 0x8A},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(2, String::Cast(*value)->Length());
- EXPECT_EQ(kEmojiString, Utf8Value(value));
- });
+ Local<Value> value = DecodeTest({0xFF, 0x09, 0x53, 0x00});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+
+ value = DecodeTest({0xFF, 0x09, 0x53, 0x05, 'H', 'e', 'l', 'l', 'o'});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x53, 0x07, 'Q', 'u', 0xC3, 0xA9, 'b', 'e', 'c'});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+
+ value = DecodeTest({0xFF, 0x09, 0x53, 0x04, 0xF0, 0x9F, 0x91, 0x8A});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
// And from Latin-1 (for the ones that fit).
- DecodeTest({0xFF, 0x0A, 0x22, 0x00}, [](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
- });
- DecodeTest({0xFF, 0x0A, 0x22, 0x05, 'H', 'e', 'l', 'l', 'o'},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(5, String::Cast(*value)->Length());
- EXPECT_EQ(kHelloString, Utf8Value(value));
- });
- DecodeTest({0xFF, 0x0A, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(6, String::Cast(*value)->Length());
- EXPECT_EQ(kQuebecString, Utf8Value(value));
- });
+ value = DecodeTest({0xFF, 0x0A, 0x22, 0x00});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+
+ value = DecodeTest({0xFF, 0x0A, 0x22, 0x05, 'H', 'e', 'l', 'l', 'o'});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+
+ value = DecodeTest({0xFF, 0x0A, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
// And from two-byte strings (endianness dependent).
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest({0xFF, 0x09, 0x63, 0x00}, [](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(0, String::Cast(*value)->Length());
- });
- DecodeTest({0xFF, 0x09, 0x63, 0x0A, 'H', '\0', 'e', '\0', 'l', '\0', 'l',
- '\0', 'o', '\0'},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(5, String::Cast(*value)->Length());
- EXPECT_EQ(kHelloString, Utf8Value(value));
- });
- DecodeTest({0xFF, 0x09, 0x63, 0x0C, 'Q', '\0', 'u', '\0', 0xE9, '\0', 'b',
- '\0', 'e', '\0', 'c', '\0'},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(6, String::Cast(*value)->Length());
- EXPECT_EQ(kQuebecString, Utf8Value(value));
- });
- DecodeTest({0xFF, 0x09, 0x63, 0x04, 0x3D, 0xD8, 0x4A, 0xDC},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsString());
- EXPECT_EQ(2, String::Cast(*value)->Length());
- EXPECT_EQ(kEmojiString, Utf8Value(value));
- });
+ value = DecodeTest({0xFF, 0x09, 0x63, 0x00});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+
+ value = DecodeTest({0xFF, 0x09, 0x63, 0x0A, 'H', '\0', 'e', '\0', 'l', '\0',
+ 'l', '\0', 'o', '\0'});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+
+ value = DecodeTest({0xFF, 0x09, 0x63, 0x0C, 'Q', '\0', 'u', '\0', 0xE9, '\0',
+ 'b', '\0', 'e', '\0', 'c', '\0'});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+
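+ // 0x3D 0xD8 0x4A 0xDC is U+D83D U+DC4A in UTF-16LE, the surrogate pair for
+ // the emoji.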
+ value = DecodeTest({0xFF, 0x09, 0x63, 0x04, 0x3D, 0xD8, 0x4A, 0xDC});
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
#endif
// TODO(jbroman): The same for big-endian systems.
}
@@ -543,152 +497,124 @@ TEST_F(ValueSerializerTest, EncodeTwoByteStringUsesPadding) {
// As long as the output has a version that Blink expects to be able to read,
// we must respect its alignment requirements. It requires that two-byte
// characters be aligned.
- EncodeTest(
- [this]() {
- // We need a string whose length will take two bytes to encode, so that
- // a padding byte is needed to keep the characters aligned. The string
- // must also have a two-byte character, so that it gets the two-byte
- // encoding.
- std::string string(200, ' ');
- string += kEmojiString;
- return StringFromUtf8(string.c_str());
- },
- [](const std::vector<uint8_t>& data) {
- // This is a sufficient but not necessary condition. This test assumes
- // that the wire format version is one byte long, but is flexible to
- // what that value may be.
- const uint8_t expected_prefix[] = {0x00, 0x63, 0x94, 0x03};
- ASSERT_GT(data.size(), sizeof(expected_prefix) + 2);
- EXPECT_EQ(0xFF, data[0]);
- EXPECT_GE(data[1], 0x09);
- EXPECT_LE(data[1], 0x7F);
- EXPECT_TRUE(std::equal(std::begin(expected_prefix),
- std::end(expected_prefix), data.begin() + 2));
- });
+ // We need a string whose length will take two bytes to encode, so that
+ // a padding byte is needed to keep the characters aligned. The string
+ // must also have a two-byte character, so that it gets the two-byte
+ // encoding.
+ std::string string(200, ' ');
+ string += kEmojiString;
+ const std::vector<uint8_t> data = EncodeTest(StringFromUtf8(string.c_str()));
+ // This is a sufficient but not necessary condition. This test assumes
+ // that the wire format version is one byte long, but is flexible to
+ // what that value may be.
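+ // Reading that prefix: 0x00 is the padding byte, 0x63 ('c') tags a
+ // two-byte string, and 0x94 0x03 is the varint byte length
+ // (202 characters * 2 = 404).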
+ const uint8_t expected_prefix[] = {0x00, 0x63, 0x94, 0x03};
+ ASSERT_GT(data.size(), sizeof(expected_prefix) + 2);
+ EXPECT_EQ(0xFF, data[0]);
+ EXPECT_GE(data[1], 0x09);
+ EXPECT_LE(data[1], 0x7F);
+ EXPECT_TRUE(std::equal(std::begin(expected_prefix), std::end(expected_prefix),
+ data.begin() + 2));
}
TEST_F(ValueSerializerTest, RoundTripDictionaryObject) {
// Empty object.
- RoundTripTest("({})", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Object.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 0"));
- });
+ Local<Value> value = RoundTripTest("({})");
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Object.prototype");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 0");
+
// String key.
- RoundTripTest("({ a: 42 })", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 1"));
- });
+ value = RoundTripTest("({ a: 42 })");
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result.hasOwnProperty('a')");
+ ExpectScriptTrue("result.a === 42");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+
// Integer key (treated as a string, but may be encoded differently).
- RoundTripTest("({ 42: 'a' })", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 1"));
- });
+ value = RoundTripTest("({ 42: 'a' })");
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result.hasOwnProperty('42')");
+ ExpectScriptTrue("result[42] === 'a'");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+
// Key order must be preserved.
- RoundTripTest("({ x: 1, y: 2, a: 3 })", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
- });
+ value = RoundTripTest("({ x: 1, y: 2, a: 3 })");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).toString() === 'x,y,a'");
+
// A harder case of enumeration order.
// Indexes first, in order (but not 2^32 - 1, which is not an index), then the
// remaining (string) keys, in the order they were defined.
- RoundTripTest(
- "({ a: 2, 0xFFFFFFFF: 1, 0xFFFFFFFE: 3, 1: 0 })",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).toString() === "
- "'1,4294967294,a,4294967295'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 2"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFF] === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFE] === 3"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 0"));
- });
+ value = RoundTripTest("({ a: 2, 0xFFFFFFFF: 1, 0xFFFFFFFE: 3, 1: 0 })");
+ ExpectScriptTrue(
+ "Object.getOwnPropertyNames(result).toString() === "
+ "'1,4294967294,a,4294967295'");
+ ExpectScriptTrue("result.a === 2");
+ ExpectScriptTrue("result[0xFFFFFFFF] === 1");
+ ExpectScriptTrue("result[0xFFFFFFFE] === 3");
+ ExpectScriptTrue("result[1] === 0");
+
// This detects a fairly subtle case: the object itself must be in the map
// before its properties are deserialized, so that references to it can be
// resolved.
- RoundTripTest(
- "(() => { var y = {}; y.self = y; return y; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool("result === result.self"));
- });
+ value = RoundTripTest("var y = {}; y.self = y; y;");
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result === result.self");
}
TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
// Empty object.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x7B, 0x00, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Object.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 0"));
- });
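+ // Byte layout: 0x6F ('o') opens an object and 0x7B ('{') closes it with
+ // its property count; the interleaved 0x3F ('?') pairs are apparently
+ // legacy object-count checks kept for compatibility.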
+ Local<Value> value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x7B, 0x00, 0x00});
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Object.prototype");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 0");
+
// String key.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
- 0x49, 0x54, 0x7B, 0x01},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 1"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
+ 0x61, 0x3F, 0x01, 0x49, 0x54, 0x7B, 0x01});
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result.hasOwnProperty('a')");
+ ExpectScriptTrue("result.a === 42");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+
// Integer key (treated as a string, but may be encoded differently).
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01, 0x53,
- 0x01, 0x61, 0x7B, 0x01},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 1"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x54,
+ 0x3F, 0x01, 0x53, 0x01, 0x61, 0x7B, 0x01});
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result.hasOwnProperty('42')");
+ ExpectScriptTrue("result[42] === 'a'");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+
// Key order must be preserved.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x78, 0x3F, 0x01,
- 0x49, 0x02, 0x3F, 0x01, 0x53, 0x01, 0x79, 0x3F, 0x01, 0x49, 0x04, 0x3F,
- 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01, 0x49, 0x06, 0x7B, 0x03},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
+ 0x78, 0x3F, 0x01, 0x49, 0x02, 0x3F, 0x01, 0x53, 0x01,
+ 0x79, 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x53, 0x01,
+ 0x61, 0x3F, 0x01, 0x49, 0x06, 0x7B, 0x03});
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).toString() === 'x,y,a'");
+
// A harder case of enumeration order.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x02, 0x3F, 0x01,
- 0x49, 0x00, 0x3F, 0x01, 0x55, 0xFE, 0xFF, 0xFF, 0xFF, 0x0F, 0x3F,
- 0x01, 0x49, 0x06, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01, 0x49,
- 0x04, 0x3F, 0x01, 0x53, 0x0A, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36,
- 0x37, 0x32, 0x39, 0x35, 0x3F, 0x01, 0x49, 0x02, 0x7B, 0x04},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).toString() === "
- "'1,4294967294,a,4294967295'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 2"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFF] === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFE] === 3"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 0"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x49, 0x02,
+ 0x3F, 0x01, 0x49, 0x00, 0x3F, 0x01, 0x55, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0x0F, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01,
+ 0x53, 0x01, 0x61, 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01,
+ 0x53, 0x0A, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36, 0x37,
+ 0x32, 0x39, 0x35, 0x3F, 0x01, 0x49, 0x02, 0x7B, 0x04});
+ ExpectScriptTrue(
+ "Object.getOwnPropertyNames(result).toString() === "
+ "'1,4294967294,a,4294967295'");
+ ExpectScriptTrue("result.a === 2");
+ ExpectScriptTrue("result[0xFFFFFFFF] === 1");
+ ExpectScriptTrue("result[0xFFFFFFFE] === 3");
+ ExpectScriptTrue("result[1] === 0");
+
// This detects a fairly subtle case: the object itself must be in the map
// before its properties are deserialized, so that references to it can be
// resolved.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x04, 0x73,
- 0x65, 0x6C, 0x66, 0x3F, 0x01, 0x5E, 0x00, 0x7B, 0x01, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool("result === result.self"));
- });
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x04, 0x73,
+ 0x65, 0x6C, 0x66, 0x3F, 0x01, 0x5E, 0x00, 0x7B, 0x01, 0x00});
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result === result.self");
}
TEST_F(ValueSerializerTest, InvalidDecodeObjectWithInvalidKeyType) {
@@ -701,89 +627,72 @@ TEST_F(ValueSerializerTest, InvalidDecodeObjectWithInvalidKeyType) {
TEST_F(ValueSerializerTest, RoundTripOnlyOwnEnumerableStringKeys) {
// Only "own" properties should be serialized, not ones on the prototype.
- RoundTripTest("(() => { var x = {}; x.__proto__ = {a: 4}; return x; })()",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("!('a' in result)"));
- });
+ Local<Value> value = RoundTripTest("var x = {}; x.__proto__ = {a: 4}; x;");
+ ExpectScriptTrue("!('a' in result)");
+
// Only enumerable properties should be serialized.
- RoundTripTest(
- "(() => {"
- " var x = {};"
- " Object.defineProperty(x, 'a', {value: 1, enumerable: false});"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("!('a' in result)"));
- });
+ value = RoundTripTest(
+ "var x = {};"
+ "Object.defineProperty(x, 'a', {value: 1, enumerable: false});"
+ "x;");
+ ExpectScriptTrue("!('a' in result)");
+
// Symbol keys should not be serialized.
- RoundTripTest("({ [Symbol()]: 4 })", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertySymbols(result).length === 0"));
- });
+ value = RoundTripTest("({ [Symbol()]: 4 })");
+ ExpectScriptTrue("Object.getOwnPropertySymbols(result).length === 0");
}
TEST_F(ValueSerializerTest, RoundTripTrickyGetters) {
// Keys are enumerated before any getters are called, but if there is no own
// property when the value is to be read, then it should not be serialized.
- RoundTripTest("({ get a() { delete this.b; return 1; }, b: 2 })",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
- });
+ Local<Value> value =
+ RoundTripTest("({ get a() { delete this.b; return 1; }, b: 2 })");
+ ExpectScriptTrue("!('b' in result)");
+
// Keys added after the property enumeration should not be serialized.
- RoundTripTest("({ get a() { this.b = 3; }})", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
- });
+ value = RoundTripTest("({ get a() { this.b = 3; }})");
+ ExpectScriptTrue("!('b' in result)");
+
// But if you remove a key and add it back, that's fine; it will appear in
// its original place in enumeration order.
- RoundTripTest(
- "({ get a() { delete this.b; this.b = 4; }, b: 2, c: 3 })",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).toString() === 'a,b,c'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.b === 4"));
- });
+ value =
+ RoundTripTest("({ get a() { delete this.b; this.b = 4; }, b: 2, c: 3 })");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).toString() === 'a,b,c'");
+ ExpectScriptTrue("result.b === 4");
+
// Similarly, it only matters if a property was enumerable when the
// enumeration happened.
- RoundTripTest(
+ value = RoundTripTest(
"({ get a() {"
" Object.defineProperty(this, 'b', {value: 2, enumerable: false});"
- "}, b: 1})",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.b === 2"));
- });
- RoundTripTest(
- "(() => {"
- " var x = {"
- " get a() {"
- " Object.defineProperty(this, 'b', {value: 2, enumerable: true});"
- " }"
- " };"
- " Object.defineProperty(x, 'b',"
- " {value: 1, enumerable: false, configurable: true});"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
- });
+ "}, b: 1})");
+ ExpectScriptTrue("result.b === 2");
+
+ value = RoundTripTest(
+ "var x = {"
+ " get a() {"
+ " Object.defineProperty(this, 'b', {value: 2, enumerable: true});"
+ " }"
+ "};"
+ "Object.defineProperty(x, 'b',"
+ " {value: 1, enumerable: false, configurable: true});"
+ "x;");
+ ExpectScriptTrue("!('b' in result)");
+
// The property also should not be read if it can only be found on the
// prototype chain (but not as an own property) after enumeration.
- RoundTripTest(
- "(() => {"
- " var x = { get a() { delete this.b; }, b: 1 };"
- " x.__proto__ = { b: 0 };"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
- });
+ value = RoundTripTest(
+ "var x = { get a() { delete this.b; }, b: 1 };"
+ "x.__proto__ = { b: 0 };"
+ "x;");
+ ExpectScriptTrue("!('b' in result)");
+
// If an exception is thrown by script, encoding must fail and the exception
// must be thrown.
- InvalidEncodeTest("({ get a() { throw new Error('sentinel'); } })",
- [this](Local<Message> message) {
- ASSERT_FALSE(message.IsEmpty());
- EXPECT_NE(std::string::npos,
- Utf8Value(message->Get()).find("sentinel"));
- });
+ Local<Message> message =
+ InvalidEncodeTest("({ get a() { throw new Error('sentinel'); } })");
+ ASSERT_FALSE(message.IsEmpty());
+ EXPECT_NE(std::string::npos, Utf8Value(message->Get()).find("sentinel"));
}
TEST_F(ValueSerializerTest, RoundTripDictionaryObjectForTransitions) {
@@ -813,234 +722,186 @@ TEST_F(ValueSerializerTest, RoundTripDictionaryObjectForTransitions) {
TEST_F(ValueSerializerTest, DecodeDictionaryObjectVersion0) {
// Empty object.
- DecodeTestForVersion0(
- {0x7B, 0x00}, [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Object.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 0"));
- });
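+ // Version 0 data carries no 0xFF version header; the tag stream starts
+ // immediately.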
+ Local<Value> value = DecodeTestForVersion0({0x7B, 0x00});
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Object.prototype");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 0");
+
// String key.
- DecodeTestForVersion0(
- {0x53, 0x01, 0x61, 0x49, 0x54, 0x7B, 0x01, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Object.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 1"));
- });
+ value =
+ DecodeTestForVersion0({0x53, 0x01, 0x61, 0x49, 0x54, 0x7B, 0x01, 0x00});
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Object.prototype");
+ ExpectScriptTrue("result.hasOwnProperty('a')");
+ ExpectScriptTrue("result.a === 42");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+
// Integer key (treated as a string, but may be encoded differently).
- DecodeTestForVersion0(
- {0x49, 0x54, 0x53, 0x01, 0x61, 0x7B, 0x01, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).length === 1"));
- });
+ value =
+ DecodeTestForVersion0({0x49, 0x54, 0x53, 0x01, 0x61, 0x7B, 0x01, 0x00});
+ ASSERT_TRUE(value->IsObject());
+ ExpectScriptTrue("result.hasOwnProperty('42')");
+ ExpectScriptTrue("result[42] === 'a'");
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).length === 1");
+
// Key order must be preserved.
- DecodeTestForVersion0(
- {0x53, 0x01, 0x78, 0x49, 0x02, 0x53, 0x01, 0x79, 0x49, 0x04, 0x53, 0x01,
- 0x61, 0x49, 0x06, 0x7B, 0x03, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
- });
+ value = DecodeTestForVersion0({0x53, 0x01, 0x78, 0x49, 0x02, 0x53, 0x01, 0x79,
+ 0x49, 0x04, 0x53, 0x01, 0x61, 0x49, 0x06, 0x7B,
+ 0x03, 0x00});
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).toString() === 'x,y,a'");
+
// A property and an element.
- DecodeTestForVersion0(
- {0x49, 0x54, 0x53, 0x01, 0x61, 0x53, 0x01, 0x61, 0x49, 0x54, 0x7B, 0x02},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getOwnPropertyNames(result).toString() === '42,a'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
- });
+ value = DecodeTestForVersion0(
+ {0x49, 0x54, 0x53, 0x01, 0x61, 0x53, 0x01, 0x61, 0x49, 0x54, 0x7B, 0x02});
+ ExpectScriptTrue("Object.getOwnPropertyNames(result).toString() === '42,a'");
+ ExpectScriptTrue("result[42] === 'a'");
+ ExpectScriptTrue("result.a === 42");
}
TEST_F(ValueSerializerTest, RoundTripArray) {
// A simple array of integers.
- RoundTripTest("[1, 2, 3, 4, 5]", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(5u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Array.prototype"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.toString() === '1,2,3,4,5'"));
- });
+ Local<Value> value = RoundTripTest("[1, 2, 3, 4, 5]");
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(5u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Array.prototype");
+ ExpectScriptTrue("result.toString() === '1,2,3,4,5'");
+
// A long (sparse) array.
- RoundTripTest(
- "(() => { var x = new Array(1000); x[500] = 42; return x; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[500] === 42"));
- });
+ value = RoundTripTest("var x = new Array(1000); x[500] = 42; x;");
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[500] === 42");
+
// Duplicate reference.
- RoundTripTest(
- "(() => { var y = {}; return [y, y]; })()", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result[1]"));
- });
+ value = RoundTripTest("var y = {}; [y, y];");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[0] === result[1]");
+
// Duplicate reference in a sparse array.
- RoundTripTest(
- "(() => { var x = new Array(1000); x[1] = x[500] = {}; return x; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(
- EvaluateScriptForResultBool("typeof result[1] === 'object'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
- });
+ value = RoundTripTest("var x = new Array(1000); x[1] = x[500] = {}; x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("typeof result[1] === 'object'");
+ ExpectScriptTrue("result[1] === result[500]");
+
// Self reference.
- RoundTripTest(
- "(() => { var y = []; y[0] = y; return y; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result"));
- });
+ value = RoundTripTest("var y = []; y[0] = y; y;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[0] === result");
+
// Self reference in a sparse array.
- RoundTripTest(
- "(() => { var y = new Array(1000); y[519] = y; return y; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[519] === result"));
- });
+ value = RoundTripTest("var y = new Array(1000); y[519] = y; y;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[519] === result");
+
// Array with additional properties.
- RoundTripTest(
- "(() => { var y = [1, 2]; y.foo = 'bar'; return y; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '1,2'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
- });
+ value = RoundTripTest("var y = [1, 2]; y.foo = 'bar'; y;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result.toString() === '1,2'");
+ ExpectScriptTrue("result.foo === 'bar'");
+
// Sparse array with additional properties.
- RoundTripTest(
- "(() => { var y = new Array(1000); y.foo = 'bar'; return y; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === ','.repeat(999)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
- });
+ value = RoundTripTest("var y = new Array(1000); y.foo = 'bar'; y;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result.toString() === ','.repeat(999)");
+ ExpectScriptTrue("result.foo === 'bar'");
+
// The distinction between holes and undefined elements must be maintained.
- RoundTripTest("[,undefined]", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(
- EvaluateScriptForResultBool("typeof result[0] === 'undefined'"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(0)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty(1)"));
- });
+ value = RoundTripTest("[,undefined]");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("typeof result[0] === 'undefined'");
+ ExpectScriptTrue("typeof result[1] === 'undefined'");
+ ExpectScriptTrue("!result.hasOwnProperty(0)");
+ ExpectScriptTrue("result.hasOwnProperty(1)");
}
TEST_F(ValueSerializerTest, DecodeArray) {
// A simple array of integers.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x05, 0x3F, 0x01, 0x49, 0x02,
- 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01,
- 0x49, 0x08, 0x3F, 0x01, 0x49, 0x0A, 0x24, 0x00, 0x05, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(5u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Array.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '1,2,3,4,5'"));
- });
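+ // 0x41 ('A') opens a dense array of length 5; 0x24 ('$') closes it.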
+ Local<Value> value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x05, 0x3F, 0x01, 0x49, 0x02,
+ 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01,
+ 0x49, 0x08, 0x3F, 0x01, 0x49, 0x0A, 0x24, 0x00, 0x05, 0x00});
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(5u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Array.prototype");
+ ExpectScriptTrue("result.toString() === '1,2,3,4,5'");
+
// A long (sparse) array.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
- 0xE8, 0x07, 0x3F, 0x01, 0x49, 0x54, 0x40, 0x01, 0xE8, 0x07},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[500] === 42"));
- });
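+ // 0x61 ('a') opens a sparse array; 0xE8 0x07 is the varint length 1000,
+ // and 0x40 ('@') closes the array.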
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0xE8, 0x07, 0x3F, 0x01, 0x49, 0x54, 0x40, 0x01, 0xE8, 0x07});
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[500] === 42");
+
// Duplicate reference.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01, 0x6F, 0x7B, 0x00, 0x3F,
- 0x02, 0x5E, 0x01, 0x24, 0x00, 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result[1]"));
- });
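+ // 0x5E ('^') is a back-reference to an object the deserializer has already
+ // produced, which is how the aliasing survives the round trip.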
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01, 0x6F,
+ 0x7B, 0x00, 0x3F, 0x02, 0x5E, 0x01, 0x24, 0x00, 0x02});
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[0] === result[1]");
+
// Duplicate reference in a sparse array.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
- 0x02, 0x3F, 0x01, 0x6F, 0x7B, 0x00, 0x3F, 0x02, 0x49, 0xE8,
- 0x07, 0x3F, 0x02, 0x5E, 0x01, 0x40, 0x02, 0xE8, 0x07, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(
- EvaluateScriptForResultBool("typeof result[1] === 'object'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
- });
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0x02, 0x3F, 0x01, 0x6F, 0x7B, 0x00, 0x3F, 0x02, 0x49, 0xE8,
+ 0x07, 0x3F, 0x02, 0x5E, 0x01, 0x40, 0x02, 0xE8, 0x07, 0x00});
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("typeof result[1] === 'object'");
+ ExpectScriptTrue("result[1] === result[500]");
+
// Self reference.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x01, 0x3F, 0x01, 0x5E, 0x00, 0x24,
- 0x00, 0x01, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x01, 0x3F, 0x01, 0x5E,
+ 0x00, 0x24, 0x00, 0x01, 0x00});
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[0] === result");
+
// Self reference in a sparse array.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
- 0x8E, 0x08, 0x3F, 0x01, 0x5E, 0x00, 0x40, 0x01, 0xE8, 0x07},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[519] === result"));
- });
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01, 0x49,
+ 0x8E, 0x08, 0x3F, 0x01, 0x5E, 0x00, 0x40, 0x01, 0xE8, 0x07});
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[519] === result");
+
// Array with additional properties.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01, 0x49, 0x02, 0x3F,
- 0x01, 0x49, 0x04, 0x3F, 0x01, 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F,
- 0x01, 0x53, 0x03, 0x62, 0x61, 0x72, 0x24, 0x01, 0x02, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '1,2'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x41, 0x02, 0x3F, 0x01,
+ 0x49, 0x02, 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01,
+ 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F, 0x01, 0x53,
+ 0x03, 0x62, 0x61, 0x72, 0x24, 0x01, 0x02, 0x00});
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result.toString() === '1,2'");
+ ExpectScriptTrue("result.foo === 'bar'");
+
// Sparse array with additional properties.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01,
- 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F, 0x01, 0x53, 0x03,
- 0x62, 0x61, 0x72, 0x40, 0x01, 0xE8, 0x07, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === ','.repeat(999)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x61, 0xE8, 0x07, 0x3F, 0x01,
+ 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3F, 0x01, 0x53, 0x03,
+ 0x62, 0x61, 0x72, 0x40, 0x01, 0xE8, 0x07, 0x00});
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result.toString() === ','.repeat(999)");
+ ExpectScriptTrue("result.foo === 'bar'");
+
// The distinction between holes and undefined elements must be maintained.
// Note that output previously produced by Chrome fails this test, so an
// encoding using the sparse format was constructed instead.
- DecodeTest(
- {0xFF, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5F, 0x40, 0x01, 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(
- EvaluateScriptForResultBool("typeof result[0] === 'undefined'"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(0)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty(1)"));
- });
+ value =
+ DecodeTest({0xFF, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5F, 0x40, 0x01, 0x02});
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("typeof result[0] === 'undefined'");
+ ExpectScriptTrue("typeof result[1] === 'undefined'");
+ ExpectScriptTrue("!result.hasOwnProperty(0)");
+ ExpectScriptTrue("result.hasOwnProperty(1)");
}
TEST_F(ValueSerializerTest, DecodeInvalidOverLargeArray) {
@@ -1055,375 +916,285 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithNonEnumerableElement) {
// Even though this array looks like [1,5,3], the 5 should be missing from the
// perspective of structured clone, which only clones properties that were
// enumerable.
- RoundTripTest(
- "(() => {"
- " var x = [1,2,3];"
- " Object.defineProperty(x, '1', {enumerable:false, value:5});"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(3u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty('1')"));
- });
+ Local<Value> value = RoundTripTest(
+ "var x = [1,2,3];"
+ "Object.defineProperty(x, '1', {enumerable:false, value:5});"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(3u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("!result.hasOwnProperty('1')");
}
TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
// If an element is deleted before it is serialized, then it's deleted.
- RoundTripTest(
- "(() => {"
- " var x = [{ get a() { delete x[1]; }}, 42];"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(
- EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(1)"));
- });
+ Local<Value> value =
+ RoundTripTest("var x = [{ get a() { delete x[1]; }}, 42]; x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("typeof result[1] === 'undefined'");
+ ExpectScriptTrue("!result.hasOwnProperty(1)");
+
// Same for sparse arrays.
- RoundTripTest(
- "(() => {"
- " var x = [{ get a() { delete x[1]; }}, 42];"
- " x.length = 1000;"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(
- EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(1)"));
- });
+ value = RoundTripTest(
+ "var x = [{ get a() { delete x[1]; }}, 42];"
+ "x.length = 1000;"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("typeof result[1] === 'undefined'");
+ ExpectScriptTrue("!result.hasOwnProperty(1)");
+
// If the length is changed, then the resulting array still has the original
// length, but elements that were not yet serialized are gone.
- RoundTripTest(
- "(() => {"
- " var x = [1, { get a() { x.length = 0; }}, 3, 4];"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(4u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
- });
+ value = RoundTripTest("var x = [1, { get a() { x.length = 0; }}, 3, 4]; x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(4u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[0] === 1");
+ ExpectScriptTrue("!result.hasOwnProperty(2)");
+
// The same is true if the length is shortened, but there are still items
// remaining.
- RoundTripTest(
- "(() => {"
- " var x = [1, { get a() { x.length = 3; }}, 3, 4];"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(4u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[2] === 3"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(3)"));
- });
+ value = RoundTripTest("var x = [1, { get a() { x.length = 3; }}, 3, 4]; x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(4u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[2] === 3");
+ ExpectScriptTrue("!result.hasOwnProperty(3)");
+
// Same for sparse arrays.
- RoundTripTest(
- "(() => {"
- " var x = [1, { get a() { x.length = 0; }}, 3, 4];"
- " x.length = 1000;"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
- });
- RoundTripTest(
- "(() => {"
- " var x = [1, { get a() { x.length = 3; }}, 3, 4];"
- " x.length = 1000;"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[2] === 3"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(3)"));
- });
+ value = RoundTripTest(
+ "var x = [1, { get a() { x.length = 0; }}, 3, 4];"
+ "x.length = 1000;"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[0] === 1");
+ ExpectScriptTrue("!result.hasOwnProperty(2)");
+
+ value = RoundTripTest(
+ "var x = [1, { get a() { x.length = 3; }}, 3, 4];"
+ "x.length = 1000;"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[2] === 3");
+ ExpectScriptTrue("!result.hasOwnProperty(3)");
+
// If a getter makes a property non-enumerable, it should still be enumerated,
// since enumeration happens once before getters are invoked.
- RoundTripTest(
- "(() => {"
- " var x = [{ get a() {"
- " Object.defineProperty(x, '1', { value: 3, enumerable: false });"
- " }}, 2];"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 3"));
- });
+ value = RoundTripTest(
+ "var x = [{ get a() {"
+ " Object.defineProperty(x, '1', { value: 3, enumerable: false });"
+ "}}, 2];"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[1] === 3");
+
// Same for sparse arrays.
- RoundTripTest(
- "(() => {"
- " var x = [{ get a() {"
- " Object.defineProperty(x, '1', { value: 3, enumerable: false });"
- " }}, 2];"
- " x.length = 1000;"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 3"));
- });
+ value = RoundTripTest(
+ "var x = [{ get a() {"
+ " Object.defineProperty(x, '1', { value: 3, enumerable: false });"
+ "}}, 2];"
+ "x.length = 1000;"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[1] === 3");
+
// Getters on the array itself must also run.
- RoundTripTest(
- "(() => {"
- " var x = [1, 2, 3];"
- " Object.defineProperty(x, '1', { enumerable: true, get: () => 4 });"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(3u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 4"));
- });
+ value = RoundTripTest(
+ "var x = [1, 2, 3];"
+ "Object.defineProperty(x, '1', { enumerable: true, get: () => 4 });"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(3u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[1] === 4");
+
// Same for sparse arrays.
- RoundTripTest(
- "(() => {"
- " var x = [1, 2, 3];"
- " Object.defineProperty(x, '1', { enumerable: true, get: () => 4 });"
- " x.length = 1000;"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 4"));
- });
+ value = RoundTripTest(
+ "var x = [1, 2, 3];"
+ "Object.defineProperty(x, '1', { enumerable: true, get: () => 4 });"
+ "x.length = 1000;"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result[1] === 4");
+
// Even with a getter that deletes things, we don't read from the prototype.
- RoundTripTest(
- "(() => {"
- " var x = [{ get a() { delete x[1]; } }, 2];"
- " x.__proto__ = Object.create(Array.prototype, { 1: { value: 6 } });"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
- });
+ value = RoundTripTest(
+ "var x = [{ get a() { delete x[1]; } }, 2];"
+ "x.__proto__ = Object.create(Array.prototype, { 1: { value: 6 } });"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("!(1 in result)");
+
// Same for sparse arrays.
- RoundTripTest(
- "(() => {"
- " var x = [{ get a() { delete x[1]; } }, 2];"
- " x.__proto__ = Object.create(Array.prototype, { 1: { value: 6 } });"
- " x.length = 1000;"
- " return x;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
- });
+ value = RoundTripTest(
+ "var x = [{ get a() { delete x[1]; } }, 2];"
+ "x.__proto__ = Object.create(Array.prototype, { 1: { value: 6 } });"
+ "x.length = 1000;"
+ "x;");
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("!(1 in result)");
}
TEST_F(ValueSerializerTest, DecodeSparseArrayVersion0) {
// Empty (sparse) array.
- DecodeTestForVersion0({0x40, 0x00, 0x00, 0x00},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(0u, Array::Cast(*value)->Length());
- });
+ Local<Value> value = DecodeTestForVersion0({0x40, 0x00, 0x00, 0x00});
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(0u, Array::Cast(*value)->Length());
+
// Sparse array with a mixture of elements and properties.
- DecodeTestForVersion0(
- {0x55, 0x00, 0x53, 0x01, 'a', 0x55, 0x02, 0x55, 0x05, 0x53,
- 0x03, 'f', 'o', 'o', 0x53, 0x03, 'b', 'a', 'r', 0x53,
- 0x03, 'b', 'a', 'z', 0x49, 0x0B, 0x40, 0x04, 0x03, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(3u, Array::Cast(*value)->Length());
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.toString() === 'a,,5'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.baz === -6"));
- });
+ value = DecodeTestForVersion0({0x55, 0x00, 0x53, 0x01, 'a', 0x55, 0x02, 0x55,
+ 0x05, 0x53, 0x03, 'f', 'o', 'o', 0x53, 0x03,
+ 'b', 'a', 'r', 0x53, 0x03, 'b', 'a', 'z',
+ 0x49, 0x0B, 0x40, 0x04, 0x03, 0x00});
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(3u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result.toString() === 'a,,5'");
+ ExpectScriptTrue("!(1 in result)");
+ ExpectScriptTrue("result.foo === 'bar'");
+ ExpectScriptTrue("result.baz === -6");
+
// Sparse array in a sparse array (sanity check of nesting).
- DecodeTestForVersion0(
- {0x55, 0x01, 0x55, 0x01, 0x54, 0x40, 0x01, 0x02, 0x40, 0x01, 0x02, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(2u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1] instanceof Array"));
- EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result[1])"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[1][1] === true"));
- });
+ value = DecodeTestForVersion0(
+ {0x55, 0x01, 0x55, 0x01, 0x54, 0x40, 0x01, 0x02, 0x40, 0x01, 0x02, 0x00});
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(2u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("!(0 in result)");
+ ExpectScriptTrue("result[1] instanceof Array");
+ ExpectScriptTrue("!(0 in result[1])");
+ ExpectScriptTrue("result[1][1] === true");
}
TEST_F(ValueSerializerTest, RoundTripDenseArrayContainingUndefined) {
// In previous serialization versions, this would be interpreted as an absent
// property.
- RoundTripTest("[undefined]", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(1u, Array::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty(0)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === undefined"));
- });
+ Local<Value> value = RoundTripTest("[undefined]");
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(1u, Array::Cast(*value)->Length());
+ ExpectScriptTrue("result.hasOwnProperty(0)");
+ ExpectScriptTrue("result[0] === undefined");
}
TEST_F(ValueSerializerTest, DecodeDenseArrayContainingUndefined) {
// In previous versions, "undefined" in a dense array signified absence of the
// element (for compatibility). In new versions, it has a separate encoding.
- DecodeTest({0xFF, 0x09, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result)"));
- });
- DecodeTest(
- {0xFF, 0x0B, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("0 in result"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === undefined"));
- });
- DecodeTest({0xFF, 0x0B, 0x41, 0x01, 0x2D, 0x24, 0x00, 0x01},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result)"));
- });
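+ // 0x5F ('_') is undefined; from version 0x0B, 0x2D ('-') encodes a hole.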
+ Local<Value> value =
+ DecodeTest({0xFF, 0x09, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01});
+ ExpectScriptTrue("!(0 in result)");
+
+ value = DecodeTest({0xFF, 0x0B, 0x41, 0x01, 0x5F, 0x24, 0x00, 0x01});
+ ExpectScriptTrue("0 in result");
+ ExpectScriptTrue("result[0] === undefined");
+
+ value = DecodeTest({0xFF, 0x0B, 0x41, 0x01, 0x2D, 0x24, 0x00, 0x01});
+ ExpectScriptTrue("!(0 in result)");
}
TEST_F(ValueSerializerTest, RoundTripDate) {
- RoundTripTest("new Date(1e6)", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Date.prototype"));
- });
- RoundTripTest("new Date(Date.UTC(1867, 6, 1))", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toISOString() === '1867-07-01T00:00:00.000Z'"));
- });
- RoundTripTest("new Date(NaN)", [](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
- });
- RoundTripTest(
- "({ a: new Date(), get b() { return this.a; } })",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Date"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ Local<Value> value = RoundTripTest("new Date(1e6)");
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Date.prototype");
+
+ value = RoundTripTest("new Date(Date.UTC(1867, 6, 1))");
+ ASSERT_TRUE(value->IsDate());
+ ExpectScriptTrue("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+
+ value = RoundTripTest("new Date(NaN)");
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+
+ value = RoundTripTest("({ a: new Date(), get b() { return this.a; } })");
+ ExpectScriptTrue("result.a instanceof Date");
+ ExpectScriptTrue("result.a === result.b");
}
TEST_F(ValueSerializerTest, DecodeDate) {
+ Local<Value> value;
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84,
- 0x2E, 0x41, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Date.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45, 0x27, 0x89,
- 0x87, 0xC2, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toISOString() === '1867-07-01T00:00:00.000Z'"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xF8, 0x7F, 0x00},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
- });
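+ // 0x44 ('D') is followed by the time value as a little-endian IEEE 754
+ // double; 0x412E848000000000 is 1e6.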
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x84, 0x2E, 0x41, 0x00});
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Date.prototype");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45,
+ 0x27, 0x89, 0x87, 0xC2, 0x00});
+ ASSERT_TRUE(value->IsDate());
+ ExpectScriptTrue("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xF8, 0x7F, 0x00});
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
#else
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x41, 0x2E, 0x84, 0x80, 0x00, 0x00,
- 0x00, 0x00, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Date.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0xC2, 0x87, 0x89, 0x27, 0x45, 0x20,
- 0x00, 0x00, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toISOString() === '1867-07-01T00:00:00.000Z'"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00},
- [](Local<Value> value) {
- ASSERT_TRUE(value->IsDate());
- EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x41, 0x2E, 0x84, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00});
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Date.prototype");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0xC2, 0x87, 0x89, 0x27,
+ 0x45, 0x20, 0x00, 0x00, 0x00});
+ ASSERT_TRUE(value->IsDate());
+ ExpectScriptTrue("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x44, 0x7F, 0xF8, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00});
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
#endif
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F,
- 0x01, 0x44, 0x00, 0x20, 0x39, 0x50, 0x37, 0x6A, 0x75, 0x42, 0x3F,
- 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Date"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53,
+ 0x01, 0x61, 0x3F, 0x01, 0x44, 0x00, 0x20, 0x39,
+ 0x50, 0x37, 0x6A, 0x75, 0x42, 0x3F, 0x02, 0x53,
+ 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02});
+ ExpectScriptTrue("result.a instanceof Date");
+ ExpectScriptTrue("result.a === result.b");
}
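
Editor's note: the V8_TARGET_LITTLE_ENDIAN fork above exists because the
double payload is written in host byte order, so the expected bytes differ
per target. The compile-time check corresponds to a runtime probe along
these lines (a sketch, not V8 code):

#include <cstdint>
#include <cstring>

// True on little-endian hosts: the low-order byte of a 16-bit 1 comes first
// in memory.
bool IsLittleEndianHost() {
  const uint16_t probe = 1;
  uint8_t first_byte;
  std::memcpy(&first_byte, &probe, 1);
  return first_byte == 1;
}
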
TEST_F(ValueSerializerTest, RoundTripValueObjects) {
- RoundTripTest("new Boolean(true)", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Boolean.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === true"));
- });
- RoundTripTest("new Boolean(false)", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Boolean.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === false"));
- });
- RoundTripTest(
- "({ a: new Boolean(true), get b() { return this.a; }})",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Boolean"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
- RoundTripTest("new Number(-42)", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Number.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
- });
- RoundTripTest("new Number(NaN)", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Number.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("Number.isNaN(result.valueOf())"));
- });
- RoundTripTest(
- "({ a: new Number(6), get b() { return this.a; }})",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Number"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
- RoundTripTest("new String('Qu\\xe9bec')", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === String.prototype"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.valueOf() === 'Qu\\xe9bec'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 6"));
- });
- RoundTripTest("new String('\\ud83d\\udc4a')", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === String.prototype"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.valueOf() === '\\ud83d\\udc4a'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 2"));
- });
- RoundTripTest(
- "({ a: new String(), get b() { return this.a; }})",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof String"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ Local<Value> value = RoundTripTest("new Boolean(true)");
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Boolean.prototype");
+ ExpectScriptTrue("result.valueOf() === true");
+
+ value = RoundTripTest("new Boolean(false)");
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Boolean.prototype");
+ ExpectScriptTrue("result.valueOf() === false");
+
+ value =
+ RoundTripTest("({ a: new Boolean(true), get b() { return this.a; }})");
+ ExpectScriptTrue("result.a instanceof Boolean");
+ ExpectScriptTrue("result.a === result.b");
+
+ value = RoundTripTest("new Number(-42)");
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("result.valueOf() === -42");
+
+ value = RoundTripTest("new Number(NaN)");
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("Number.isNaN(result.valueOf())");
+
+ value = RoundTripTest("({ a: new Number(6), get b() { return this.a; }})");
+ ExpectScriptTrue("result.a instanceof Number");
+ ExpectScriptTrue("result.a === result.b");
+
+ value = RoundTripTest("new String('Qu\\xe9bec')");
+ ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
+ ExpectScriptTrue("result.valueOf() === 'Qu\\xe9bec'");
+ ExpectScriptTrue("result.length === 6");
+
+ value = RoundTripTest("new String('\\ud83d\\udc4a')");
+ ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
+ ExpectScriptTrue("result.valueOf() === '\\ud83d\\udc4a'");
+ ExpectScriptTrue("result.length === 2");
+
+ value = RoundTripTest("({ a: new String(), get b() { return this.a; }})");
+ ExpectScriptTrue("result.a instanceof String");
+ ExpectScriptTrue("result.a === result.b");
}
TEST_F(ValueSerializerTest, RejectsOtherValueObjects) {
@@ -1432,437 +1203,322 @@ TEST_F(ValueSerializerTest, RejectsOtherValueObjects) {
}
TEST_F(ValueSerializerTest, DecodeValueObjects) {
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x79, 0x00}, [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Boolean.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === true"));
- });
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x78, 0x00}, [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Boolean.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === false"));
- });
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
- 0x79, 0x3F, 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Boolean"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ Local<Value> value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x79, 0x00});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Boolean.prototype");
+ ExpectScriptTrue("result.valueOf() === true");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x78, 0x00});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Boolean.prototype");
+ ExpectScriptTrue("result.valueOf() === false");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53,
+ 0x01, 0x61, 0x3F, 0x01, 0x79, 0x3F, 0x02, 0x53,
+ 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02});
+ ExpectScriptTrue("result.a instanceof Boolean");
+ ExpectScriptTrue("result.a === result.b");
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45,
- 0xC0, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Number.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xF8, 0x7F, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Number.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Number.isNaN(result.valueOf())"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x45, 0xC0, 0x00});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("result.valueOf() === -42");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xF8, 0x7F, 0x00});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("Number.isNaN(result.valueOf())");
#else
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6E, 0xC0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Number.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Number.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Number.isNaN(result.valueOf())"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0xC0, 0x45, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("result.valueOf() === -42");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6E, 0x7F, 0xF8, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Number.prototype");
+ ExpectScriptTrue("Number.isNaN(result.valueOf())");
#endif
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F,
- 0x01, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x40, 0x3F,
- 0x02, 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Number"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x73, 0x07, 0x51, 0x75, 0xC3, 0xA9, 0x62,
- 0x65, 0x63, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === String.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.valueOf() === 'Qu\\xe9bec'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 6"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x73, 0x04, 0xF0, 0x9F, 0x91, 0x8A},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === String.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.valueOf() === '\\ud83d\\udc4a'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 2"));
- });
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
- 0x61, 0x3F, 0x01, 0x73, 0x00, 0x3F, 0x02, 0x53, 0x01,
- 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof String"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53,
+ 0x01, 0x61, 0x3F, 0x01, 0x6E, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x18, 0x40, 0x3F, 0x02, 0x53,
+ 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02});
+ ExpectScriptTrue("result.a instanceof Number");
+ ExpectScriptTrue("result.a === result.b");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x73, 0x07, 0x51, 0x75, 0xC3,
+ 0xA9, 0x62, 0x65, 0x63, 0x00});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
+ ExpectScriptTrue("result.valueOf() === 'Qu\\xe9bec'");
+ ExpectScriptTrue("result.length === 6");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x73, 0x04, 0xF0, 0x9F, 0x91, 0x8A});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
+ ExpectScriptTrue("result.valueOf() === '\\ud83d\\udc4a'");
+ ExpectScriptTrue("result.length === 2");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
+ 0x61, 0x3F, 0x01, 0x73, 0x00, 0x3F, 0x02, 0x53, 0x01,
+ 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00});
+ ExpectScriptTrue("result.a instanceof String");
+ ExpectScriptTrue("result.a === result.b");
// String object containing a Latin-1 string.
- DecodeTest({0xFF, 0x0C, 0x73, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === String.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.valueOf() === 'Qu\\xe9bec'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 6"));
- });
+ value =
+ DecodeTest({0xFF, 0x0C, 0x73, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c'});
+ ExpectScriptTrue("Object.getPrototypeOf(result) === String.prototype");
+ ExpectScriptTrue("result.valueOf() === 'Qu\\xe9bec'");
+ ExpectScriptTrue("result.length === 6");
}
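
Editor's note: read against the vectors above (an inference from the test
data, not a quote from a format specification), each wrapper object is
introduced by a one-byte ASCII tag. A sketch of the mapping, with
descriptions of our choosing:

#include <cstdint>
#include <string>

// Tags observed in the DecodeValueObjects vectors.
std::string DescribeValueObjectTag(uint8_t tag) {
  switch (tag) {
    case 0x79:  // 'y'
      return "new Boolean(true)";
    case 0x78:  // 'x'
      return "new Boolean(false)";
    case 0x6E:  // 'n'
      return "Number object, followed by an 8-byte double payload";
    case 0x73:  // 's'
      return "String object, followed by a length-prefixed string";
    default:
      return "not a value-object tag";
  }
}
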
TEST_F(ValueSerializerTest, RoundTripRegExp) {
- RoundTripTest("/foo/g", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === RegExp.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '/foo/g'"));
- });
- RoundTripTest("new RegExp('Qu\\xe9bec', 'i')", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.toString() === '/Qu\\xe9bec/i'"));
- });
- RoundTripTest("new RegExp('\\ud83d\\udc4a', 'ug')",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '/\\ud83d\\udc4a/gu'"));
- });
- RoundTripTest(
- "({ a: /foo/gi, get b() { return this.a; }})",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof RegExp"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ Local<Value> value = RoundTripTest("/foo/g");
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/g'");
+
+ value = RoundTripTest("new RegExp('Qu\\xe9bec', 'i')");
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("result.toString() === '/Qu\\xe9bec/i'");
+
+ value = RoundTripTest("new RegExp('\\ud83d\\udc4a', 'ug')");
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("result.toString() === '/\\ud83d\\udc4a/gu'");
+
+ value = RoundTripTest("({ a: /foo/gi, get b() { return this.a; }})");
+ ExpectScriptTrue("result.a instanceof RegExp");
+ ExpectScriptTrue("result.a === result.b");
}
TEST_F(ValueSerializerTest, DecodeRegExp) {
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x01},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === RegExp.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '/foo/g'"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x07, 0x51, 0x75, 0xC3, 0xA9, 0x62,
- 0x65, 0x63, 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '/Qu\\xe9bec/i'"));
- });
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x04, 0xF0, 0x9F, 0x91, 0x8A, 0x11, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '/\\ud83d\\udc4a/gu'"));
- });
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61,
- 0x3F, 0x01, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x03, 0x3F, 0x02,
- 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof RegExp"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ Local<Value> value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x01});
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/g'");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x07, 0x51, 0x75, 0xC3,
+ 0xA9, 0x62, 0x65, 0x63, 0x02});
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("result.toString() === '/Qu\\xe9bec/i'");
+
+ value = DecodeTest(
+ {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x04, 0xF0, 0x9F, 0x91, 0x8A, 0x11, 0x00});
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("result.toString() === '/\\ud83d\\udc4a/gu'");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01, 0x61,
+ 0x3F, 0x01, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x03, 0x3F, 0x02,
+ 0x53, 0x01, 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00});
+ ExpectScriptTrue("result.a instanceof RegExp");
+ ExpectScriptTrue("result.a === result.b");
// RegExp containing a Latin-1 string.
- DecodeTest(
- {0xFF, 0x0C, 0x52, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c', 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '/Qu\\xe9bec/i'"));
- });
+ value = DecodeTest(
+ {0xFF, 0x0C, 0x52, 0x22, 0x06, 'Q', 'u', 0xE9, 'b', 'e', 'c', 0x02});
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("result.toString() === '/Qu\\xe9bec/i'");
}
// Tests that the dotAll flag ('s') is decoded correctly and that unknown
// flag bits are rejected by the deserializer.
TEST_F(ValueSerializerTest, DecodeRegExpDotAll) {
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x1F},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === RegExp.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '/foo/gimuy'"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x3F},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsRegExp());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === RegExp.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '/foo/gimsuy'"));
- });
+ Local<Value> value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x1F});
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/gimuy'");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x3F});
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/gimsuy'");
+
InvalidDecodeTest(
{0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x7F});
}
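
Editor's note: the flag byte that trails the RegExp pattern appears to be a
bitfield. The bit assignments below are inferred purely from the vectors
above: 0x01 decodes to /g, 0x02 to /i, 0x11 to /gu, 0x1F to /gimuy, 0x3F to
/gimsuy, and 0x7F is rejected. A sketch that turns the bitfield into a JS
flag string:

#include <cstdint>
#include <string>

// Converts the inferred RegExp flag bits to a JS flag string, which orders
// flags as 'gimsuy'.
std::string RegExpFlagsToString(uint32_t bits) {
  std::string flags;
  if (bits & 0x01) flags += 'g';  // global
  if (bits & 0x02) flags += 'i';  // ignoreCase
  if (bits & 0x04) flags += 'm';  // multiline
  if (bits & 0x20) flags += 's';  // dotAll (the flag this test exercises)
  if (bits & 0x10) flags += 'u';  // unicode
  if (bits & 0x08) flags += 'y';  // sticky
  return flags;
}
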
TEST_F(ValueSerializerTest, RoundTripMap) {
- RoundTripTest(
- "(() => { var m = new Map(); m.set(42, 'foo'); return m; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsMap());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Map.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.get(42) === 'foo'"));
- });
- RoundTripTest("(() => { var m = new Map(); m.set(m, m); return m; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsMap());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.get(result) === result"));
- });
+ Local<Value> value = RoundTripTest("var m = new Map(); m.set(42, 'foo'); m;");
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Map.prototype");
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.get(42) === 'foo'");
+
+ value = RoundTripTest("var m = new Map(); m.set(m, m); m;");
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.get(result) === result");
+
// Iteration order must be preserved.
- RoundTripTest(
- "(() => {"
- " var m = new Map();"
- " m.set(1, 0); m.set('a', 0); m.set(3, 0); m.set(2, 0);"
- " return m;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsMap());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Array.from(result.keys()).toString() === '1,a,3,2'"));
- });
+ value = RoundTripTest(
+ "var m = new Map();"
+ "m.set(1, 0); m.set('a', 0); m.set(3, 0); m.set(2, 0);"
+ "m;");
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("Array.from(result.keys()).toString() === '1,a,3,2'");
}
TEST_F(ValueSerializerTest, DecodeMap) {
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01, 0x53,
- 0x03, 0x66, 0x6F, 0x6F, 0x3A, 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsMap());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Map.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.get(42) === 'foo'"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x5E, 0x00, 0x3F, 0x01,
- 0x5E, 0x00, 0x3A, 0x02, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsMap());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.get(result) === result"));
- });
+ Local<Value> value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x54, 0x3F,
+ 0x01, 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x3A, 0x02});
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Map.prototype");
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.get(42) === 'foo'");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x5E, 0x00,
+ 0x3F, 0x01, 0x5E, 0x00, 0x3A, 0x02, 0x00});
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.get(result) === result");
+
// Iteration order must be preserved.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x02, 0x3F,
- 0x01, 0x49, 0x00, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
- 0x49, 0x00, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01, 0x49, 0x00,
- 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x00, 0x3A, 0x08},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsMap());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Array.from(result.keys()).toString() === '1,a,3,2'"));
- });
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3B, 0x3F, 0x01, 0x49, 0x02, 0x3F,
+ 0x01, 0x49, 0x00, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
+ 0x49, 0x00, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01, 0x49, 0x00,
+ 0x3F, 0x01, 0x49, 0x04, 0x3F, 0x01, 0x49, 0x00, 0x3A, 0x08});
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("Array.from(result.keys()).toString() === '1,a,3,2'");
}
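
Editor's note: the 0x49 ('I') entries in these vectors carry zigzag-encoded
Int32 payloads: 42 encodes to 0x54 in the first vector, and the
iteration-order keys 1, 3, 2 encode to 0x02, 0x06, 0x04. The 0x3A (':')
end-of-map tag carries the number of serialized items (keys plus values,
hence 0x08 for four entries). A sketch of the integer encoding; the
base-128 continuation for larger values is assumed, since these samples
never exercise it.

#include <cassert>
#include <cstdint>
#include <vector>

// Zigzag-encodes a signed 32-bit integer and emits it as a little-endian
// base-128 varint, matching the single-byte payloads in the vectors above.
std::vector<uint8_t> EncodeZigZagVarint32(int32_t value) {
  uint32_t zigzag = (static_cast<uint32_t>(value) << 1) ^
                    static_cast<uint32_t>(value >> 31);
  std::vector<uint8_t> out;
  do {
    uint8_t byte = zigzag & 0x7F;
    zigzag >>= 7;
    if (zigzag) byte |= 0x80;  // continuation bit
    out.push_back(byte);
  } while (zigzag);
  return out;
}

int main() {
  assert(EncodeZigZagVarint32(42) == std::vector<uint8_t>{0x54});
  assert(EncodeZigZagVarint32(1) == std::vector<uint8_t>{0x02});
  assert(EncodeZigZagVarint32(3) == std::vector<uint8_t>{0x06});
  assert(EncodeZigZagVarint32(2) == std::vector<uint8_t>{0x04});
  return 0;
}
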
TEST_F(ValueSerializerTest, RoundTripMapWithTrickyGetters) {
// Even if an entry is removed or reassigned, the original key/value pair is
// used.
- RoundTripTest(
- "(() => {"
- " var m = new Map();"
- " m.set(0, { get a() {"
- " m.delete(1); m.set(2, 'baz'); m.set(3, 'quux');"
- " }});"
- " m.set(1, 'foo');"
- " m.set(2, 'bar');"
- " return m;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsMap());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Array.from(result.keys()).toString() === '0,1,2'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.get(1) === 'foo'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.get(2) === 'bar'"));
- });
+ Local<Value> value = RoundTripTest(
+ "var m = new Map();"
+ "m.set(0, { get a() {"
+ " m.delete(1); m.set(2, 'baz'); m.set(3, 'quux');"
+ "}});"
+ "m.set(1, 'foo');"
+ "m.set(2, 'bar');"
+ "m;");
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("Array.from(result.keys()).toString() === '0,1,2'");
+ ExpectScriptTrue("result.get(1) === 'foo'");
+ ExpectScriptTrue("result.get(2) === 'bar'");
+
// However, deeper modifications of objects yet to be serialized still apply.
- RoundTripTest(
- "(() => {"
- " var m = new Map();"
- " var key = { get a() { value.foo = 'bar'; } };"
- " var value = { get a() { key.baz = 'quux'; } };"
- " m.set(key, value);"
- " return m;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsMap());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "!('baz' in Array.from(result.keys())[0])"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Array.from(result.values())[0].foo === 'bar'"));
- });
+ value = RoundTripTest(
+ "var m = new Map();"
+ "var key = { get a() { value.foo = 'bar'; } };"
+ "var value = { get a() { key.baz = 'quux'; } };"
+ "m.set(key, value);"
+ "m;");
+ ASSERT_TRUE(value->IsMap());
+ ExpectScriptTrue("!('baz' in Array.from(result.keys())[0])");
+ ExpectScriptTrue("Array.from(result.values())[0].foo === 'bar'");
}
TEST_F(ValueSerializerTest, RoundTripSet) {
- RoundTripTest(
- "(() => { var s = new Set(); s.add(42); s.add('foo'); return s; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSet());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Set.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 2"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.has(42)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.has('foo')"));
- });
- RoundTripTest(
- "(() => { var s = new Set(); s.add(s); return s; })()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSet());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.has(result)"));
- });
+ Local<Value> value =
+ RoundTripTest("var s = new Set(); s.add(42); s.add('foo'); s;");
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Set.prototype");
+ ExpectScriptTrue("result.size === 2");
+ ExpectScriptTrue("result.has(42)");
+ ExpectScriptTrue("result.has('foo')");
+
+ value = RoundTripTest("var s = new Set(); s.add(s); s;");
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.has(result)");
+
// Iteration order must be preserved.
- RoundTripTest(
- "(() => {"
- " var s = new Set();"
- " s.add(1); s.add('a'); s.add(3); s.add(2);"
- " return s;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSet());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Array.from(result.keys()).toString() === '1,a,3,2'"));
- });
+ value = RoundTripTest(
+ "var s = new Set();"
+ "s.add(1); s.add('a'); s.add(3); s.add(2);"
+ "s;");
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("Array.from(result.keys()).toString() === '1,a,3,2'");
}
TEST_F(ValueSerializerTest, DecodeSet) {
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49, 0x54, 0x3F, 0x01,
- 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x2C, 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSet());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Set.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 2"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.has(42)"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.has('foo')"));
- });
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x5E, 0x00, 0x2C, 0x01, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSet());
- EXPECT_TRUE(EvaluateScriptForResultBool("result.size === 1"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.has(result)"));
- });
+ Local<Value> value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49, 0x54, 0x3F,
+ 0x01, 0x53, 0x03, 0x66, 0x6F, 0x6F, 0x2C, 0x02});
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Set.prototype");
+ ExpectScriptTrue("result.size === 2");
+ ExpectScriptTrue("result.has(42)");
+ ExpectScriptTrue("result.has('foo')");
+
+ value = DecodeTest(
+ {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x5E, 0x00, 0x2C, 0x01, 0x00});
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("result.size === 1");
+ ExpectScriptTrue("result.has(result)");
+
// Iteration order must be preserved.
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49, 0x02, 0x3F, 0x01, 0x53,
- 0x01, 0x61, 0x3F, 0x01, 0x49, 0x06, 0x3F, 0x01, 0x49, 0x04, 0x2C, 0x04},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSet());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Array.from(result.keys()).toString() === '1,a,3,2'"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x27, 0x3F, 0x01, 0x49,
+ 0x02, 0x3F, 0x01, 0x53, 0x01, 0x61, 0x3F, 0x01,
+ 0x49, 0x06, 0x3F, 0x01, 0x49, 0x04, 0x2C, 0x04});
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("Array.from(result.keys()).toString() === '1,a,3,2'");
}
TEST_F(ValueSerializerTest, RoundTripSetWithTrickyGetters) {
// Even if an element is added or removed during serialization, the original
// set of elements is used.
- RoundTripTest(
- "(() => {"
- " var s = new Set();"
- " s.add({ get a() { s.delete(1); s.add(2); } });"
- " s.add(1);"
- " return s;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSet());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Array.from(result.keys()).toString() === '[object Object],1'"));
- });
+ Local<Value> value = RoundTripTest(
+ "var s = new Set();"
+ "s.add({ get a() { s.delete(1); s.add(2); } });"
+ "s.add(1);"
+ "s;");
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue(
+ "Array.from(result.keys()).toString() === '[object Object],1'");
+
// However, deeper modifications of objects yet to be serialized still apply.
- RoundTripTest(
- "(() => {"
- " var s = new Set();"
- " var first = { get a() { second.foo = 'bar'; } };"
- " var second = { get a() { first.baz = 'quux'; } };"
- " s.add(first);"
- " s.add(second);"
- " return s;"
- "})()",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSet());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "!('baz' in Array.from(result.keys())[0])"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Array.from(result.keys())[1].foo === 'bar'"));
- });
+ value = RoundTripTest(
+ "var s = new Set();"
+ "var first = { get a() { second.foo = 'bar'; } };"
+ "var second = { get a() { first.baz = 'quux'; } };"
+ "s.add(first);"
+ "s.add(second);"
+ "s;");
+ ASSERT_TRUE(value->IsSet());
+ ExpectScriptTrue("!('baz' in Array.from(result.keys())[0])");
+ ExpectScriptTrue("Array.from(result.keys())[1].foo === 'bar'");
}
TEST_F(ValueSerializerTest, RoundTripArrayBuffer) {
- RoundTripTest("new ArrayBuffer()", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArrayBuffer());
- EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === ArrayBuffer.prototype"));
- });
- RoundTripTest("new Uint8Array([0, 128, 255]).buffer",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArrayBuffer());
- EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new Uint8Array(result).toString() === '0,128,255'"));
- });
- RoundTripTest(
- "({ a: new ArrayBuffer(), get b() { return this.a; }})",
- [this](Local<Value> value) {
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.a instanceof ArrayBuffer"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ Local<Value> value = RoundTripTest("new ArrayBuffer()");
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === ArrayBuffer.prototype");
+
+ value = RoundTripTest("new Uint8Array([0, 128, 255]).buffer");
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
+ ExpectScriptTrue("new Uint8Array(result).toString() === '0,128,255'");
+
+ value =
+ RoundTripTest("({ a: new ArrayBuffer(), get b() { return this.a; }})");
+ ExpectScriptTrue("result.a instanceof ArrayBuffer");
+ ExpectScriptTrue("result.a === result.b");
}
TEST_F(ValueSerializerTest, DecodeArrayBuffer) {
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x42, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArrayBuffer());
- EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === ArrayBuffer.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x42, 0x03, 0x00, 0x80, 0xFF, 0x00},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArrayBuffer());
- EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new Uint8Array(result).toString() === '0,128,255'"));
- });
- DecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
- 0x61, 0x3F, 0x01, 0x42, 0x00, 0x3F, 0x02, 0x53, 0x01,
- 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.a instanceof ArrayBuffer"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ Local<Value> value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x42, 0x00});
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(0u, ArrayBuffer::Cast(*value)->ByteLength());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === ArrayBuffer.prototype");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x42, 0x03, 0x00, 0x80, 0xFF, 0x00});
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(3u, ArrayBuffer::Cast(*value)->ByteLength());
+ ExpectScriptTrue("new Uint8Array(result).toString() === '0,128,255'");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x01,
+ 0x61, 0x3F, 0x01, 0x42, 0x00, 0x3F, 0x02, 0x53, 0x01,
+ 0x62, 0x3F, 0x02, 0x5E, 0x01, 0x7B, 0x02, 0x00});
+ ExpectScriptTrue("result.a instanceof ArrayBuffer");
+ ExpectScriptTrue("result.a === result.b");
}
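
Editor's note: judging from these vectors, an ArrayBuffer serializes as a
0x42 ('B') tag, an unsigned base-128 varint byte length, then the raw bytes.
A sketch that reassembles the record portion of the second vector above; the
version header and trailing byte are left out.

#include <cassert>
#include <cstdint>
#include <vector>

// Builds {0x42, <varint length>, <raw bytes>} for a given payload.
std::vector<uint8_t> EncodeArrayBufferRecord(const std::vector<uint8_t>& data) {
  std::vector<uint8_t> out = {0x42};  // 'B'
  size_t length = data.size();
  do {
    uint8_t byte = length & 0x7F;
    length >>= 7;
    if (length) byte |= 0x80;  // continuation bit
    out.push_back(byte);
  } while (length);
  out.insert(out.end(), data.begin(), data.end());
  return out;
}

int main() {
  const std::vector<uint8_t> expected = {0x42, 0x03, 0x00, 0x80, 0xFF};
  assert(EncodeArrayBufferRecord({0x00, 0x80, 0xFF}) == expected);
  return 0;
}
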
TEST_F(ValueSerializerTest, DecodeInvalidArrayBuffer) {
@@ -1874,11 +1530,7 @@ class OOMArrayBufferAllocator : public ArrayBuffer::Allocator {
public:
void* Allocate(size_t) override { return nullptr; }
void* AllocateUninitialized(size_t) override { return nullptr; }
- void* Reserve(size_t length) override { return nullptr; }
- void Free(void* data, size_t length, AllocationMode mode) override {}
void Free(void*, size_t) override {}
- void SetProtection(void* data, size_t length,
- Protection protection) override {}
};
TEST_F(ValueSerializerTest, DecodeArrayBufferOOM) {
@@ -1947,196 +1599,161 @@ class ValueSerializerTestWithArrayBufferTransfer : public ValueSerializerTest {
TEST_F(ValueSerializerTestWithArrayBufferTransfer,
RoundTripArrayBufferTransfer) {
- RoundTripTest([this]() { return input_buffer(); },
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsArrayBuffer());
- EXPECT_EQ(output_buffer(), value);
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new Uint8Array(result).toString() === '0,1,128,255'"));
- });
- RoundTripTest(
- [this]() {
- Local<Object> object = Object::New(isolate());
- EXPECT_TRUE(object
- ->CreateDataProperty(serialization_context(),
- StringFromUtf8("a"),
- input_buffer())
- .FromMaybe(false));
- EXPECT_TRUE(object
- ->CreateDataProperty(serialization_context(),
- StringFromUtf8("b"),
- input_buffer())
- .FromMaybe(false));
- return object;
- },
- [this](Local<Value> value) {
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.a instanceof ArrayBuffer"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new Uint8Array(result.a).toString() === '0,1,128,255'"));
- });
+ Local<Value> value = RoundTripTest(input_buffer());
+ ASSERT_TRUE(value->IsArrayBuffer());
+ EXPECT_EQ(output_buffer(), value);
+ ExpectScriptTrue("new Uint8Array(result).toString() === '0,1,128,255'");
+
+ Local<Object> object;
+ {
+ Context::Scope scope(serialization_context());
+ object = Object::New(isolate());
+ EXPECT_TRUE(object
+ ->CreateDataProperty(serialization_context(),
+ StringFromUtf8("a"), input_buffer())
+ .FromMaybe(false));
+ EXPECT_TRUE(object
+ ->CreateDataProperty(serialization_context(),
+ StringFromUtf8("b"), input_buffer())
+ .FromMaybe(false));
+ }
+ value = RoundTripTest(object);
+ ExpectScriptTrue("result.a instanceof ArrayBuffer");
+ ExpectScriptTrue("result.a === result.b");
+ ExpectScriptTrue("new Uint8Array(result.a).toString() === '0,1,128,255'");
}
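
Editor's note: this fixture drives the out-of-band transfer path; the public
API underneath is along these lines. Context scoping and error handling are
elided, so treat this as an outline rather than a drop-in implementation.

#include <cstdlib>
#include <utility>
#include "v8.h"

// Round-trips an ArrayBuffer out of band: the bytes travel via the transfer
// id rather than being copied into the serialized stream.
void TransferRoundTrip(v8::Isolate* isolate, v8::Local<v8::Context> context,
                       v8::Local<v8::ArrayBuffer> buffer) {
  // Sending side: register the buffer under id 0 before writing the value.
  v8::ValueSerializer serializer(isolate);
  serializer.TransferArrayBuffer(0, buffer);
  serializer.WriteHeader();
  serializer.WriteValue(context, buffer).Check();
  std::pair<uint8_t*, size_t> wire = serializer.Release();

  // Receiving side: supply a buffer for id 0 before reading the value.
  v8::ValueDeserializer deserializer(isolate, wire.first, wire.second);
  deserializer.TransferArrayBuffer(0, buffer);
  deserializer.ReadHeader(context).Check();
  v8::Local<v8::Value> result;
  if (deserializer.ReadValue(context).ToLocal(&result)) {
    // result is an ArrayBuffer backed by the transferred buffer.
  }
  free(wire.first);  // Release() hands buffer ownership to the caller.
}
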
TEST_F(ValueSerializerTest, RoundTripTypedArray) {
-// Check that the right type comes out the other side for every kind of typed
-// array.
-#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype, size) \
- RoundTripTest("new " #Type "Array(2)", [this](Local<Value> value) { \
- ASSERT_TRUE(value->Is##Type##Array()); \
- EXPECT_EQ(2u * size, TypedArray::Cast(*value)->ByteLength()); \
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length()); \
- EXPECT_TRUE(EvaluateScriptForResultBool( \
- "Object.getPrototypeOf(result) === " #Type "Array.prototype")); \
- });
+ // Check that the right type comes out the other side for every kind of typed
+ // array.
+ Local<Value> value;
+#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype, size) \
+ value = RoundTripTest("new " #Type "Array(2)"); \
+ ASSERT_TRUE(value->Is##Type##Array()); \
+ EXPECT_EQ(2u * size, TypedArray::Cast(*value)->ByteLength()); \
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length()); \
+ ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
+ "Array.prototype");
+
TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
-#undef TYPED_ARRAY_CASE
+#undef TYPED_ARRAY_ROUND_TRIP_TEST
// Check that values of various kinds are suitably preserved.
- RoundTripTest("new Uint8Array([1, 128, 255])", [this](Local<Value> value) {
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.toString() === '1,128,255'"));
- });
- RoundTripTest("new Int16Array([0, 256, -32768])", [this](Local<Value> value) {
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.toString() === '0,256,-32768'"));
- });
- RoundTripTest("new Float32Array([0, -0.5, NaN, Infinity])",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '0,-0.5,NaN,Infinity'"));
- });
+ value = RoundTripTest("new Uint8Array([1, 128, 255])");
+ ExpectScriptTrue("result.toString() === '1,128,255'");
+
+ value = RoundTripTest("new Int16Array([0, 256, -32768])");
+ ExpectScriptTrue("result.toString() === '0,256,-32768'");
+
+ value = RoundTripTest("new Float32Array([0, -0.5, NaN, Infinity])");
+ ExpectScriptTrue("result.toString() === '0,-0.5,NaN,Infinity'");
// Array buffer views sharing a buffer should do so on the other side.
// Similarly, multiple references to the same typed array should be resolved.
- RoundTripTest(
- "(() => {"
- " var buffer = new ArrayBuffer(32);"
- " return {"
- " u8: new Uint8Array(buffer),"
- " get u8_2() { return this.u8; },"
- " f32: new Float32Array(buffer, 4, 5),"
- " b: buffer,"
- " };"
- "})()",
- [this](Local<Value> value) {
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.u8 instanceof Uint8Array"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.u8 === result.u8_2"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.f32 instanceof Float32Array"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.u8.buffer === result.f32.buffer"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.f32.byteOffset === 4"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.f32.length === 5"));
- });
+ value = RoundTripTest(
+ "var buffer = new ArrayBuffer(32);"
+ "({"
+ " u8: new Uint8Array(buffer),"
+ " get u8_2() { return this.u8; },"
+ " f32: new Float32Array(buffer, 4, 5),"
+ " b: buffer,"
+ "});");
+ ExpectScriptTrue("result.u8 instanceof Uint8Array");
+ ExpectScriptTrue("result.u8 === result.u8_2");
+ ExpectScriptTrue("result.f32 instanceof Float32Array");
+ ExpectScriptTrue("result.u8.buffer === result.f32.buffer");
+ ExpectScriptTrue("result.f32.byteOffset === 4");
+ ExpectScriptTrue("result.f32.length === 5");
}
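
Editor's note: the TYPED_ARRAY_ROUND_TRIP_TEST pattern above is an X-macro.
The list macro TYPED_ARRAYS applies a caller-supplied macro to each
typed-array kind. A minimal standalone version of the same idiom, with a
made-up list:

#include <cstdio>

#define SMALL_TYPED_ARRAYS(V) \
  V(Uint8, 1)                 \
  V(Int16, 2)                 \
  V(Float64, 8)

int main() {
// Expands once per list entry; #Type pastes the name into the format string.
#define PRINT_ELEMENT_SIZE(Type, size) \
  std::printf(#Type "Array: %d byte(s) per element\n", size);
  SMALL_TYPED_ARRAYS(PRINT_ELEMENT_SIZE)
#undef PRINT_ELEMENT_SIZE
  return 0;
}
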
TEST_F(ValueSerializerTest, DecodeTypedArray) {
// Check that the right type comes out the other side for every kind of typed
// array.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56,
- 0x42, 0x00, 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsUint8Array());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Uint8Array.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00, 0x00, 0x56,
- 0x62, 0x00, 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsInt8Array());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Int8Array.prototype"));
- });
+ Local<Value> value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42,
+ 0x02, 0x00, 0x00, 0x56, 0x42, 0x00, 0x02});
+ ASSERT_TRUE(value->IsUint8Array());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Uint8Array.prototype");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x02, 0x00,
+ 0x00, 0x56, 0x62, 0x00, 0x02});
+ ASSERT_TRUE(value->IsInt8Array());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Int8Array.prototype");
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
- 0x00, 0x56, 0x57, 0x00, 0x04},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsUint16Array());
- EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Uint16Array.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
- 0x00, 0x56, 0x77, 0x00, 0x04},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsInt16Array());
- EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Int16Array.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x44, 0x00, 0x08},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsUint32Array());
- EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Uint32Array.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x64, 0x00, 0x08},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsInt32Array());
- EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Int32Array.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x66, 0x00, 0x08},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsFloat32Array());
- EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Float32Array.prototype"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x56, 0x46, 0x00, 0x10},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsFloat64Array());
- EXPECT_EQ(16u, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === Float64Array.prototype"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x56, 0x57, 0x00, 0x04});
+ ASSERT_TRUE(value->IsUint16Array());
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Uint16Array.prototype");
+
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x56, 0x77, 0x00, 0x04});
+ ASSERT_TRUE(value->IsInt16Array());
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Int16Array.prototype");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x44, 0x00, 0x08});
+ ASSERT_TRUE(value->IsUint32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Uint32Array.prototype");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x64, 0x00, 0x08});
+ ASSERT_TRUE(value->IsInt32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Int32Array.prototype");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x66, 0x00, 0x08});
+ ASSERT_TRUE(value->IsFloat32Array());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Float32Array.prototype");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x56, 0x46, 0x00, 0x10});
+ ASSERT_TRUE(value->IsFloat64Array());
+ EXPECT_EQ(16u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === Float64Array.prototype");
+
#endif // V8_TARGET_LITTLE_ENDIAN
// Check that values of various kinds are suitably preserved.
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x03, 0x01, 0x80, 0xFF,
- 0x56, 0x42, 0x00, 0x03, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '1,128,255'"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x03, 0x01,
+ 0x80, 0xFF, 0x56, 0x42, 0x00, 0x03, 0x00});
+ ExpectScriptTrue("result.toString() === '1,128,255'");
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x06, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x80, 0x56, 0x77, 0x00, 0x06},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '0,256,-32768'"));
- });
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0xBF, 0x00, 0x00, 0xC0, 0x7F,
- 0x00, 0x00, 0x80, 0x7F, 0x56, 0x66, 0x00, 0x10},
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.toString() === '0,-0.5,NaN,Infinity'"));
- });
+ value = DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x06, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x80, 0x56, 0x77, 0x00, 0x06});
+ ExpectScriptTrue("result.toString() === '0,256,-32768'");
+
+ value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xBF, 0x00, 0x00, 0xC0, 0x7F,
+ 0x00, 0x00, 0x80, 0x7F, 0x56, 0x66, 0x00, 0x10});
+ ExpectScriptTrue("result.toString() === '0,-0.5,NaN,Infinity'");
+
#endif // V8_TARGET_LITTLE_ENDIAN
// Array buffer views sharing a buffer should do so on the other side.
// Similarly, multiple references to the same typed array should be resolved.
- DecodeTest(
+ value = DecodeTest(
{0xFF, 0x09, 0x3F, 0x00, 0x6F, 0x3F, 0x01, 0x53, 0x02, 0x75, 0x38, 0x3F,
0x01, 0x3F, 0x01, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -2144,18 +1761,13 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x00, 0x56, 0x42, 0x00, 0x20, 0x3F, 0x03, 0x53, 0x04, 0x75, 0x38, 0x5F,
0x32, 0x3F, 0x03, 0x5E, 0x02, 0x3F, 0x03, 0x53, 0x03, 0x66, 0x33, 0x32,
0x3F, 0x03, 0x3F, 0x03, 0x5E, 0x01, 0x56, 0x66, 0x04, 0x14, 0x3F, 0x04,
- 0x53, 0x01, 0x62, 0x3F, 0x04, 0x5E, 0x01, 0x7B, 0x04, 0x00},
- [this](Local<Value> value) {
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.u8 instanceof Uint8Array"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.u8 === result.u8_2"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.f32 instanceof Float32Array"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.u8.buffer === result.f32.buffer"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.f32.byteOffset === 4"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.f32.length === 5"));
- });
+ 0x53, 0x01, 0x62, 0x3F, 0x04, 0x5E, 0x01, 0x7B, 0x04, 0x00});
+ ExpectScriptTrue("result.u8 instanceof Uint8Array");
+ ExpectScriptTrue("result.u8 === result.u8_2");
+ ExpectScriptTrue("result.f32 instanceof Float32Array");
+ ExpectScriptTrue("result.u8.buffer === result.f32.buffer");
+ ExpectScriptTrue("result.f32.byteOffset === 4");
+ ExpectScriptTrue("result.f32.length === 5");
}
TEST_F(ValueSerializerTest, DecodeInvalidTypedArray) {
@@ -2177,28 +1789,23 @@ TEST_F(ValueSerializerTest, DecodeInvalidTypedArray) {
}
TEST_F(ValueSerializerTest, RoundTripDataView) {
- RoundTripTest("new DataView(new ArrayBuffer(4), 1, 2)",
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsDataView());
- EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
- EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
- EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === DataView.prototype"));
- });
+ Local<Value> value = RoundTripTest("new DataView(new ArrayBuffer(4), 1, 2)");
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
}
TEST_F(ValueSerializerTest, DecodeDataView) {
- DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00, 0x00,
- 0x00, 0x56, 0x3F, 0x01, 0x02},
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsDataView());
- EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
- EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
- EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === DataView.prototype"));
- });
+ Local<Value> value =
+ DecodeTest({0xFF, 0x09, 0x3F, 0x00, 0x3F, 0x00, 0x42, 0x04, 0x00, 0x00,
+ 0x00, 0x00, 0x56, 0x3F, 0x01, 0x02});
+ ASSERT_TRUE(value->IsDataView());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === DataView.prototype");
}
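
Editor's note: across the view vectors above, a 0x56 ('V') tag follows the
backing ArrayBuffer record and is itself followed by a subtype byte, a
varint byte offset, and a varint byte length. For example, the
DecodeDataView vector ends with 0x56, 0x3F, 0x01, 0x02 for a DataView at
offset 1 of length 2. The subtype bytes, inferred from the test data:

#include <cstdint>
#include <string>

// Subtype bytes observed after the 0x56 ('V') array-buffer-view tag.
std::string DescribeViewSubtag(uint8_t subtag) {
  switch (subtag) {
    case 0x42: return "Uint8Array";    // 'B'
    case 0x62: return "Int8Array";     // 'b'
    case 0x57: return "Uint16Array";   // 'W'
    case 0x77: return "Int16Array";    // 'w'
    case 0x44: return "Uint32Array";   // 'D'
    case 0x64: return "Int32Array";    // 'd'
    case 0x66: return "Float32Array";  // 'f'
    case 0x46: return "Float64Array";  // 'F'
    case 0x3F: return "DataView";      // '?'
    default:   return "unknown view subtag";
  }
}
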
TEST_F(ValueSerializerTest, DecodeInvalidDataView) {
@@ -2210,11 +1817,11 @@ TEST_F(ValueSerializerTest, DecodeInvalidDataView) {
{0xFF, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0x3F, 0x01, 0x03});
}
-class ValueSerializerTestWithSharedArrayBufferTransfer
+class ValueSerializerTestWithSharedArrayBufferClone
: public ValueSerializerTest {
protected:
- ValueSerializerTestWithSharedArrayBufferTransfer()
- : serializer_delegate_(this) {}
+ ValueSerializerTestWithSharedArrayBufferClone()
+ : serializer_delegate_(this), deserializer_delegate_(this) {}
void InitializeData(const std::vector<uint8_t>& data) {
data_ = data;
@@ -2233,10 +1840,6 @@ class ValueSerializerTestWithSharedArrayBufferTransfer
const Local<SharedArrayBuffer>& input_buffer() { return input_buffer_; }
const Local<SharedArrayBuffer>& output_buffer() { return output_buffer_; }
- void BeforeDecode(ValueDeserializer* deserializer) override {
- deserializer->TransferSharedArrayBuffer(0, output_buffer_);
- }
-
static void SetUpTestCase() {
flag_was_enabled_ = i::FLAG_harmony_sharedarraybuffer;
i::FLAG_harmony_sharedarraybuffer = true;
@@ -2259,17 +1862,27 @@ class ValueSerializerTestWithSharedArrayBufferTransfer
class SerializerDelegate : public ValueSerializer::Delegate {
public:
explicit SerializerDelegate(
- ValueSerializerTestWithSharedArrayBufferTransfer* test)
+ ValueSerializerTestWithSharedArrayBufferClone* test)
: test_(test) {}
MOCK_METHOD2(GetSharedArrayBufferId,
Maybe<uint32_t>(Isolate* isolate,
Local<SharedArrayBuffer> shared_array_buffer));
+ MOCK_METHOD2(GetSharedArrayBufferFromId,
+ MaybeLocal<SharedArrayBuffer>(Isolate* isolate, uint32_t id));
void ThrowDataCloneError(Local<String> message) override {
test_->isolate()->ThrowException(Exception::Error(message));
}
private:
- ValueSerializerTestWithSharedArrayBufferTransfer* test_;
+ ValueSerializerTestWithSharedArrayBufferClone* test_;
+ };
+
+ class DeserializerDelegate : public ValueDeserializer::Delegate {
+ public:
+ explicit DeserializerDelegate(
+        ValueSerializerTestWithSharedArrayBufferClone* /*test*/) {}
+ MOCK_METHOD2(GetSharedArrayBufferFromId,
+ MaybeLocal<SharedArrayBuffer>(Isolate* isolate, uint32_t id));
};
#if __clang__
@@ -2280,7 +1893,12 @@ class ValueSerializerTestWithSharedArrayBufferTransfer
return &serializer_delegate_;
}
+ ValueDeserializer::Delegate* GetDeserializerDelegate() override {
+ return &deserializer_delegate_;
+ }
+
SerializerDelegate serializer_delegate_;
+ DeserializerDelegate deserializer_delegate_;
private:
static bool flag_was_enabled_;
@@ -2289,49 +1907,43 @@ class ValueSerializerTestWithSharedArrayBufferTransfer
Local<SharedArrayBuffer> output_buffer_;
};
-bool ValueSerializerTestWithSharedArrayBufferTransfer::flag_was_enabled_ =
- false;
+bool ValueSerializerTestWithSharedArrayBufferClone::flag_was_enabled_ = false;
-TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
- RoundTripSharedArrayBufferTransfer) {
+TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
+ RoundTripSharedArrayBufferClone) {
InitializeData({0x00, 0x01, 0x80, 0xFF});
EXPECT_CALL(serializer_delegate_,
GetSharedArrayBufferId(isolate(), input_buffer()))
.WillRepeatedly(Return(Just(0U)));
+ EXPECT_CALL(deserializer_delegate_, GetSharedArrayBufferFromId(isolate(), 0U))
+ .WillRepeatedly(Return(output_buffer()));
- RoundTripTest([this]() { return input_buffer(); },
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsSharedArrayBuffer());
- EXPECT_EQ(output_buffer(), value);
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new Uint8Array(result).toString() === '0,1,128,255'"));
- });
- RoundTripTest(
- [this]() {
- Local<Object> object = Object::New(isolate());
- EXPECT_TRUE(object
- ->CreateDataProperty(serialization_context(),
- StringFromUtf8("a"),
- input_buffer())
- .FromMaybe(false));
- EXPECT_TRUE(object
- ->CreateDataProperty(serialization_context(),
- StringFromUtf8("b"),
- input_buffer())
- .FromMaybe(false));
- return object;
- },
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.a instanceof SharedArrayBuffer"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new Uint8Array(result.a).toString() === '0,1,128,255'"));
- });
-}
-
-TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
+ Local<Value> value = RoundTripTest(input_buffer());
+ ASSERT_TRUE(value->IsSharedArrayBuffer());
+ EXPECT_EQ(output_buffer(), value);
+ ExpectScriptTrue("new Uint8Array(result).toString() === '0,1,128,255'");
+
+ Local<Object> object;
+ {
+ Context::Scope scope(serialization_context());
+ object = Object::New(isolate());
+ EXPECT_TRUE(object
+ ->CreateDataProperty(serialization_context(),
+ StringFromUtf8("a"), input_buffer())
+ .FromMaybe(false));
+ EXPECT_TRUE(object
+ ->CreateDataProperty(serialization_context(),
+ StringFromUtf8("b"), input_buffer())
+ .FromMaybe(false));
+ }
+ value = RoundTripTest(object);
+ ExpectScriptTrue("result.a instanceof SharedArrayBuffer");
+ ExpectScriptTrue("result.a === result.b");
+ ExpectScriptTrue("new Uint8Array(result.a).toString() === '0,1,128,255'");
+}
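+
Editor's note: the mocked delegate pair above is the cloning contract this
commit moves the test to. The serializer assigns an id per
SharedArrayBuffer and the deserializer resolves the id back to a buffer. A
hedged sketch of a non-mocked embedder implementation; the in-process
registry is our own stand-in for whatever transport a real embedder uses.

#include <vector>
#include "v8.h"

// In-process registry keyed by the small integer ids exchanged between the
// two delegates.
class SharedArrayBufferRegistry {
 public:
  uint32_t Register(v8::Isolate* isolate,
                    v8::Local<v8::SharedArrayBuffer> buffer) {
    buffers_.emplace_back(isolate, buffer);
    return static_cast<uint32_t>(buffers_.size() - 1);
  }
  v8::Local<v8::SharedArrayBuffer> Lookup(v8::Isolate* isolate, uint32_t id) {
    return buffers_[id].Get(isolate);
  }

 private:
  std::vector<v8::Global<v8::SharedArrayBuffer>> buffers_;
};

class CloningSerializerDelegate : public v8::ValueSerializer::Delegate {
 public:
  explicit CloningSerializerDelegate(SharedArrayBufferRegistry* registry)
      : registry_(registry) {}
  v8::Maybe<uint32_t> GetSharedArrayBufferId(
      v8::Isolate* isolate,
      v8::Local<v8::SharedArrayBuffer> shared_array_buffer) override {
    return v8::Just(registry_->Register(isolate, shared_array_buffer));
  }
  void ThrowDataCloneError(v8::Local<v8::String> message) override {
    // Required override; a real delegate would rethrow on the isolate.
  }

 private:
  SharedArrayBufferRegistry* registry_;
};

class CloningDeserializerDelegate : public v8::ValueDeserializer::Delegate {
 public:
  explicit CloningDeserializerDelegate(SharedArrayBufferRegistry* registry)
      : registry_(registry) {}
  v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
      v8::Isolate* isolate, uint32_t clone_id) override {
    return registry_->Lookup(isolate, clone_id);
  }

 private:
  SharedArrayBufferRegistry* registry_;
};
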
+
+TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
RoundTripWebAssemblyMemory) {
bool flag_was_enabled = i::FLAG_experimental_wasm_threads;
i::FLAG_experimental_wasm_threads = true;
@@ -2343,24 +1955,23 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferTransfer,
EXPECT_CALL(serializer_delegate_,
GetSharedArrayBufferId(isolate(), input_buffer()))
.WillRepeatedly(Return(Just(0U)));
+ EXPECT_CALL(deserializer_delegate_, GetSharedArrayBufferFromId(isolate(), 0U))
+ .WillRepeatedly(Return(output_buffer()));
- RoundTripTest(
- [this]() -> Local<Value> {
- const int32_t kMaxPages = 1;
- auto i_isolate = reinterpret_cast<i::Isolate*>(isolate());
- i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(*input_buffer());
- return Utils::Convert<i::WasmMemoryObject, Value>(
- i::WasmMemoryObject::New(i_isolate, obj, kMaxPages));
- },
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result instanceof WebAssembly.Memory"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.buffer.byteLength === 65536"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("new Uint8Array(result.buffer, 0, "
- "4).toString() === '0,1,128,255'"));
- });
+ Local<Value> input;
+ {
+ Context::Scope scope(serialization_context());
+ const int32_t kMaxPages = 1;
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(*input_buffer());
+ input = Utils::Convert<i::WasmMemoryObject, Value>(
+ i::WasmMemoryObject::New(i_isolate, obj, kMaxPages));
+ }
+ RoundTripTest(input);
+ ExpectScriptTrue("result instanceof WebAssembly.Memory");
+ ExpectScriptTrue("result.buffer.byteLength === 65536");
+ ExpectScriptTrue(
+ "new Uint8Array(result.buffer, 0, 4).toString() === '0,1,128,255'");
i::FLAG_experimental_wasm_threads = flag_was_enabled;
}
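RoundTripWebAssemblyMemory still saves and restores i::FLAG_experimental_wasm_threads by hand around the test body. A hedged sketch of the RAII guard idiom that makes such save/restore safe against early returns and failed assertions (FlagScope and g_experimental_feature are illustrative, not V8 names):

    #include <gtest/gtest.h>

    namespace {

    bool g_experimental_feature = false;  // stand-in for a global flag

    // RAII guard: sets a flag for the lifetime of a scope and restores the
    // previous value on exit, even if the test bails out early.
    class FlagScope {
     public:
      FlagScope(bool* flag, bool value) : flag_(flag), saved_(*flag) {
        *flag_ = value;
      }
      ~FlagScope() { *flag_ = saved_; }

     private:
      bool* flag_;
      bool saved_;
    };

    TEST(FlagScopeSketch, RestoresOnExit) {
      {
        FlagScope scope(&g_experimental_feature, true);
        EXPECT_TRUE(g_experimental_feature);
      }
      EXPECT_FALSE(g_experimental_feature);
    }

    }  // namespace
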
@@ -2461,17 +2072,15 @@ TEST_F(ValueSerializerTestWithHostObject, RoundTripUint32) {
Local<Value> argv[] = {Integer::NewFromUnsigned(isolate(), value)};
return NewHostObject(deserialization_context(), arraysize(argv), argv);
}));
- RoundTripTest("new ExampleHostObject(42)", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.value === 42"));
- });
- RoundTripTest(
- "new ExampleHostObject(0xCAFECAFE)", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.value === 0xCAFECAFE"));
- });
+ Local<Value> value = RoundTripTest("new ExampleHostObject(42)");
+ ASSERT_TRUE(value->IsObject());
+ ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype");
+ ExpectScriptTrue("result.value === 42");
+
+ value = RoundTripTest("new ExampleHostObject(0xCAFECAFE)");
+ ExpectScriptTrue("result.value === 0xCAFECAFE");
}
TEST_F(ValueSerializerTestWithHostObject, RoundTripUint64) {
@@ -2501,21 +2110,17 @@ TEST_F(ValueSerializerTestWithHostObject, RoundTripUint64) {
static_cast<uint32_t>(value_packed))};
return NewHostObject(deserialization_context(), arraysize(argv), argv);
}));
- RoundTripTest("new ExampleHostObject(42, 0)", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.value === 42"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.value2 === 0"));
- });
- RoundTripTest(
- "new ExampleHostObject(0xFFFFFFFF, 0x12345678)",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.value === 0xFFFFFFFF"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.value2 === 0x12345678"));
- });
+ Local<Value> value = RoundTripTest("new ExampleHostObject(42, 0)");
+ ASSERT_TRUE(value->IsObject());
+ ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype");
+ ExpectScriptTrue("result.value === 42");
+ ExpectScriptTrue("result.value2 === 0");
+
+ value = RoundTripTest("new ExampleHostObject(0xFFFFFFFF, 0x12345678)");
+ ExpectScriptTrue("result.value === 0xFFFFFFFF");
+ ExpectScriptTrue("result.value2 === 0x12345678");
}
TEST_F(ValueSerializerTestWithHostObject, RoundTripDouble) {
@@ -2538,22 +2143,21 @@ TEST_F(ValueSerializerTestWithHostObject, RoundTripDouble) {
Local<Value> argv[] = {Number::New(isolate(), value)};
return NewHostObject(deserialization_context(), arraysize(argv), argv);
}));
- RoundTripTest("new ExampleHostObject(-3.5)", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.value === -3.5"));
- });
- RoundTripTest("new ExampleHostObject(NaN)", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("Number.isNaN(result.value)"));
- });
- RoundTripTest("new ExampleHostObject(Infinity)", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("result.value === Infinity"));
- });
- RoundTripTest("new ExampleHostObject(-0)", [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool("1/result.value === -Infinity"));
- });
+ Local<Value> value = RoundTripTest("new ExampleHostObject(-3.5)");
+ ASSERT_TRUE(value->IsObject());
+ ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype");
+ ExpectScriptTrue("result.value === -3.5");
+
+ value = RoundTripTest("new ExampleHostObject(NaN)");
+ ExpectScriptTrue("Number.isNaN(result.value)");
+
+ value = RoundTripTest("new ExampleHostObject(Infinity)");
+ ExpectScriptTrue("result.value === Infinity");
+
+ value = RoundTripTest("new ExampleHostObject(-0)");
+ ExpectScriptTrue("1/result.value === -Infinity");
}
TEST_F(ValueSerializerTestWithHostObject, RoundTripRawBytes) {
@@ -2581,12 +2185,11 @@ TEST_F(ValueSerializerTestWithHostObject, RoundTripRawBytes) {
}
return NewHostObject(deserialization_context(), 0, nullptr);
}));
- RoundTripTest("new ExampleHostObject()", [this](Local<Value> value) {
- ASSERT_TRUE(value->IsObject());
- ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
- });
+ Local<Value> value = RoundTripTest("new ExampleHostObject()");
+ ASSERT_TRUE(value->IsObject());
+ ASSERT_TRUE(Object::Cast(*value)->InternalFieldCount());
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype");
}
TEST_F(ValueSerializerTestWithHostObject, RoundTripSameObject) {
@@ -2603,13 +2206,9 @@ TEST_F(ValueSerializerTestWithHostObject, RoundTripSameObject) {
EXPECT_TRUE(ReadExampleHostObjectTag());
return NewHostObject(deserialization_context(), 0, nullptr);
}));
- RoundTripTest(
- "({ a: new ExampleHostObject(), get b() { return this.a; }})",
- [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.a instanceof ExampleHostObject"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ RoundTripTest("({ a: new ExampleHostObject(), get b() { return this.a; }})");
+ ExpectScriptTrue("result.a instanceof ExampleHostObject");
+ ExpectScriptTrue("result.a === result.b");
}
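This test relies on the serializer handing out one id per distinct host object, so the `a` property and the `b` getter decode to the same instance. A rough sketch of that deduplication bookkeeping, assuming a pointer-keyed id table (IdMap is illustrative, not the serializer's actual data structure):

    #include <gtest/gtest.h>

    #include <cstdint>
    #include <map>

    namespace {

    // Assigns a stable id per distinct object pointer; repeated lookups for
    // the same pointer return the same id.
    class IdMap {
     public:
      uint32_t IdFor(const void* object) {
        auto it = ids_.find(object);
        if (it != ids_.end()) return it->second;
        uint32_t id = static_cast<uint32_t>(ids_.size());
        ids_.emplace(object, id);
        return id;
      }

     private:
      std::map<const void*, uint32_t> ids_;
    };

    TEST(IdMapSketch, OneIdPerDistinctObject) {
      int a = 0, b = 0;
      IdMap map;
      EXPECT_EQ(map.IdFor(&a), map.IdFor(&a));
      EXPECT_NE(map.IdFor(&a), map.IdFor(&b));
    }

    }  // namespace
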
TEST_F(ValueSerializerTestWithHostObject, DecodeSimpleHostObject) {
@@ -2618,11 +2217,9 @@ TEST_F(ValueSerializerTestWithHostObject, DecodeSimpleHostObject) {
EXPECT_TRUE(ReadExampleHostObjectTag());
return NewHostObject(deserialization_context(), 0, nullptr);
}));
- DecodeTest(
- {0xFF, 0x0D, 0x5C, kExampleHostObjectTag}, [this](Local<Value> value) {
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "Object.getPrototypeOf(result) === ExampleHostObject.prototype"));
- });
+ DecodeTest({0xFF, 0x0D, 0x5C, kExampleHostObjectTag});
+ ExpectScriptTrue(
+ "Object.getPrototypeOf(result) === ExampleHostObject.prototype");
}
class ValueSerializerTestWithHostArrayBufferView
@@ -2647,14 +2244,10 @@ TEST_F(ValueSerializerTestWithHostArrayBufferView, RoundTripUint8ArrayInput) {
return NewDummyUint8Array();
}));
RoundTripTest(
- "({ a: new Uint8Array([1, 2, 3]), get b() { return this.a; }})",
- [this](Local<Value> value) {
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.a instanceof Uint8Array"));
- EXPECT_TRUE(
- EvaluateScriptForResultBool("result.a.toString() === '4,5,6'"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
- });
+ "({ a: new Uint8Array([1, 2, 3]), get b() { return this.a; }})");
+ ExpectScriptTrue("result.a instanceof Uint8Array");
+ ExpectScriptTrue("result.a.toString() === '4,5,6'");
+ ExpectScriptTrue("result.a === result.b");
}
// It's expected that WebAssembly has more exhaustive tests elsewhere; this
@@ -2770,6 +2363,7 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
}
Local<WasmCompiledModule> MakeWasm() {
+ Context::Scope scope(serialization_context());
return WasmCompiledModule::DeserializeOrCompile(
isolate(), {nullptr, 0},
{kIncrementerWasm, sizeof(kIncrementerWasm)})
@@ -2777,22 +2371,20 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
}
void ExpectPass() {
- RoundTripTest(
- [this]() { return MakeWasm(); },
- [this](Local<Value> value) {
- ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new WebAssembly.Instance(result).exports.increment(8) === 9"));
- });
+ Local<Value> value = RoundTripTest(MakeWasm());
+ Context::Scope scope(deserialization_context());
+ ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
+ ExpectScriptTrue(
+ "new WebAssembly.Instance(result).exports.increment(8) === 9");
}
void ExpectFail() {
- EncodeTest(
- [this]() { return MakeWasm(); },
- [this](const std::vector<uint8_t>& data) { InvalidDecodeTest(data); });
+ const std::vector<uint8_t> data = EncodeTest(MakeWasm());
+ InvalidDecodeTest(data);
}
Local<Value> GetComplexObjectWithDuplicate() {
+ Context::Scope scope(serialization_context());
Local<Value> wasm_module = MakeWasm();
serialization_context()
->Global()
@@ -2809,14 +2401,13 @@ class ValueSerializerTestWithWasm : public ValueSerializerTest {
void VerifyComplexObject(Local<Value> value) {
ASSERT_TRUE(value->IsObject());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.mod1 instanceof WebAssembly.Module"));
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "result.mod2 instanceof WebAssembly.Module"));
- EXPECT_TRUE(EvaluateScriptForResultBool("result.num === 2"));
+ ExpectScriptTrue("result.mod1 instanceof WebAssembly.Module");
+ ExpectScriptTrue("result.mod2 instanceof WebAssembly.Module");
+ ExpectScriptTrue("result.num === 2");
}
Local<Value> GetComplexObjectWithMany() {
+ Context::Scope scope(serialization_context());
Local<Value> wasm_module1 = MakeWasm();
Local<Value> wasm_module2 = MakeWasm();
serialization_context()
@@ -2858,27 +2449,22 @@ const char* ValueSerializerTestWithWasm::kUnsupportedSerialization =
// embedder must decide serialization policy.
TEST_F(ValueSerializerTestWithWasm, DefaultSerializationDelegate) {
EnableThrowingSerializer();
- InvalidEncodeTest(
- [this]() { return MakeWasm(); },
- [](Local<Message> message) {
- size_t msg_len = static_cast<size_t>(message->Get()->Length());
- std::unique_ptr<char[]> buff(new char[msg_len + 1]);
- message->Get()->WriteOneByte(reinterpret_cast<uint8_t*>(buff.get()));
- // the message ends with the custom error string
- size_t custom_msg_len = strlen(kUnsupportedSerialization);
- ASSERT_GE(msg_len, custom_msg_len);
- size_t start_pos = msg_len - custom_msg_len;
- ASSERT_EQ(strcmp(&buff.get()[start_pos], kUnsupportedSerialization), 0);
- });
+ Local<Message> message = InvalidEncodeTest(MakeWasm());
+ size_t msg_len = static_cast<size_t>(message->Get()->Length());
+ std::unique_ptr<char[]> buff(new char[msg_len + 1]);
+ message->Get()->WriteOneByte(reinterpret_cast<uint8_t*>(buff.get()));
+ // the message ends with the custom error string
+ size_t custom_msg_len = strlen(kUnsupportedSerialization);
+ ASSERT_GE(msg_len, custom_msg_len);
+ size_t start_pos = msg_len - custom_msg_len;
+ ASSERT_EQ(strcmp(&buff.get()[start_pos], kUnsupportedSerialization), 0);
}
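The unrolled body checks that the thrown message ends with kUnsupportedSerialization by comparing the tail of a WriteOneByte copy. The suffix comparison itself is the reusable part; a small stand-alone version (EndsWith is a local helper here, not a V8 or gtest API):

    #include <gtest/gtest.h>

    #include <cstring>
    #include <string>

    namespace {

    // Returns true if |text| ends with |suffix|; a pre-C++20 stand-in for
    // std::string::ends_with.
    bool EndsWith(const std::string& text, const std::string& suffix) {
      return text.size() >= suffix.size() &&
             std::memcmp(text.data() + text.size() - suffix.size(),
                         suffix.data(), suffix.size()) == 0;
    }

    TEST(EndsWithSketch, MatchesCustomErrorSuffix) {
      const std::string kUnsupported = "Wasm Serialization is not supported";
      const std::string message = "Uncaught Error: " + kUnsupported;
      EXPECT_TRUE(EndsWith(message, kUnsupported));
      EXPECT_FALSE(EndsWith(message, "some other suffix"));
    }

    }  // namespace
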
// The default deserializer throws if wasm transfer is attempted
TEST_F(ValueSerializerTestWithWasm, DefaultDeserializationDelegate) {
EnableTransferSerialization();
EnableDefaultDeserializer();
- EncodeTest(
- [this]() { return MakeWasm(); },
- [this](const std::vector<uint8_t>& data) { InvalidDecodeTest(data); });
+ ExpectFail();
}
// We only want to allow deserialization through
@@ -2913,43 +2499,31 @@ TEST_F(ValueSerializerTestWithWasm, CannotTransferWasmWhenExpectingInline) {
TEST_F(ValueSerializerTestWithWasm, ComplexObjectDuplicateTransfer) {
EnableTransferSerialization();
EnableTransferDeserialization();
- RoundTripTest(
- [this]() { return GetComplexObjectWithDuplicate(); },
- [this](Local<Value> value) {
- VerifyComplexObject(value);
- EXPECT_TRUE(EvaluateScriptForResultBool("result.mod1 === result.mod2"));
- });
+ Local<Value> value = RoundTripTest(GetComplexObjectWithDuplicate());
+ VerifyComplexObject(value);
+ ExpectScriptTrue("result.mod1 === result.mod2");
}
TEST_F(ValueSerializerTestWithWasm, ComplexObjectDuplicateInline) {
SetExpectInlineWasm(true);
- RoundTripTest(
- [this]() { return GetComplexObjectWithDuplicate(); },
- [this](Local<Value> value) {
- VerifyComplexObject(value);
- EXPECT_TRUE(EvaluateScriptForResultBool("result.mod1 === result.mod2"));
- });
+ Local<Value> value = RoundTripTest(GetComplexObjectWithDuplicate());
+ VerifyComplexObject(value);
+ ExpectScriptTrue("result.mod1 === result.mod2");
}
TEST_F(ValueSerializerTestWithWasm, ComplexObjectWithManyTransfer) {
EnableTransferSerialization();
EnableTransferDeserialization();
- RoundTripTest(
- [this]() { return GetComplexObjectWithMany(); },
- [this](Local<Value> value) {
- VerifyComplexObject(value);
- EXPECT_TRUE(EvaluateScriptForResultBool("result.mod1 != result.mod2"));
- });
+ Local<Value> value = RoundTripTest(GetComplexObjectWithMany());
+ VerifyComplexObject(value);
+ ExpectScriptTrue("result.mod1 != result.mod2");
}
TEST_F(ValueSerializerTestWithWasm, ComplexObjectWithManyInline) {
SetExpectInlineWasm(true);
- RoundTripTest(
- [this]() { return GetComplexObjectWithMany(); },
- [this](Local<Value> value) {
- VerifyComplexObject(value);
- EXPECT_TRUE(EvaluateScriptForResultBool("result.mod1 != result.mod2"));
- });
+ Local<Value> value = RoundTripTest(GetComplexObjectWithMany());
+ VerifyComplexObject(value);
+ ExpectScriptTrue("result.mod1 != result.mod2");
}
// As produced around Chrome 56.
@@ -3018,11 +2592,10 @@ TEST_F(ValueSerializerTestWithWasm, DecodeWasmModule) {
std::vector<uint8_t> raw(
kSerializedIncrementerWasm,
kSerializedIncrementerWasm + sizeof(kSerializedIncrementerWasm));
- DecodeTest(raw, [this](Local<Value> value) {
- ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new WebAssembly.Instance(result).exports.increment(8) === 9"));
- });
+ Local<Value> value = DecodeTest(raw);
+ ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
+ ExpectScriptTrue(
+ "new WebAssembly.Instance(result).exports.increment(8) === 9");
}
// As above, but with empty compiled data. Should work due to fallback to wire
@@ -3040,11 +2613,10 @@ TEST_F(ValueSerializerTestWithWasm, DecodeWasmModuleWithInvalidCompiledData) {
kSerializedIncrementerWasmWithInvalidCompiledData,
kSerializedIncrementerWasmWithInvalidCompiledData +
sizeof(kSerializedIncrementerWasmWithInvalidCompiledData));
- DecodeTest(raw, [this](Local<Value> value) {
- ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
- EXPECT_TRUE(EvaluateScriptForResultBool(
- "new WebAssembly.Instance(result).exports.increment(8) === 9"));
- });
+ Local<Value> value = DecodeTest(raw);
+ ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
+ ExpectScriptTrue(
+ "new WebAssembly.Instance(result).exports.increment(8) === 9");
}
// As above, but also with empty wire data. Should fail.
diff --git a/deps/v8/test/unittests/wasm/OWNERS b/deps/v8/test/unittests/wasm/OWNERS
index 972daf4d99..3972f0dd99 100644
--- a/deps/v8/test/unittests/wasm/OWNERS
+++ b/deps/v8/test/unittests/wasm/OWNERS
@@ -2,8 +2,6 @@ ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
eholk@chromium.org
-mtrofin@chromium.org
-rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 5cc4bf8196..3048ae51a3 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -1228,6 +1228,7 @@ TEST_F(FunctionBodyDecoderTest, MacrosInt64) {
}
TEST_F(FunctionBodyDecoderTest, AllSimpleExpressions) {
+ EXPERIMENTAL_FLAG_SCOPE(se);
// Test all simple expressions which are described by a signature.
#define DECODE_TEST(name, opcode, sig) \
{ \
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index 1b6af25a4a..75eeea013f 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -151,14 +151,13 @@ TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
CheckLooksLike(a, {{10, 15}, {20, 35}, {36, 40}});
}
-class WasmCodeManagerTest : public TestWithIsolate {
+enum ModuleStyle : int { Fixed = 0, Growable = 1 };
+
+class WasmCodeManagerTest : public TestWithIsolate,
+ public ::testing::WithParamInterface<ModuleStyle> {
public:
using NativeModulePtr = std::unique_ptr<NativeModule>;
- enum ModuleStyle : int { Fixed = 0, Growable = 1 };
- const std::vector<ModuleStyle> styles() const {
- return std::vector<ModuleStyle>({Fixed, Growable});
- }
// We pretend all our modules have 10 functions and no imports, just so
// we can size up the code_table.
NativeModulePtr AllocFixedModule(WasmCodeManager* manager, size_t size) {
@@ -187,7 +186,7 @@ class WasmCodeManagerTest : public TestWithIsolate {
std::unique_ptr<byte[]> exec_buff(new byte[size]);
desc.buffer = exec_buff.get();
desc.instr_size = static_cast<int>(size);
- return native_module->AddCode(desc, 0, index, 0, {}, false);
+ return native_module->AddCode(desc, 0, index, 0, 0, {}, false);
}
size_t page() const { return AllocatePageSize(); }
@@ -196,194 +195,171 @@ class WasmCodeManagerTest : public TestWithIsolate {
}
};
-TEST_F(WasmCodeManagerTest, EmptyCase) {
- for (auto style : styles()) {
- WasmCodeManager manager(v8_isolate(), 0 * page());
- CHECK_EQ(0, manager.remaining_uncommitted());
-
- NativeModulePtr native_module = AllocModule(&manager, 1 * page(), style);
- CHECK(native_module);
- WasmCode* code = AddCode(native_module.get(), 0, 10);
- CHECK_NULL(code);
- CHECK_EQ(0, manager.remaining_uncommitted());
- native_module.reset();
- CHECK_EQ(0, manager.remaining_uncommitted());
- }
+INSTANTIATE_TEST_CASE_P(Parameterized, WasmCodeManagerTest,
+ ::testing::Values(Fixed, Growable));
+
+TEST_P(WasmCodeManagerTest, EmptyCase) {
+ WasmCodeManager manager(v8_isolate(), 0 * page());
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
+ CHECK(native_module);
+ ASSERT_DEATH_IF_SUPPORTED(AddCode(native_module.get(), 0, 10),
+ "OOM in NativeModule::AddOwnedCode");
}
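This file's conversion from TEST_F with a hand-rolled loop over styles() to TEST_P with WithParamInterface lets each Fixed/Growable combination run, report, and fail as its own test case. A self-contained sketch of the same gtest machinery, with INSTANTIATE_TEST_CASE_P as used in this gtest vintage (the fixture and test names are illustrative):

    #include <gtest/gtest.h>

    namespace {

    enum ModuleStyle : int { Fixed = 0, Growable = 1 };

    // Value-parameterized fixture: gtest runs every TEST_P body once per
    // value supplied below, replacing the for-loop-over-styles pattern.
    class StyleTest : public ::testing::TestWithParam<ModuleStyle> {};

    TEST_P(StyleTest, ParamIsOneOfTheInstantiatedValues) {
      ModuleStyle style = GetParam();
      EXPECT_TRUE(style == Fixed || style == Growable);
    }

    INSTANTIATE_TEST_CASE_P(Parameterized, StyleTest,
                            ::testing::Values(Fixed, Growable));

    }  // namespace
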
-TEST_F(WasmCodeManagerTest, AllocateAndGoOverLimit) {
- for (auto style : styles()) {
- WasmCodeManager manager(v8_isolate(), 1 * page());
- CHECK_EQ(1 * page(), manager.remaining_uncommitted());
- NativeModulePtr native_module = AllocModule(&manager, 1 * page(), style);
- CHECK(native_module);
- CHECK_EQ(1 * page(), manager.remaining_uncommitted());
- uint32_t index = 0;
- WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(0, manager.remaining_uncommitted());
-
- code = AddCode(native_module.get(), index++, 3 * kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(0, manager.remaining_uncommitted());
-
- code = AddCode(native_module.get(), index++, page() - 4 * kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(0, manager.remaining_uncommitted());
-
- code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
- CHECK_NULL(code);
- CHECK_EQ(0, manager.remaining_uncommitted());
-
- native_module.reset();
- CHECK_EQ(1 * page(), manager.remaining_uncommitted());
- }
+TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
+ WasmCodeManager manager(v8_isolate(), 1 * page());
+ CHECK_EQ(1 * page(), manager.remaining_uncommitted());
+ NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
+ CHECK(native_module);
+ CHECK_EQ(1 * page(), manager.remaining_uncommitted());
+ uint32_t index = 0;
+ WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ code = AddCode(native_module.get(), index++, 3 * kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ code = AddCode(native_module.get(), index++, page() - 4 * kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(0, manager.remaining_uncommitted());
+
+ ASSERT_DEATH_IF_SUPPORTED(
+ AddCode(native_module.get(), index++, 1 * kCodeAlignment),
+ "OOM in NativeModule::AddOwnedCode");
}
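The other behavioral change in these hunks: exhausting the reservation no longer yields a nullptr WasmCode but aborts, so the tests switch from CHECK_NULL to ASSERT_DEATH_IF_SUPPORTED, which matches the abort message where death tests are available and is skipped elsewhere. A minimal sketch, assuming a fatal path that prints to stderr and aborts:

    #include <gtest/gtest.h>

    #include <cstdio>
    #include <cstdlib>

    namespace {

    // Stand-in for the fatal path taken on allocation failure: print a
    // message and abort, which is what the death test observes.
    void FatalOom() {
      std::fprintf(stderr, "OOM in NativeModule::AddOwnedCode\n");
      std::abort();
    }

    TEST(DeathSketch, OomAborts) {
      // Where supported, this forks the process and matches its stderr
      // against the given regular expression; otherwise it is a no-op.
      ASSERT_DEATH_IF_SUPPORTED(FatalOom(),
                                "OOM in NativeModule::AddOwnedCode");
    }

    }  // namespace
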
-TEST_F(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
- for (auto style : styles()) {
- WasmCodeManager manager(v8_isolate(), 1 * page());
- NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
- NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), style);
- CHECK(nm1);
- CHECK(nm2);
- WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
- CHECK_NOT_NULL(code);
- code = AddCode(nm2.get(), 0, 1 * page());
- CHECK_NULL(code);
- }
+TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
+ WasmCodeManager manager(v8_isolate(), 1 * page());
+ NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
+ NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), GetParam());
+ CHECK(nm1);
+ CHECK(nm2);
+ WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
+ CHECK_NOT_NULL(code);
+ ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 1 * page()),
+ "OOM in NativeModule::AddOwnedCode");
}
-TEST_F(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
- for (auto style : styles()) {
- WasmCodeManager manager1(v8_isolate(), 1 * page());
- WasmCodeManager manager2(v8_isolate(), 2 * page());
- NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), style);
- NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), style);
- CHECK(nm1);
- CHECK(nm2);
- WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
- CHECK_NOT_NULL(code);
- CHECK_EQ(0, manager1.remaining_uncommitted());
- code = AddCode(nm2.get(), 0, 1 * page());
- CHECK_NOT_NULL(code);
- }
+TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
+ WasmCodeManager manager1(v8_isolate(), 1 * page());
+ WasmCodeManager manager2(v8_isolate(), 2 * page());
+ NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), GetParam());
+ NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
+ CHECK(nm1);
+ CHECK(nm2);
+ WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(0, manager1.remaining_uncommitted());
+ code = AddCode(nm2.get(), 0, 1 * page());
+ CHECK_NOT_NULL(code);
}
-TEST_F(WasmCodeManagerTest, GrowingVsFixedModule) {
- for (auto style : styles()) {
- WasmCodeManager manager(v8_isolate(), 3 * page());
- NativeModulePtr nm = AllocModule(&manager, 1 * page(), style);
- WasmCode* code = AddCode(nm.get(), 0, 1 * page() + kCodeAlignment);
- if (style == Fixed) {
- CHECK_NULL(code);
- CHECK_EQ(manager.remaining_uncommitted(), 3 * page());
- } else {
- CHECK_NOT_NULL(code);
- CHECK_EQ(manager.remaining_uncommitted(), 1 * page());
- }
+TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
+ WasmCodeManager manager(v8_isolate(), 3 * page());
+ NativeModulePtr nm = AllocModule(&manager, 1 * page(), GetParam());
+ if (GetParam() == Fixed) {
+ ASSERT_DEATH_IF_SUPPORTED(AddCode(nm.get(), 0, 1 * page() + kCodeAlignment),
+ "OOM in NativeModule::AddOwnedCode");
+ } else {
+ CHECK_NOT_NULL(AddCode(nm.get(), 0, 1 * page() + kCodeAlignment));
+ CHECK_EQ(manager.remaining_uncommitted(), 1 * page());
}
}
-TEST_F(WasmCodeManagerTest, CommitIncrements) {
- for (auto style : styles()) {
- WasmCodeManager manager(v8_isolate(), 10 * page());
- NativeModulePtr nm = AllocModule(&manager, 3 * page(), style);
- WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(manager.remaining_uncommitted(), 9 * page());
- code = AddCode(nm.get(), 1, 2 * page());
- CHECK_NOT_NULL(code);
- CHECK_EQ(manager.remaining_uncommitted(), 7 * page());
- code = AddCode(nm.get(), 2, page() - kCodeAlignment);
- CHECK_NOT_NULL(code);
- CHECK_EQ(manager.remaining_uncommitted(), 7 * page());
- }
+TEST_P(WasmCodeManagerTest, CommitIncrements) {
+ WasmCodeManager manager(v8_isolate(), 10 * page());
+ NativeModulePtr nm = AllocModule(&manager, 3 * page(), GetParam());
+ WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(manager.remaining_uncommitted(), 9 * page());
+ code = AddCode(nm.get(), 1, 2 * page());
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(manager.remaining_uncommitted(), 7 * page());
+ code = AddCode(nm.get(), 2, page() - kCodeAlignment);
+ CHECK_NOT_NULL(code);
+ CHECK_EQ(manager.remaining_uncommitted(), 7 * page());
}
-TEST_F(WasmCodeManagerTest, Lookup) {
- for (auto style : styles()) {
- WasmCodeManager manager(v8_isolate(), 2 * page());
-
- NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
- NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), style);
- WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
- CHECK_EQ(nm1.get(), code1_0->owner());
- WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
- WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
- WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
- CHECK_EQ(nm2.get(), code2_1->owner());
-
- CHECK_EQ(0, code1_0->index());
- CHECK_EQ(1, code1_1->index());
- CHECK_EQ(0, code2_0->index());
- CHECK_EQ(1, code2_1->index());
-
- // we know the manager object is allocated here, so we shouldn't
- // find any WasmCode* associated with that ptr.
- WasmCode* not_found =
- manager.LookupCode(reinterpret_cast<Address>(&manager));
- CHECK_NULL(not_found);
- WasmCode* found = manager.LookupCode(code1_0->instructions().start());
- CHECK_EQ(found, code1_0);
- found = manager.LookupCode(code2_1->instructions().start() +
- (code2_1->instructions().size() / 2));
- CHECK_EQ(found, code2_1);
- found = manager.LookupCode(code2_1->instructions().start() +
- code2_1->instructions().size() - 1);
- CHECK_EQ(found, code2_1);
- found = manager.LookupCode(code2_1->instructions().start() +
- code2_1->instructions().size());
- CHECK_NULL(found);
- Address mid_code1_1 =
- code1_1->instructions().start() + (code1_1->instructions().size() / 2);
- CHECK_EQ(code1_1, manager.LookupCode(mid_code1_1));
- nm1.reset();
- CHECK_NULL(manager.LookupCode(mid_code1_1));
- }
+TEST_P(WasmCodeManagerTest, Lookup) {
+ WasmCodeManager manager(v8_isolate(), 2 * page());
+
+ NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
+ NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), GetParam());
+ WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
+ CHECK_EQ(nm1.get(), code1_0->owner());
+ WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
+ WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
+ WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
+ CHECK_EQ(nm2.get(), code2_1->owner());
+
+ CHECK_EQ(0, code1_0->index());
+ CHECK_EQ(1, code1_1->index());
+ CHECK_EQ(0, code2_0->index());
+ CHECK_EQ(1, code2_1->index());
+
+ // we know the manager object is allocated here, so we shouldn't
+ // find any WasmCode* associated with that ptr.
+ WasmCode* not_found = manager.LookupCode(reinterpret_cast<Address>(&manager));
+ CHECK_NULL(not_found);
+ WasmCode* found = manager.LookupCode(code1_0->instructions().start());
+ CHECK_EQ(found, code1_0);
+ found = manager.LookupCode(code2_1->instructions().start() +
+ (code2_1->instructions().size() / 2));
+ CHECK_EQ(found, code2_1);
+ found = manager.LookupCode(code2_1->instructions().start() +
+ code2_1->instructions().size() - 1);
+ CHECK_EQ(found, code2_1);
+ found = manager.LookupCode(code2_1->instructions().start() +
+ code2_1->instructions().size());
+ CHECK_NULL(found);
+ Address mid_code1_1 =
+ code1_1->instructions().start() + (code1_1->instructions().size() / 2);
+ CHECK_EQ(code1_1, manager.LookupCode(mid_code1_1));
+ nm1.reset();
+ CHECK_NULL(manager.LookupCode(mid_code1_1));
}
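The Lookup assertions pin down half-open range semantics: an address one past the end of a code object's instructions must not resolve to it. A sketch of that address-to-owner lookup over a sorted map (RangeMap is illustrative, not the WasmCodeManager implementation):

    #include <gtest/gtest.h>

    #include <cstdint>
    #include <map>
    #include <utility>

    namespace {

    // Maps half-open address ranges [start, start + size) to an owner id.
    class RangeMap {
     public:
      void Insert(uintptr_t start, size_t size, int id) {
        ranges_[start] = std::make_pair(size, id);
      }
      // Returns the id owning |address|, or -1 if no range contains it.
      int Lookup(uintptr_t address) const {
        auto it = ranges_.upper_bound(address);
        if (it == ranges_.begin()) return -1;
        --it;  // now the last range starting at or before |address|
        if (address < it->first + it->second.first) return it->second.second;
        return -1;
      }

     private:
      std::map<uintptr_t, std::pair<size_t, int>> ranges_;
    };

    TEST(RangeMapSketch, HalfOpenRanges) {
      RangeMap map;
      map.Insert(0x1000, 0x100, 7);
      EXPECT_EQ(7, map.Lookup(0x1000));
      EXPECT_EQ(7, map.Lookup(0x10FF));
      EXPECT_EQ(-1, map.Lookup(0x1100));  // one past the end
    }

    }  // namespace
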
-TEST_F(WasmCodeManagerTest, MultiManagerLookup) {
- for (auto style : styles()) {
- WasmCodeManager manager1(v8_isolate(), 2 * page());
- WasmCodeManager manager2(v8_isolate(), 2 * page());
+TEST_P(WasmCodeManagerTest, MultiManagerLookup) {
+ WasmCodeManager manager1(v8_isolate(), 2 * page());
+ WasmCodeManager manager2(v8_isolate(), 2 * page());
- NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), style);
- NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), style);
+ NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), GetParam());
+ NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
- WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
- CHECK_EQ(nm1.get(), code1_0->owner());
- WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
- WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
- WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
- CHECK_EQ(nm2.get(), code2_1->owner());
+ WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
+ CHECK_EQ(nm1.get(), code1_0->owner());
+ WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
+ WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
+ WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
+ CHECK_EQ(nm2.get(), code2_1->owner());
- CHECK_EQ(0, code1_0->index());
- CHECK_EQ(1, code1_1->index());
- CHECK_EQ(0, code2_0->index());
- CHECK_EQ(1, code2_1->index());
+ CHECK_EQ(0, code1_0->index());
+ CHECK_EQ(1, code1_1->index());
+ CHECK_EQ(0, code2_0->index());
+ CHECK_EQ(1, code2_1->index());
- CHECK_EQ(code1_0, manager1.LookupCode(code1_0->instructions().start()));
- CHECK_NULL(manager2.LookupCode(code1_0->instructions().start()));
- }
+ CHECK_EQ(code1_0, manager1.LookupCode(code1_0->instructions().start()));
+ CHECK_NULL(manager2.LookupCode(code1_0->instructions().start()));
}
-TEST_F(WasmCodeManagerTest, LookupWorksAfterRewrite) {
- for (auto style : styles()) {
- WasmCodeManager manager(v8_isolate(), 2 * page());
-
- NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
-
- WasmCode* code0 = AddCode(nm1.get(), 0, kCodeAlignment);
- WasmCode* code1 = AddCode(nm1.get(), 1, kCodeAlignment);
- CHECK_EQ(0, code0->index());
- CHECK_EQ(1, code1->index());
- CHECK_EQ(code1, manager.LookupCode(code1->instructions().start()));
- WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
- CHECK_EQ(1, code1_1->index());
- CHECK_EQ(code1, manager.LookupCode(code1->instructions().start()));
- CHECK_EQ(code1_1, manager.LookupCode(code1_1->instructions().start()));
- }
+TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
+ WasmCodeManager manager(v8_isolate(), 2 * page());
+
+ NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
+
+ WasmCode* code0 = AddCode(nm1.get(), 0, kCodeAlignment);
+ WasmCode* code1 = AddCode(nm1.get(), 1, kCodeAlignment);
+ CHECK_EQ(0, code0->index());
+ CHECK_EQ(1, code1->index());
+ CHECK_EQ(code1, manager.LookupCode(code1->instructions().start()));
+ WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
+ CHECK_EQ(1, code1_1->index());
+ CHECK_EQ(code1, manager.LookupCode(code1->instructions().start()));
+ CHECK_EQ(code1_1, manager.LookupCode(code1_1->instructions().start()));
}
} // namespace wasm_heap_unittest
diff --git a/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc b/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
index 73e43a30d9..0883dd9538 100644
--- a/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
+++ b/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
@@ -202,5 +202,83 @@ TEST(ZoneChunkList, BigCopyToTest) {
}
}
+void TestForwardIterationOfConstList(
+ const ZoneChunkList<uintptr_t>& zone_chunk_list) {
+ size_t count = 0;
+
+ for (uintptr_t item : zone_chunk_list) {
+ EXPECT_EQ(static_cast<size_t>(item), count);
+ count++;
+ }
+
+ EXPECT_EQ(count, kItemCount);
+}
+
+TEST(ZoneChunkList, ConstForwardIterationTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ TestForwardIterationOfConstList(zone_chunk_list);
+}
+
+TEST(ZoneChunkList, RewindAndIterate) {
+ // Regression test for https://bugs.chromium.org/p/v8/issues/detail?id=7478
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<int> zone_chunk_list(&zone);
+
+ // Fill the list enough so that it will contain 2 chunks.
+ int chunk_size = static_cast<int>(ZoneChunkList<int>::StartMode::kSmall);
+ for (int i = 0; i < chunk_size + 1; ++i) {
+ zone_chunk_list.push_back(i);
+ }
+
+ // Rewind and fill the first chunk again.
+ zone_chunk_list.Rewind();
+ for (int i = 0; i < chunk_size; ++i) {
+ zone_chunk_list.push_back(i);
+ }
+
+ std::vector<int> expected;
+ for (int i = 0; i < chunk_size; ++i) {
+ expected.push_back(i);
+ }
+ std::vector<int> got;
+
+ // Iterate. This used to not yield the expected result, since the end iterator
+ // was in a weird state, and the running iterator didn't reach it after the
+ // first chunk.
+ auto it = zone_chunk_list.begin();
+ while (it != zone_chunk_list.end()) {
+ int value = *it;
+ got.push_back(value);
+ ++it;
+ }
+ CHECK_EQ(expected.size(), got.size());
+ for (size_t i = 0; i < expected.size(); ++i) {
+ CHECK_EQ(expected[i], got[i]);
+ }
+}
+
+TEST(ZoneChunkList, PushBackPopBackSize) {
+ // Regression test for https://bugs.chromium.org/p/v8/issues/detail?id=7489
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<int> zone_chunk_list(&zone);
+ CHECK_EQ(size_t(0), zone_chunk_list.size());
+ zone_chunk_list.push_back(1);
+ CHECK_EQ(size_t(1), zone_chunk_list.size());
+ zone_chunk_list.pop_back();
+ CHECK_EQ(size_t(0), zone_chunk_list.size());
+}
+
} // namespace internal
} // namespace v8
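The new TestForwardIterationOfConstList helper exists to force the const overloads of begin()/end(); iterating directly inside the TEST body would silently pick the non-const ones. The same trick applied to a standard container, as a sketch:

    #include <gtest/gtest.h>

    #include <cstddef>
    #include <vector>

    namespace {

    // Taking the container by const reference makes the range-for below use
    // the const begin()/end() overloads, which is what we want to test.
    void ExpectSequential(const std::vector<size_t>& list) {
      size_t count = 0;
      for (size_t item : list) {
        EXPECT_EQ(item, count);
        ++count;
      }
      EXPECT_EQ(count, list.size());
    }

    TEST(ConstIterationSketch, ForwardIteration) {
      std::vector<size_t> list;
      for (size_t i = 0; i < 100; ++i) list.push_back(i);
      ExpectSequential(list);
    }

    }  // namespace
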
diff --git a/deps/v8/test/wasm-spec-tests/OWNERS b/deps/v8/test/wasm-spec-tests/OWNERS
index c42a6f4caa..88cf9bea30 100644
--- a/deps/v8/test/wasm-spec-tests/OWNERS
+++ b/deps/v8/test/wasm-spec-tests/OWNERS
@@ -1,6 +1,5 @@
ahaas@chromium.org
clemensh@chromium.org
machenbach@chromium.org
-rossberg@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index 7f99ed4711..3571b62807 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -8,7 +8,7 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
@@ -27,9 +27,9 @@ class TestSuite(testsuite.TestSuite):
class TestCase(testcase.TestCase):
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [os.path.join(self.suite.root, self.path + self._get_suffix())]
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 6fc33c12e2..cf23da723f 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-310ca173c041a53775a713ac948c3627ae357f8d
\ No newline at end of file
+6bad244128aba5a6621f7c1270ec0356c735de24
\ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.gyp b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.gyp
deleted file mode 100644
index 711f982c9a..0000000000
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'wasm_spec_tests_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'wasm-spec-tests.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt
index 7c2a301dc3..8b44ae59b5 100644
--- a/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt
@@ -3,8 +3,6 @@ Test whether Promise treats thenable correctly.
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
The promise is already rejected now.
-PASS rejected
-PASS result is "hello"
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
index 2a09635ebe..f6dbe354e7 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
@@ -497,8 +497,8 @@ PASS Invalid: "try {} finally {} catch(e) {}"
PASS Invalid: "function f() { try {} finally {} catch(e) {} }"
PASS Invalid: "try {} catch (...) {}"
PASS Invalid: "function f() { try {} catch (...) {} }"
-PASS Invalid: "try {} catch {}"
-PASS Invalid: "function f() { try {} catch {} }"
+PASS Valid: "try {} catch {}"
+PASS Valid: "function f() { try {} catch {} }"
PASS Valid: "if (a) try {} finally {} else b;"
PASS Valid: "function f() { if (a) try {} finally {} else b; }"
PASS Valid: "if (--a()) do with(1) try {} catch(ke) { f() ; g() } while (a in b) else {}"
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check.js b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
index 6902f530a4..15be4b048b 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check.js
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
@@ -331,7 +331,7 @@ invalid("try {} catch(e)");
invalid("try {} finally");
invalid("try {} finally {} catch(e) {}");
invalid("try {} catch (...) {}");
-invalid("try {} catch {}");
+valid ("try {} catch {}");
valid ("if (a) try {} finally {} else b;");
valid ("if (--a()) do with(1) try {} catch(ke) { f() ; g() } while (a in b) else {}");
invalid("if (a) try {} else b; catch (e) { }");
diff --git a/deps/v8/test/webkit/string-trim-expected.txt b/deps/v8/test/webkit/string-trim-expected.txt
index 6472f89d0b..22d2e4776a 100644
--- a/deps/v8/test/webkit/string-trim-expected.txt
+++ b/deps/v8/test/webkit/string-trim-expected.txt
@@ -21,130 +21,212 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-This test checks String.trim(), String.trimLeft() and String.trimRight() methods.
+This test checks the `trim`, `trimStart`/`trimLeft`, and `trimEnd`/`trimRight` methods on `String.prototype`.
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
PASS whitespace[0].s.trim() is ''
+PASS whitespace[0].s.trimStart() is ''
PASS whitespace[0].s.trimLeft() is ''
+PASS whitespace[0].s.trimEnd() is ''
PASS whitespace[0].s.trimRight() is ''
PASS whitespace[1].s.trim() is ''
+PASS whitespace[1].s.trimStart() is ''
PASS whitespace[1].s.trimLeft() is ''
+PASS whitespace[1].s.trimEnd() is ''
PASS whitespace[1].s.trimRight() is ''
PASS whitespace[2].s.trim() is ''
+PASS whitespace[2].s.trimStart() is ''
PASS whitespace[2].s.trimLeft() is ''
+PASS whitespace[2].s.trimEnd() is ''
PASS whitespace[2].s.trimRight() is ''
PASS whitespace[3].s.trim() is ''
+PASS whitespace[3].s.trimStart() is ''
PASS whitespace[3].s.trimLeft() is ''
+PASS whitespace[3].s.trimEnd() is ''
PASS whitespace[3].s.trimRight() is ''
PASS whitespace[4].s.trim() is ''
+PASS whitespace[4].s.trimStart() is ''
PASS whitespace[4].s.trimLeft() is ''
+PASS whitespace[4].s.trimEnd() is ''
PASS whitespace[4].s.trimRight() is ''
PASS whitespace[5].s.trim() is ''
+PASS whitespace[5].s.trimStart() is ''
PASS whitespace[5].s.trimLeft() is ''
+PASS whitespace[5].s.trimEnd() is ''
PASS whitespace[5].s.trimRight() is ''
PASS whitespace[6].s.trim() is ''
+PASS whitespace[6].s.trimStart() is ''
PASS whitespace[6].s.trimLeft() is ''
+PASS whitespace[6].s.trimEnd() is ''
PASS whitespace[6].s.trimRight() is ''
PASS whitespace[7].s.trim() is ''
+PASS whitespace[7].s.trimStart() is ''
PASS whitespace[7].s.trimLeft() is ''
+PASS whitespace[7].s.trimEnd() is ''
PASS whitespace[7].s.trimRight() is ''
PASS whitespace[8].s.trim() is ''
+PASS whitespace[8].s.trimStart() is ''
PASS whitespace[8].s.trimLeft() is ''
+PASS whitespace[8].s.trimEnd() is ''
PASS whitespace[8].s.trimRight() is ''
PASS whitespace[9].s.trim() is ''
+PASS whitespace[9].s.trimStart() is ''
PASS whitespace[9].s.trimLeft() is ''
+PASS whitespace[9].s.trimEnd() is ''
PASS whitespace[9].s.trimRight() is ''
PASS whitespace[10].s.trim() is ''
+PASS whitespace[10].s.trimStart() is ''
PASS whitespace[10].s.trimLeft() is ''
+PASS whitespace[10].s.trimEnd() is ''
PASS whitespace[10].s.trimRight() is ''
PASS whitespace[11].s.trim() is ''
+PASS whitespace[11].s.trimStart() is ''
PASS whitespace[11].s.trimLeft() is ''
+PASS whitespace[11].s.trimEnd() is ''
PASS whitespace[11].s.trimRight() is ''
PASS whitespace[12].s.trim() is ''
+PASS whitespace[12].s.trimStart() is ''
PASS whitespace[12].s.trimLeft() is ''
+PASS whitespace[12].s.trimEnd() is ''
PASS whitespace[12].s.trimRight() is ''
PASS whitespace[13].s.trim() is ''
+PASS whitespace[13].s.trimStart() is ''
PASS whitespace[13].s.trimLeft() is ''
+PASS whitespace[13].s.trimEnd() is ''
PASS whitespace[13].s.trimRight() is ''
PASS whitespace[14].s.trim() is ''
+PASS whitespace[14].s.trimStart() is ''
PASS whitespace[14].s.trimLeft() is ''
+PASS whitespace[14].s.trimEnd() is ''
PASS whitespace[14].s.trimRight() is ''
PASS whitespace[15].s.trim() is ''
+PASS whitespace[15].s.trimStart() is ''
PASS whitespace[15].s.trimLeft() is ''
+PASS whitespace[15].s.trimEnd() is ''
PASS whitespace[15].s.trimRight() is ''
PASS whitespace[16].s.trim() is ''
+PASS whitespace[16].s.trimStart() is ''
PASS whitespace[16].s.trimLeft() is ''
+PASS whitespace[16].s.trimEnd() is ''
PASS whitespace[16].s.trimRight() is ''
PASS whitespace[17].s.trim() is ''
+PASS whitespace[17].s.trimStart() is ''
PASS whitespace[17].s.trimLeft() is ''
+PASS whitespace[17].s.trimEnd() is ''
PASS whitespace[17].s.trimRight() is ''
PASS whitespace[18].s.trim() is ''
+PASS whitespace[18].s.trimStart() is ''
PASS whitespace[18].s.trimLeft() is ''
+PASS whitespace[18].s.trimEnd() is ''
PASS whitespace[18].s.trimRight() is ''
PASS whitespace[19].s.trim() is ''
+PASS whitespace[19].s.trimStart() is ''
PASS whitespace[19].s.trimLeft() is ''
+PASS whitespace[19].s.trimEnd() is ''
PASS whitespace[19].s.trimRight() is ''
PASS whitespace[20].s.trim() is ''
+PASS whitespace[20].s.trimStart() is ''
PASS whitespace[20].s.trimLeft() is ''
+PASS whitespace[20].s.trimEnd() is ''
PASS whitespace[20].s.trimRight() is ''
FAIL whitespace[21].s.trim() should be . Was ​.
+FAIL whitespace[21].s.trimStart() should be . Was ​.
FAIL whitespace[21].s.trimLeft() should be . Was ​.
+FAIL whitespace[21].s.trimEnd() should be . Was ​.
FAIL whitespace[21].s.trimRight() should be . Was ​.
FAIL wsString.trim() should be . Was ​.
+FAIL wsString.trimStart() should be . Was ​.
FAIL wsString.trimLeft() should be . Was ​.
-FAIL wsString.trimRight() should be . Was
+FAIL wsString.trimEnd() should be . Was
             

​.
-FAIL trimString.trim() should be foo bar. Was ​foo bar
+FAIL wsString.trimRight() should be . Was
             

​.
-FAIL trimString.trimLeft() should be foo bar
+FAIL trimString.trim() should be foo bar. Was ​foo bar
-              

​. Was ​foo bar
+              

​.
+FAIL trimString.trimStart() should be foo bar
+
+              

​. Was ​foo bar
+
+              

​.
+FAIL trimString.trimLeft() should be foo bar
+
+              

​. Was ​foo bar
             

​.
-FAIL trimString.trimRight() should be
+FAIL trimString.trimEnd() should be
-              

​foo bar. Was
+              

​foo bar. Was
-              

​foo bar
+              

​foo bar
             

​.
-FAIL leftTrimString.trim() should be foo bar. Was foo bar
+FAIL trimString.trimRight() should be
+
+              

​foo bar. Was
+
+              

​foo bar
+
+              

​.
+FAIL leftTrimString.trim() should be foo bar. Was foo bar
             

​.
+PASS leftTrimString.trimStart() is leftTrimString
PASS leftTrimString.trimLeft() is leftTrimString
-FAIL leftTrimString.trimRight() should be foo bar. Was foo bar
+FAIL leftTrimString.trimEnd() should be foo bar. Was foo bar
+
+              

​.
+FAIL leftTrimString.trimRight() should be foo bar. Was foo bar
             

​.
FAIL rightTrimString.trim() should be foo bar. Was ​foo bar.
+FAIL rightTrimString.trimStart() should be foo bar. Was ​foo bar.
FAIL rightTrimString.trimLeft() should be foo bar. Was ​foo bar.
+PASS rightTrimString.trimEnd() is rightTrimString
PASS rightTrimString.trimRight() is rightTrimString
PASS trim.call(0) is '0'
+PASS trimStart.call(0) is '0'
PASS trimLeft.call(0) is '0'
+PASS trimEnd.call(0) is '0'
PASS trimRight.call(0) is '0'
PASS trim.call(Infinity) is 'Infinity'
+PASS trimStart.call(Infinity) is 'Infinity'
PASS trimLeft.call(Infinity) is 'Infinity'
+PASS trimEnd.call(Infinity) is 'Infinity'
PASS trimRight.call(Infinity) is 'Infinity'
PASS trim.call(NaN) is 'NaN'
+PASS trimStart.call(NaN) is 'NaN'
PASS trimLeft.call(NaN) is 'NaN'
+PASS trimEnd.call(NaN) is 'NaN'
PASS trimRight.call(NaN) is 'NaN'
PASS trim.call(true) is 'true'
+PASS trimStart.call(true) is 'true'
PASS trimLeft.call(true) is 'true'
+PASS trimEnd.call(true) is 'true'
PASS trimRight.call(true) is 'true'
PASS trim.call(false) is 'false'
+PASS trimStart.call(false) is 'false'
PASS trimLeft.call(false) is 'false'
+PASS trimEnd.call(false) is 'false'
PASS trimRight.call(false) is 'false'
PASS trim.call(({})) is '[object Object]'
+PASS trimStart.call(({})) is '[object Object]'
PASS trimLeft.call(({})) is '[object Object]'
+PASS trimEnd.call(({})) is '[object Object]'
PASS trimRight.call(({})) is '[object Object]'
PASS trim.call(({toString:function(){return 'wibble'}})) is 'wibble'
+PASS trimStart.call(({toString:function(){return 'wibble'}})) is 'wibble'
PASS trimLeft.call(({toString:function(){return 'wibble'}})) is 'wibble'
+PASS trimEnd.call(({toString:function(){return 'wibble'}})) is 'wibble'
PASS trimRight.call(({toString:function(){return 'wibble'}})) is 'wibble'
PASS trim.call(['an','array']) is 'an,array'
+PASS trimStart.call(['an','array']) is 'an,array'
PASS trimLeft.call(['an','array']) is 'an,array'
+PASS trimEnd.call(['an','array']) is 'an,array'
PASS trimRight.call(['an','array']) is 'an,array'
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/string-trim.js b/deps/v8/test/webkit/string-trim.js
index 27f99d579a..fd9c1d1557 100644
--- a/deps/v8/test/webkit/string-trim.js
+++ b/deps/v8/test/webkit/string-trim.js
@@ -21,74 +21,95 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-description("This test checks String.trim(), String.trimLeft() and String.trimRight() methods.");
+// Flags: --harmony-string-trimming
-//references to trim(), trimLeft() and trimRight() functions for testing Function's *.call() and *.apply() methods
-var trim = String.prototype.trim;
-var trimLeft = String.prototype.trimLeft;
-var trimRight = String.prototype.trimRight;
+description("This test checks the `trim`, `trimStart`/`trimLeft`, and `trimEnd`/`trimRight` methods on `String.prototype`.");
-var testString = 'foo bar';
-var trimString = '';
-var leftTrimString = '';
-var rightTrimString = '';
-var wsString = '';
+// References to trim(), trimLeft() and trimRight() functions for testing Function's *.call() and *.apply() methods
+const trim = String.prototype.trim;
+const trimStart = String.prototype.trimStart;
+const trimLeft = String.prototype.trimLeft;
+const trimEnd = String.prototype.trimEnd;
+const trimRight = String.prototype.trimRight;
-var whitespace = [
- {s : '\u0009', t : 'HORIZONTAL TAB'},
- {s : '\u000A', t : 'LINE FEED OR NEW LINE'},
- {s : '\u000B', t : 'VERTICAL TAB'},
- {s : '\u000C', t : 'FORMFEED'},
- {s : '\u000D', t : 'CARRIAGE RETURN'},
- {s : '\u0020', t : 'SPACE'},
- {s : '\u00A0', t : 'NO-BREAK SPACE'},
- {s : '\u2000', t : 'EN QUAD'},
- {s : '\u2001', t : 'EM QUAD'},
- {s : '\u2002', t : 'EN SPACE'},
- {s : '\u2003', t : 'EM SPACE'},
- {s : '\u2004', t : 'THREE-PER-EM SPACE'},
- {s : '\u2005', t : 'FOUR-PER-EM SPACE'},
- {s : '\u2006', t : 'SIX-PER-EM SPACE'},
- {s : '\u2007', t : 'FIGURE SPACE'},
- {s : '\u2008', t : 'PUNCTUATION SPACE'},
- {s : '\u2009', t : 'THIN SPACE'},
- {s : '\u200A', t : 'HAIR SPACE'},
- {s : '\u3000', t : 'IDEOGRAPHIC SPACE'},
- {s : '\u2028', t : 'LINE SEPARATOR'},
- {s : '\u2029', t : 'PARAGRAPH SEPARATOR'},
- {s : '\u200B', t : 'ZERO WIDTH SPACE (category Cf)'}
+const whitespace = [
+ {s: '\u0009', t: 'HORIZONTAL TAB'},
+ {s: '\u000A', t: 'LINE FEED OR NEW LINE'},
+ {s: '\u000B', t: 'VERTICAL TAB'},
+ {s: '\u000C', t: 'FORMFEED'},
+ {s: '\u000D', t: 'CARRIAGE RETURN'},
+ {s: '\u0020', t: 'SPACE'},
+ {s: '\u00A0', t: 'NO-BREAK SPACE'},
+ {s: '\u2000', t: 'EN QUAD'},
+ {s: '\u2001', t: 'EM QUAD'},
+ {s: '\u2002', t: 'EN SPACE'},
+ {s: '\u2003', t: 'EM SPACE'},
+ {s: '\u2004', t: 'THREE-PER-EM SPACE'},
+ {s: '\u2005', t: 'FOUR-PER-EM SPACE'},
+ {s: '\u2006', t: 'SIX-PER-EM SPACE'},
+ {s: '\u2007', t: 'FIGURE SPACE'},
+ {s: '\u2008', t: 'PUNCTUATION SPACE'},
+ {s: '\u2009', t: 'THIN SPACE'},
+ {s: '\u200A', t: 'HAIR SPACE'},
+ {s: '\u3000', t: 'IDEOGRAPHIC SPACE'},
+ {s: '\u2028', t: 'LINE SEPARATOR'},
+ {s: '\u2029', t: 'PARAGRAPH SEPARATOR'},
+ {s: '\u200B', t: 'ZERO WIDTH SPACE (category Cf)'},
];
-for (var i = 0; i < whitespace.length; i++) {
- shouldBe("whitespace["+i+"].s.trim()", "''");
- shouldBe("whitespace["+i+"].s.trimLeft()", "''");
- shouldBe("whitespace["+i+"].s.trimRight()", "''");
- wsString += whitespace[i].s;
+let wsString = '';
+for (let i = 0; i < whitespace.length; i++) {
+ shouldBe("whitespace["+i+"].s.trim()", "''");
+ shouldBe("whitespace["+i+"].s.trimStart()", "''");
+ shouldBe("whitespace["+i+"].s.trimLeft()", "''");
+ shouldBe("whitespace["+i+"].s.trimEnd()", "''");
+ shouldBe("whitespace["+i+"].s.trimRight()", "''");
+ wsString += whitespace[i].s;
}
-trimString = wsString + testString + wsString;
-leftTrimString = testString + wsString; //trimmed from the left
-rightTrimString = wsString + testString; //trimmed from the right
+const testString = 'foo bar';
+const trimString = wsString + testString + wsString;
+const leftTrimString = testString + wsString; //trimmed from the left
+const rightTrimString = wsString + testString; //trimmed from the right
shouldBe("wsString.trim()", "''");
+shouldBe("wsString.trimStart()", "''");
shouldBe("wsString.trimLeft()", "''");
+shouldBe("wsString.trimEnd()", "''");
shouldBe("wsString.trimRight()", "''");
shouldBe("trimString.trim()", "testString");
+shouldBe("trimString.trimStart()", "leftTrimString");
shouldBe("trimString.trimLeft()", "leftTrimString");
+shouldBe("trimString.trimEnd()", "rightTrimString");
shouldBe("trimString.trimRight()", "rightTrimString");
shouldBe("leftTrimString.trim()", "testString");
+shouldBe("leftTrimString.trimStart()", "leftTrimString");
shouldBe("leftTrimString.trimLeft()", "leftTrimString");
+shouldBe("leftTrimString.trimEnd()", "testString");
shouldBe("leftTrimString.trimRight()", "testString");
shouldBe("rightTrimString.trim()", "testString");
+shouldBe("rightTrimString.trimStart()", "testString");
shouldBe("rightTrimString.trimLeft()", "testString");
+shouldBe("rightTrimString.trimEnd()", "rightTrimString");
shouldBe("rightTrimString.trimRight()", "rightTrimString");
-var testValues = ["0", "Infinity", "NaN", "true", "false", "({})", "({toString:function(){return 'wibble'}})", "['an','array']"];
-for (var i = 0; i < testValues.length; i++) {
- shouldBe("trim.call("+testValues[i]+")", "'"+eval(testValues[i])+"'");
- shouldBe("trimLeft.call("+testValues[i]+")", "'"+eval(testValues[i])+"'");
- shouldBe("trimRight.call("+testValues[i]+")", "'"+eval(testValues[i])+"'");
+const testValues = [
+ "0",
+ "Infinity",
+ "NaN",
+ "true",
+ "false",
+ "({})",
+ "({toString:function(){return 'wibble'}})",
+ "['an','array']",
+];
+for (const testValue of testValues) {
+ shouldBe("trim.call("+testValue+")", "'"+eval(testValue)+"'");
+ shouldBe("trimStart.call("+testValue+")", "'"+eval(testValue)+"'");
+ shouldBe("trimLeft.call("+testValue+")", "'"+eval(testValue)+"'");
+ shouldBe("trimEnd.call("+testValue+")", "'"+eval(testValue)+"'");
+ shouldBe("trimRight.call("+testValue+")", "'"+eval(testValue)+"'");
}
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index bd0149ffbc..d76527276a 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -38,7 +38,7 @@ SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
# TODO (machenbach): Share commonalities with mjstest.
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
@@ -90,9 +90,9 @@ class TestCase(testcase.TestCase):
files.append(os.path.join(self.suite.root, "resources/standalone-post.js"))
return files
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
files = self._source_files
- if ctx.isolates:
+ if self._test_config.isolates:
files = files + ['--isolate'] + files
return files
@@ -109,5 +109,5 @@ class TestCase(testcase.TestCase):
os.path.join(self.suite.root, self.path) + '-expected.txt')
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/webkit/webkit.gyp b/deps/v8/test/webkit/webkit.gyp
deleted file mode 100644
index cd4c4b981b..0000000000
--- a/deps/v8/test/webkit/webkit.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'webkit_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'webkit.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/testing/gmock.gyp b/deps/v8/testing/gmock.gyp
deleted file mode 100644
index 89c97e32ec..0000000000
--- a/deps/v8/testing/gmock.gyp
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'targets': [
- {
- 'target_name': 'gmock',
- 'type': 'static_library',
- 'dependencies': [
- 'gtest.gyp:gtest',
- ],
- 'sources': [
- # Sources based on files in r173 of gmock.
- 'gmock/include/gmock/gmock-actions.h',
- 'gmock/include/gmock/gmock-cardinalities.h',
- 'gmock/include/gmock/gmock-generated-actions.h',
- 'gmock/include/gmock/gmock-generated-function-mockers.h',
- 'gmock/include/gmock/gmock-generated-matchers.h',
- 'gmock/include/gmock/gmock-generated-nice-strict.h',
- 'gmock/include/gmock/gmock-matchers.h',
- 'gmock/include/gmock/gmock-spec-builders.h',
- 'gmock/include/gmock/gmock.h',
- 'gmock/include/gmock/internal/gmock-generated-internal-utils.h',
- 'gmock/include/gmock/internal/gmock-internal-utils.h',
- 'gmock/include/gmock/internal/gmock-port.h',
- 'gmock/src/gmock-all.cc',
- 'gmock/src/gmock-cardinalities.cc',
- 'gmock/src/gmock-internal-utils.cc',
- 'gmock/src/gmock-matchers.cc',
- 'gmock/src/gmock-spec-builders.cc',
- 'gmock/src/gmock.cc',
- 'gmock-support.h', # gMock helpers
- 'gmock_custom/gmock/internal/custom/gmock-port.h',
- ],
- 'sources!': [
- 'gmock/src/gmock-all.cc', # Not needed by our build.
- ],
- 'include_dirs': [
- 'gmock_custom',
- 'gmock',
- 'gmock/include',
- ],
- 'all_dependent_settings': {
- 'include_dirs': [
- 'gmock_custom',
- 'gmock/include', # So that gmock headers can find themselves.
- ],
- },
- 'export_dependent_settings': [
- 'gtest.gyp:gtest',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- },
- {
- 'target_name': 'gmock_main',
- 'type': 'static_library',
- 'dependencies': [
- 'gmock',
- ],
- 'sources': [
- 'gmock/src/gmock_main.cc',
- ],
- },
- ],
-}
diff --git a/deps/v8/gypfiles/detect_v8_host_arch.py b/deps/v8/third_party/binutils/detect_v8_host_arch.py
index 89e8286e1f..3a5a5762d9 100644
--- a/deps/v8/gypfiles/detect_v8_host_arch.py
+++ b/deps/v8/third_party/binutils/detect_v8_host_arch.py
@@ -38,6 +38,9 @@ def main():
return 0
def DoMain(_):
+ return DetectHostArch();
+
+def DetectHostArch():
"""Hook to be called from gyp without starting a separate python
interpreter."""
host_arch = platform.machine()
diff --git a/deps/v8/third_party/binutils/download.py b/deps/v8/third_party/binutils/download.py
index a8ad814dbe..99f43708f5 100755
--- a/deps/v8/third_party/binutils/download.py
+++ b/deps/v8/third_party/binutils/download.py
@@ -15,6 +15,7 @@ import re
import shutil
import subprocess
import sys
+from detect_v8_host_arch import DetectHostArch
BINUTILS_DIR = os.path.abspath(os.path.dirname(__file__))
@@ -22,9 +23,6 @@ BINUTILS_FILE = 'binutils.tar.bz2'
BINUTILS_TOOLS = ['bin/ld.gold', 'bin/objcopy', 'bin/objdump']
BINUTILS_OUT = 'Release'
-DETECT_HOST_ARCH = os.path.abspath(os.path.join(
- BINUTILS_DIR, '../../gypfiles/detect_v8_host_arch.py'))
-
def ReadFile(filename):
with file(filename, 'r') as f:
@@ -48,7 +46,7 @@ def GetArch():
return 'x64'
return arch
- return subprocess.check_output(['python', DETECT_HOST_ARCH]).strip()
+ return DetectHostArch()
def FetchAndExtract(arch):
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 1c0864d0d8..d8fdc49505 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -11,7 +11,6 @@ group("gn_all") {
deps = [
":check-static-initializers_run",
":jsfunfuzz_run",
- ":run-deopt-fuzzer_run",
":run-gcmole_run",
":run-num-fuzzer_run",
]
@@ -34,14 +33,6 @@ v8_isolate_run("jsfunfuzz") {
isolate = "jsfunfuzz/jsfunfuzz.isolate"
}
-v8_isolate_run("run-deopt-fuzzer") {
- deps = [
- "..:d8_run",
- ]
-
- isolate = "run-deopt-fuzzer.isolate"
-}
-
v8_isolate_run("run-gcmole") {
deps = [
"..:d8_run",
diff --git a/deps/v8/tools/Makefile.tags b/deps/v8/tools/Makefile.tags
new file mode 100644
index 0000000000..372824dad7
--- /dev/null
+++ b/deps/v8/tools/Makefile.tags
@@ -0,0 +1,30 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+# Variable default definitions. Override them by exporting them in your shell.
+V8_DIR ?= $(realpath $(dir $(lastword $(MAKEFILE_LIST)))/..)
+
+# Support for the GNU GLOBAL Source Code Tag System.
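+# gtags.files depends on the files it currently lists, so a change to any of
+# them regenerates the file list (and, below, the tag databases).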
+$(V8_DIR)/gtags.files: $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+ @(cd $(V8_DIR) && find include src test -name '*.h' -o -name '*.cc' -o -name '*.c') > $@
+
+# We need to manually set the stack limit here, to work around bugs in
+# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
+# Using $(wildcard ...) gracefully ignores non-existing files, so that stale
+# gtags.files after switching branches don't cause recipe failures.
+$(V8_DIR)/GPATH $(V8_DIR)/GRTAGS $(V8_DIR)/GSYMS $(V8_DIR)/GTAGS: $(V8_DIR)/gtags.files $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+ @cd $(V8_DIR) && bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
+
+$(V8_DIR)/tags: $(V8_DIR)/gtags.files $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+ @(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \
+ (echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false)
+ @cd $(V8_DIR) && ctags --fields=+l -L gtags.files
+
+tags: $(V8_DIR)/tags
+
+tags.clean:
+	@rm -f $(addprefix $(V8_DIR)/, gtags.files GPATH GRTAGS GSYMS GTAGS tags)
+
+clean: tags.clean
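+
+# Example usage (from a V8 checkout; assumes GNU make, GNU GLOBAL and
+# Exuberant Ctags are installed):
+#   make -f tools/Makefile.tags tags        # generate the ctags 'tags' file
+#   make -f tools/Makefile.tags tags.clean  # remove all generated tag files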
diff --git a/deps/v8/tools/bigint-tester.py b/deps/v8/tools/bigint-tester.py
index 0452a0d1db..d48d2546f9 100755
--- a/deps/v8/tools/bigint-tester.py
+++ b/deps/v8/tools/bigint-tester.py
@@ -13,9 +13,9 @@ import sys
import tempfile
# Configuration.
-kChars = "0123456789abcdefghijklmnopqrstuvwxyz"
+kChars = "0123456789abcdef"
kBase = 16
-kLineLength = 71 # A bit less than 80.
+kLineLength = 70 # A bit less than 80.
kNumInputsGenerate = 20
kNumInputsStress = 1000
@@ -46,29 +46,36 @@ if (error_count !== 0) {
}"""
def GenRandom(length, negative=kRandom):
- if length == 0: return "0"
+ if length == 0: return "0n"
s = []
if negative == kYes or (negative == kRandom and (random.randint(0, 1) == 0)):
s.append("-") # 50% chance of negative.
+ s.append("0x")
s.append(kChars[random.randint(1, kBase - 1)]) # No leading zero.
for i in range(1, length):
s.append(kChars[random.randint(0, kBase - 1)])
+ s.append("n")
return "".join(s)
-def Format(x, base):
+def Parse(x):
+ assert x[-1] == 'n', x
+ return int(x[:-1], kBase)
+
+def Format(x):
original = x
negative = False
- if x == 0: return "0"
+ if x == 0: return "0n"
if x < 0:
negative = True
x = -x
s = ""
while x > 0:
- s = kChars[x % base] + s
- x = x / base
+ s = kChars[x % kBase] + s
+    x = x // kBase
+ s = "0x" + s + "n"
if negative:
s = "-" + s
- assert int(s, base) == original
+ assert Parse(s) == original
return s
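+# Example (illustrative): Format(-255) returns "-0xffn", and Parse("-0xffn")
+# recovers -255, so the round-trip assertion above holds.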
class TestGenerator(object):
@@ -120,17 +127,16 @@ class UnaryOp(TestGenerator):
# Subclasses should not override anything below.
def EmitOne(self):
x_str = self.GenerateInput()
- x_num = int(x_str, kBase)
+ x_num = Parse(x_str)
result_num = self.GenerateResult(x_num)
- result_str = Format(result_num, kBase)
- return "{\n a: \"%s\",\n r: \"%s\"\n}" % (x_str, result_str)
+ result_str = Format(result_num)
+ return "{\n a: %s,\n r: %s\n}" % (x_str, result_str)
def EmitTestCore(self):
return """\
- var a = BigInt.parseInt(d.a, %(base)d);
- var r = %(op)sa;
- if (d.r !== r.toString(%(base)d)) {
- print("Input: " + a.toString(%(base)d));
+ var r = %(op)sd.a;
+ if (d.r !== r) {
+ print("Input: " + d.a.toString(%(base)d));
print("Result: " + r.toString(%(base)d));
print("Expected: " + d.r);
error_count++;
@@ -152,21 +158,19 @@ class BinaryOp(TestGenerator):
# Subclasses should not override anything below.
def EmitOne(self):
left_str, right_str = self.GenerateInputs()
- left_num = int(left_str, kBase)
- right_num = int(right_str, kBase)
+ left_num = Parse(left_str)
+ right_num = Parse(right_str)
result_num = self.GenerateResult(left_num, right_num)
- result_str = Format(result_num, kBase)
- return ("{\n a: \"%s\",\n b: \"%s\",\n r: \"%s\"\n}" %
+ result_str = Format(result_num)
+ return ("{\n a: %s,\n b: %s,\n r: %s\n}" %
(left_str, right_str, result_str))
def EmitTestCore(self):
return """\
- var a = BigInt.parseInt(d.a, %(base)d);
- var b = BigInt.parseInt(d.b, %(base)d);
- var r = a %(op)s b;
- if (d.r !== r.toString(%(base)d)) {
- print("Input A: " + a.toString(%(base)d));
- print("Input B: " + b.toString(%(base)d));
+ var r = d.a %(op)s d.b;
+ if (d.r !== r) {
+ print("Input A: " + d.a.toString(%(base)d));
+ print("Input B: " + d.b.toString(%(base)d));
print("Result: " + r.toString(%(base)d));
print("Expected: " + d.r);
print("Op: %(op)s");
diff --git a/deps/v8/tools/check-static-initializers.gyp b/deps/v8/tools/check-static-initializers.gyp
deleted file mode 100644
index cfeacfc89f..0000000000
--- a/deps/v8/tools/check-static-initializers.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'check_static_initializers_run',
- 'type': 'none',
- 'dependencies': [
- '../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'check-static-initializers.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/foozzie/BUILD.gn b/deps/v8/tools/clusterfuzz/BUILD.gn
index 532c1faca4..88219600a2 100644
--- a/deps/v8/tools/foozzie/BUILD.gn
+++ b/deps/v8/tools/clusterfuzz/BUILD.gn
@@ -9,6 +9,8 @@ if (v8_correctness_fuzzer) {
sources = [
"v8_commands.py",
"v8_foozzie.py",
+ "v8_foozzie_harness_adjust.js",
+ "v8_fuzz_config.py",
"v8_mock.js",
"v8_mock_archs.js",
"v8_suppressions.js",
diff --git a/deps/v8/tools/clusterfuzz/PRESUBMIT.py b/deps/v8/tools/clusterfuzz/PRESUBMIT.py
new file mode 100644
index 0000000000..0faeb0603c
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/PRESUBMIT.py
@@ -0,0 +1,8 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+def CheckChangeOnCommit(input_api, output_api):
+ tests = input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api, output_api, '.', whitelist=['v8_foozzie_test.py$'])
+ return input_api.RunTests(tests)
diff --git a/deps/v8/tools/foozzie/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index 85b1d7ab77..85b1d7ab77 100644
--- a/deps/v8/tools/foozzie/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
diff --git a/deps/v8/tools/foozzie/testdata/fuzz-123.js b/deps/v8/tools/clusterfuzz/testdata/fuzz-123.js
index fbde5736d4..fbde5736d4 100644
--- a/deps/v8/tools/foozzie/testdata/fuzz-123.js
+++ b/deps/v8/tools/clusterfuzz/testdata/fuzz-123.js
diff --git a/deps/v8/tools/foozzie/testdata/test_d8_1.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_1.py
index 15a93fa535..15a93fa535 100644
--- a/deps/v8/tools/foozzie/testdata/test_d8_1.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_1.py
diff --git a/deps/v8/tools/foozzie/testdata/test_d8_2.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_2.py
index f2bdacfaa1..f2bdacfaa1 100644
--- a/deps/v8/tools/foozzie/testdata/test_d8_2.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_2.py
diff --git a/deps/v8/tools/foozzie/testdata/test_d8_3.py b/deps/v8/tools/clusterfuzz/testdata/test_d8_3.py
index a6c8682b2f..a6c8682b2f 100644
--- a/deps/v8/tools/foozzie/testdata/test_d8_3.py
+++ b/deps/v8/tools/clusterfuzz/testdata/test_d8_3.py
diff --git a/deps/v8/tools/foozzie/testdata/v8_build_config.json b/deps/v8/tools/clusterfuzz/testdata/v8_build_config.json
index ea27b1ccd7..ea27b1ccd7 100644
--- a/deps/v8/tools/foozzie/testdata/v8_build_config.json
+++ b/deps/v8/tools/clusterfuzz/testdata/v8_build_config.json
diff --git a/deps/v8/tools/foozzie/v8_commands.py b/deps/v8/tools/clusterfuzz/v8_commands.py
index 0b3cae722b..0b3cae722b 100644
--- a/deps/v8/tools/foozzie/v8_commands.py
+++ b/deps/v8/tools/clusterfuzz/v8_commands.py
diff --git a/deps/v8/tools/foozzie/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 9bb3512bcf..9bb3512bcf 100755
--- a/deps/v8/tools/foozzie/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js b/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
new file mode 100644
index 0000000000..9509437827
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extensions to mjsunit and other test harnesses added between harness and
+// fuzzing code.
+
+try {
+ // Scope for utility functions.
+ (function() {
+ // Same as in mjsunit.js.
+ function classOf(object) {
+ // Argument must not be null or undefined.
+ var string = Object.prototype.toString.call(object);
+ // String has format [object <ClassName>].
+ return string.substring(8, string.length - 1);
+ }
+
+    // Override prettyPrinted with a version that also recursively prints
+    // object properties (with a depth of 3).
+ let origPrettyPrinted = this.prettyPrinted;
+ this.prettyPrinted = function prettyPrinted(value, depth=3) {
+ if (depth == 0) {
+ return "...";
+ }
+ switch (typeof value) {
+ case "object":
+ if (value === null) return "null";
+ var objectClass = classOf(value);
+ switch (objectClass) {
+ case "Object":
+ var name = value.constructor.name;
+ if (!name)
+ name = "Object";
+ return name + "{" + Object.keys(value).map(function(key, index) {
+ return (
+ prettyPrinted(key, depth - 1) +
+ ": " +
+ prettyPrinted(value[key], depth - 1)
+ );
+ }).join(",") + "}";
+ }
+ }
+ // Fall through to original version for all other types.
+ return origPrettyPrinted(value);
+ }
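+    // Illustrative example: prettyPrinted({a: {b: {c: 1}}}) recurses three
+    // levels deep by default and prints "..." once the depth budget is used.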
+
+ // We're not interested in stack traces.
+ this.MjsUnitAssertionError = function MjsUnitAssertionError(message) {}
+ MjsUnitAssertionError.prototype.toString = function () { return ""; };
+
+ // Do more printing in assertions for more correctness coverage.
+ this.failWithMessage = function failWithMessage(message) {
+ print(prettyPrinted(message))
+ }
+
+ this.fail = function fail(expectedText, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertSame = function assertSame(expected, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertNotSame = function assertNotSame(expected, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertEquals = function assertEquals(expected, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertNotEquals = function assertNotEquals(expected, found, name_opt) {
+ print(prettyPrinted(found));
+ }
+
+ this.assertNull = function assertNull(value, name_opt) {
+ print(prettyPrinted(value));
+ }
+
+ this.assertNotNull = function assertNotNull(value, name_opt) {
+ print(prettyPrinted(value));
+ }
+
+ // Suppress optimization status as it leads to false positives.
+ this.assertUnoptimized = function assertUnoptimized() {}
+
+ this.assertOptimized = function assertOptimized() {}
+
+ this.isNeverOptimize = function isNeverOptimize() {}
+
+ this.isAlwaysOptimize = function isAlwaysOptimize() {}
+
+ this.isInterpreted = function isInterpreted() {}
+
+ this.isOptimized = function isOptimized() {}
+
+ this.isTurboFanned = function isTurboFanned() {}
+ })();
+} catch(e) { }
diff --git a/deps/v8/tools/foozzie/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index ffe18a88d5..3b95111271 100644..100755
--- a/deps/v8/tools/foozzie/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -8,12 +9,43 @@ import sys
import unittest
import v8_foozzie
+import v8_fuzz_config
import v8_suppressions
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FOOZZIE = os.path.join(BASE_DIR, 'v8_foozzie.py')
TEST_DATA = os.path.join(BASE_DIR, 'testdata')
+
+class ConfigTest(unittest.TestCase):
+ def testExperiments(self):
+ """Test that probabilities add up to 100 and that all config names exist.
+ """
+ EXPERIMENTS = v8_fuzz_config.FOOZZIE_EXPERIMENTS
+ CONFIGS = v8_foozzie.CONFIGS
+ assert sum(x[0] for x in EXPERIMENTS) == 100
+ assert all(map(lambda x: x[1] in CONFIGS, EXPERIMENTS))
+ assert all(map(lambda x: x[2] in CONFIGS, EXPERIMENTS))
+ assert all(map(lambda x: x[3].endswith('d8'), EXPERIMENTS))
+
+ def testConfig(self):
+ """Smoke test how to choose experiments.
+
+ When experiment distribution changes this test might change, too.
+ """
+ class Rng(object):
+ def random(self):
+ return 0.5
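+    # random() == 0.5 gives a threshold of 50; the cumulative probabilities
+    # in FOOZZIE_EXPERIMENTS first exceed 50 at the ignition/ignition_turbo
+    # entry (5+5+5+10+5+25 = 55), which yields the flags below.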
+ self.assertEqual(
+ [
+ '--first-config=ignition',
+ '--second-config=ignition_turbo',
+ '--second-d8=d8',
+ ],
+ v8_fuzz_config.Config('foo', Rng()).choose_foozzie_flags(),
+ )
+
+
class UnitTest(unittest.TestCase):
def testDiff(self):
# TODO(machenbach): Mock out suppression configuration.
@@ -109,3 +141,7 @@ class SystemTest(unittest.TestCase):
e = ctx.exception
self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertEquals(expected_output, cut_verbose_output(e.output))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
new file mode 100644
index 0000000000..8cc1939e38
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -0,0 +1,45 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+# List of configuration experiments for correctness fuzzing.
+# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
+# Probabilities must add up to 100.
+FOOZZIE_EXPERIMENTS = [
+ [5, 'ignition', 'ignition_asm', 'd8'],
+ [5, 'ignition', 'trusted', 'd8'],
+ [5, 'ignition', 'trusted_opt', 'd8'],
+ [10, 'ignition', 'slow_path', 'd8'],
+ [5, 'ignition', 'slow_path_opt', 'd8'],
+ [25, 'ignition', 'ignition_turbo', 'd8'],
+ [20, 'ignition', 'ignition_turbo_opt', 'd8'],
+ [5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
+ [5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
+ [5, 'ignition', 'ignition', 'clang_x86/d8'],
+ [5, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
+ [5, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
+]
+
+class Config(object):
+ def __init__(self, name, rng=None):
+ self.name = name
+ self.rng = rng or random.Random()
+
+ def choose_foozzie_flags(self):
+ """Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.
+
+ Returns: List of flags to pass to v8_foozzie.py fuzz harness.
+ """
+ acc = 0
+ threshold = self.rng.random() * 100
+ for prob, first_config, second_config, second_d8 in FOOZZIE_EXPERIMENTS:
+ acc += prob
+ if acc > threshold:
+ return [
+ '--first-config=' + first_config,
+ '--second-config=' + second_config,
+ '--second-d8=' + second_d8,
+ ]
+ assert False
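+
+# Illustrative use (hypothetical caller): each fuzz case draws one experiment:
+#
+#   flags = Config('foo').choose_foozzie_flags()
+#   # e.g. ['--first-config=ignition', '--second-config=ignition_turbo',
+#   #       '--second-d8=d8']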
diff --git a/deps/v8/tools/foozzie/v8_mock.js b/deps/v8/tools/clusterfuzz/v8_mock.js
index 5d15304cd7..2f797dd89f 100644
--- a/deps/v8/tools/foozzie/v8_mock.js
+++ b/deps/v8/tools/clusterfuzz/v8_mock.js
@@ -10,7 +10,7 @@
// This will be overridden in the test cases. The override can be minimized.
-var __PrettyPrint = function __PrettyPrint(msg) { print(msg); };
+var prettyPrinted = function prettyPrinted(msg) { return msg; };
// Mock Math.random.
(function () {
@@ -69,6 +69,9 @@ var __PrettyPrint = function __PrettyPrint(msg) { print(msg); };
if (property == "now") {
return mockDateNow;
}
+ if (property == "prototype") {
+          return origDate.prototype;
+ }
},
}
@@ -121,16 +124,16 @@ Object.defineProperty(
];
Worker = function(code){
try {
- __PrettyPrint(eval(code));
+ print(prettyPrinted(eval(code)));
} catch(e) {
- __PrettyPrint(e);
+ print(prettyPrinted(e));
}
this.getMessage = function(){
index = (index + 1) % 10;
return workerMessages[index];
}
this.postMessage = function(msg){
- __PrettyPrint(msg);
+ print(prettyPrinted(msg));
}
};
})();
diff --git a/deps/v8/tools/foozzie/v8_mock_archs.js b/deps/v8/tools/clusterfuzz/v8_mock_archs.js
index 507f31a3a2..507f31a3a2 100644
--- a/deps/v8/tools/foozzie/v8_mock_archs.js
+++ b/deps/v8/tools/clusterfuzz/v8_mock_archs.js
diff --git a/deps/v8/tools/foozzie/v8_suppressions.js b/deps/v8/tools/clusterfuzz/v8_suppressions.js
index 011e7272ba..011e7272ba 100644
--- a/deps/v8/tools/foozzie/v8_suppressions.js
+++ b/deps/v8/tools/clusterfuzz/v8_suppressions.js
diff --git a/deps/v8/tools/foozzie/v8_suppressions.py b/deps/v8/tools/clusterfuzz/v8_suppressions.py
index 87b1972e94..04f67b2cf9 100644
--- a/deps/v8/tools/foozzie/v8_suppressions.py
+++ b/deps/v8/tools/clusterfuzz/v8_suppressions.py
@@ -72,14 +72,6 @@ IGNORE_SOURCES = {
# label.
# Regular expressions are assumed to be compiled. We use regexp.search.
IGNORE_TEST_CASES = {
- 'slow_path': {
- 'crbug.com/800651':
- re.compile(r'async', re.S),
- },
- 'slow_path_opt': {
- 'crbug.com/800651':
- re.compile(r'async', re.S),
- },
}
# Ignore by output pattern. Map from config->bug->regexp. See IGNORE_TEST_CASES
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index c3dab0a870..6dfd46bf7b 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -20,15 +20,12 @@ All arguments are optional. Most combinations should work, e.g.:
from __future__ import print_function
import errno
-import multiprocessing
import os
import pty
import re
import subprocess
import sys
-BUILD_OPTS_DEFAULT = ""
-BUILD_OPTS_GOMA = "-j1000 -l%d" % (multiprocessing.cpu_count() + 2)
BUILD_TARGETS_TEST = ["d8", "cctest", "unittests"]
BUILD_TARGETS_ALL = ["all"]
@@ -231,11 +228,6 @@ class Config(object):
arch_specific = self.GetTargetCpu() + self.GetV8TargetCpu()
return template % arch_specific
- def WantsGoma(self):
- output = _CallWithOutputNoTerminal(
- "gn args --short --list=use_goma %s" % (GetPath(self.arch, self.mode)))
- return "true" in output
-
def Build(self):
path = GetPath(self.arch, self.mode)
args_gn = os.path.join(path, "args.gn")
@@ -247,14 +239,13 @@ class Config(object):
code = _Call("gn gen %s" % path)
if code != 0: return code
targets = " ".join(self.targets)
- build_opts = BUILD_OPTS_GOMA if self.WantsGoma() else BUILD_OPTS_DEFAULT
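+    # autoninja picks an appropriate parallelism level on its own (including
+    # for goma builds), so the explicit -j/-l build options are not needed.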
# The implementation of mksnapshot failure detection relies on
# the "pty" module and GDB presence, so skip it on non-Linux.
if "linux" not in sys.platform:
- return _Call("ninja -C %s %s %s" % (path, build_opts, targets))
+ return _Call("autoninja -C %s %s" % (path, targets))
- return_code, output = _CallWithOutput("ninja -C %s %s %s" %
- (path, build_opts, targets))
+ return_code, output = _CallWithOutput("autoninja -C %s %s" %
+ (path, targets))
if return_code != 0 and "FAILED: gen/snapshot.cc" in output:
csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
match = csa_trap.search(output)
diff --git a/deps/v8/tools/gcmole/download_gcmole_tools.py b/deps/v8/tools/gcmole/download_gcmole_tools.py
deleted file mode 100755
index af27723da6..0000000000
--- a/deps/v8/tools/gcmole/download_gcmole_tools.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import re
-import subprocess
-
-GCMOLE_PATH = os.path.dirname(os.path.abspath(__file__))
-SHA1_PATH = os.path.join(GCMOLE_PATH, 'gcmole-tools.tar.gz.sha1')
-
-if re.search(r'\bgcmole=1', os.environ.get('GYP_DEFINES', '')):
- subprocess.check_call([
- 'download_from_google_storage',
- '-b', 'chrome-v8-gcmole',
- '-u', '--no_resume',
- '-s', SHA1_PATH,
- '--platform=linux*'
- ])
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index 862b7b0247..d832041361 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -181,34 +181,6 @@ function InvokeClangPluginForEachFile(filenames, cfg, func)
end
-------------------------------------------------------------------------------
--- GYP file parsing
-
--- TODO(machenbach): Remove this when deprecating gyp.
-local function ParseGYPFile()
- local result = {}
- local gyp_files = {
- { "src/v8.gyp", "'([^']-%.cc)'", "src/" },
- { "test/cctest/cctest.gyp", "'(test-[^']-%.cc)'", "test/cctest/" }
- }
-
- for i = 1, #gyp_files do
- local filename = gyp_files[i][1]
- local pattern = gyp_files[i][2]
- local prefix = gyp_files[i][3]
- local gyp_file = assert(io.open(filename), "failed to open GYP file")
- local gyp = gyp_file:read('*a')
- for condition, sources in
- gyp:gmatch "%[.-### gcmole%((.-)%) ###(.-)%]" do
- if result[condition] == nil then result[condition] = {} end
- for file in sources:gmatch(pattern) do
- table.insert(result[condition], prefix .. file)
- end
- end
- gyp_file:close()
- end
-
- return result
-end
local function ParseGNFile()
local result = {}
@@ -258,34 +230,8 @@ local function BuildFileList(sources, props)
end
-local gyp_sources = ParseGYPFile()
local gn_sources = ParseGNFile()
--- TODO(machenbach): Remove this comparison logic when deprecating gyp.
-local function CompareSources(sources1, sources2, what)
- for condition, files1 in pairs(sources1) do
- local files2 = sources2[condition]
- assert(
- files2 ~= nil,
- "Missing gcmole condition in " .. what .. ": " .. condition)
-
- -- Turn into set for speed.
- files2_set = {}
- for i, file in pairs(files2) do files2_set[file] = true end
-
- for i, file in pairs(files1) do
- assert(
- files2_set[file] ~= nil,
- "Missing file " .. file .. " in " .. what .. " for condition " ..
- condition)
- end
- end
-end
-
-CompareSources(gyp_sources, gn_sources, "GN")
-CompareSources(gn_sources, gyp_sources, "GYP")
-
-
local function FilesForArch(arch)
return BuildFileList(gn_sources, { os = 'linux',
arch = arch,
diff --git a/deps/v8/tools/gcmole/run_gcmole.gyp b/deps/v8/tools/gcmole/run_gcmole.gyp
deleted file mode 100644
index 7d206bf412..0000000000
--- a/deps/v8/tools/gcmole/run_gcmole.gyp
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'run_gcmole_run',
- 'type': 'none',
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'run-gcmole.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/gcov.sh b/deps/v8/tools/gcov.sh
deleted file mode 100755
index d7277043d4..0000000000
--- a/deps/v8/tools/gcov.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Build and collect code coverage data, cummulatively, on specified architectures.
-
-BUILD_TYPE=${BUILD_TYPE:-Release}
-
-declare -A modes=( [Release]=release [Debug]=debug )
-declare -A pairs=( [arm]=ia32 [arm64]=x64 [ia32]=ia32 [x64]=x64 )
-
-if [ -z ${modes[$BUILD_TYPE]} ]
-then
- echo "BUILD_TYPE must be {<unspecified>|Release|Debug}"
- echo "Release is default"
- exit
-fi
-
-mode=${modes[$BUILD_TYPE]}
-
-echo "Using build:" $BUILD_TYPE
-v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
-work_dir=$v8_root/cov
-build_dir=$work_dir/$BUILD_TYPE
-
-if [ -z $@ ]
-then
- echo "Pass at least one target architecture"
- echo "Supported architectures: x64 ia32 arm arm64"
- echo ""
- echo "Example: ./tools/gcov.sh x64 arm"
- echo ""
- echo "Optionally, set BUILD_TYPE env variable to"
- echo "either Debug or Release, to use the corresponding build."
- echo "By default, BUILD_TYPE is Release."
- echo ""
- echo "Example: BUILD_TYPE=Debug ./tools/gcov.sh x64 arm"
- echo ""
- exit
-fi
-
-lcov --directory=$build_dir --zerocounters
-
-# Mapping v8 build terminology to gnu compiler terminology:
-# target_arch is the host, and
-# v8_target_arch is the target
-
-for v8_target_arch in "$@"
-do
- target_arch=${pairs[$v8_target_arch]}
- if [ -z $target_arch ]
- then
- echo "Skipping unknown architecture: " $v8_target_arch
- else
- echo "Building" $v8_target_arch
- GYP_DEFINES="component=static_library use_goma=1 target_arch=$target_arch v8_target_arch=$v8_target_arch coverage=1 clang=0" python $v8_root/gypfiles/gyp_v8.py -G output_dir=$work_dir
- ninja -C $build_dir -j2000
- $v8_root/tools/run-tests.py --gcov-coverage --arch=$v8_target_arch --mode=$mode --shell-dir=$build_dir --variants=exhaustive
- fi
-done
-
-lcov --directory=$build_dir --capture --output-file $work_dir/app.info
-genhtml --output-directory $work_dir/html $work_dir/app.info
-echo "Done"
-echo "Output available at: " $work_dir/html/index.html
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 043ecc306d..b12809739a 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -226,6 +226,11 @@ consts_misc = [
'value': 'NumberDictionaryShape::kPrefixSize' },
{ 'name': 'numberdictionaryshape_entry_size',
'value': 'NumberDictionaryShape::kEntrySize' },
+
+ { 'name': 'simplenumberdictionaryshape_prefix_size',
+ 'value': 'SimpleNumberDictionaryShape::kPrefixSize' },
+ { 'name': 'simplenumberdictionaryshape_entry_size',
+ 'value': 'SimpleNumberDictionaryShape::kEntrySize' },
];
#
diff --git a/deps/v8/gypfiles/get_landmines.py b/deps/v8/tools/get_landmines.py
index 6137648e6d..ff4831dff5 100755
--- a/deps/v8/gypfiles/get_landmines.py
+++ b/deps/v8/tools/get_landmines.py
@@ -11,10 +11,16 @@ This file emits the list of reasons why a particular build needs to be clobbered
import sys
-def main():
+def print_landmines(): # pylint: disable=invalid-name
"""
ALL LANDMINES ARE EMITTED FROM HERE.
"""
+  # DO NOT add landmines as part of a regular CL. Landmines are a last-resort
+  # band-aid fix, to be used only when a CL that has already landed has a
+  # build dependency bug and all bots need to be cleaned up. If you're writing
+  # a new CL that causes build dependency problems, fix the dependency
+  # problems instead of adding a landmine.
+ # See the Chromium version in src/build/get_landmines.py for usage examples.
print 'Need to clobber after ICU52 roll.'
print 'Landmines test.'
print 'Activating MSVS 2013.'
@@ -35,5 +41,10 @@ def main():
return 0
+def main():
+ print_landmines()
+ return 0
+
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 5d9ffff607..570ef92118 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -185,6 +185,10 @@ MAGIC_MARKER_PAIRS = (
(0xbbbbbbbb, 0xbbbbbbbb),
(0xfefefefe, 0xfefefeff),
)
+# See StackTraceFailureMessage in isolate.h
+STACK_TRACE_MARKER = 0xdecade30
+# See FailureMessage in logging.cc
+ERROR_MESSAGE_MARKER = 0xdecade10
# Set of structures and constants that describe the layout of minidump
# files. Based on MSDN and Google Breakpad.
@@ -2105,11 +2109,9 @@ class InspectionPadawan(object):
"""
# Only look at the first 1k words on the stack
ptr_size = self.reader.PointerSize()
- if start is None:
- start = self.reader.ExceptionSP()
+ if start is None: start = self.reader.ExceptionSP()
if not self.reader.IsValidAddress(start): return start
end = start + ptr_size * 1024 * 4
- message_start = 0
magic1 = None
for slot in xrange(start, end, ptr_size):
if not self.reader.IsValidAddress(slot + ptr_size): break
@@ -2117,10 +2119,64 @@ class InspectionPadawan(object):
magic2 = self.reader.ReadUIntPtr(slot + ptr_size)
pair = (magic1 & 0xFFFFFFFF, magic2 & 0xFFFFFFFF)
if pair in MAGIC_MARKER_PAIRS:
- message_slot = slot + ptr_size * 4
- message_start = self.reader.ReadUIntPtr(message_slot)
- break
- if message_start == 0:
+ return self.TryExtractOldStyleStackTrace(slot, start, end,
+ print_message)
+ if pair[0] == STACK_TRACE_MARKER:
+ return self.TryExtractStackTrace(slot, start, end, print_message)
+ elif pair[0] == ERROR_MESSAGE_MARKER:
+ return self.TryExtractErrorMessage(slot, start, end, print_message)
+    # Simple fallback in case no stack trace object was found.
+ return self.TryExtractOldStyleStackTrace(0, start, end,
+ print_message)
+
+ def TryExtractStackTrace(self, slot, start, end, print_message):
+ ptr_size = self.reader.PointerSize()
+ assert self.reader.ReadUIntPtr(slot) & 0xFFFFFFFF == STACK_TRACE_MARKER
+    end_marker = STACK_TRACE_MARKER + 1
+ header_size = 10
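+    # The header is the marker slot plus the nine fields printed below
+    # (isolate, ptr1-4, codeObject1-4), i.e. 10 pointer-sized slots.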
+ # Look for the end marker after the fields and the message buffer.
+    end_search = start + (32 * 1024) + (header_size * ptr_size)
+ end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
+ if not end_slot: return start
+ print "Stack Message (start=%s):" % self.heap.FormatIntPtr(slot)
+ slot += ptr_size
+ for name in ("isolate","ptr1", "ptr2", "ptr3", "ptr4", "codeObject1",
+ "codeObject2", "codeObject3", "codeObject4"):
+ value = self.reader.ReadUIntPtr(slot)
+ print " %s: %s" % (name.rjust(14), self.heap.FormatIntPtr(value))
+ slot += ptr_size
+ print " message start: %s" % self.heap.FormatIntPtr(slot)
+ stack_start = end_slot + ptr_size
+ print " stack_start: %s" % self.heap.FormatIntPtr(stack_start)
+ (message_start, message) = self.FindFirstAsciiString(slot)
+ self.FormatStackTrace(message, print_message)
+ return stack_start
+
+ def FindPtr(self, expected_value, start, end):
+ ptr_size = self.reader.PointerSize()
+ for slot in xrange(start, end, ptr_size):
+ if not self.reader.IsValidAddress(slot): return None
+ value = self.reader.ReadUIntPtr(slot)
+ if value == expected_value: return slot
+ return None
+
+ def TryExtractErrorMessage(self, slot, start, end, print_message):
+    ptr_size = self.reader.PointerSize()
+    end_marker = ERROR_MESSAGE_MARKER + 1
+    header_size = 1
+    end_search = start + 1024 + (header_size * ptr_size)
+ end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
+ if not end_slot: return start
+ print "Error Message (start=%s):" % self.heap.FormatIntPtr(slot)
+ slot += ptr_size
+ (message_start, message) = self.FindFirstAsciiString(slot)
+ self.FormatStackTrace(message, print_message)
+ stack_start = end_slot + ptr_size
+ return stack_start
+
+ def TryExtractOldStyleStackTrace(self, message_slot, start, end,
+ print_message):
+ ptr_size = self.reader.PointerSize()
+ if message_slot == 0:
"""
On Mac we don't always get proper magic markers, so just try printing
the first long ascii string found on the stack.
@@ -2130,6 +2186,7 @@ class InspectionPadawan(object):
message_start, message = self.FindFirstAsciiString(start, end, 128)
if message_start is None: return start
else:
+ message_start = self.reader.ReadUIntPtr(message_slot + ptr_size * 4)
message = self.reader.ReadAsciiString(message_start)
stack_start = message_start + len(message) + 1
# Make sure the address is word aligned
@@ -2149,10 +2206,15 @@ class InspectionPadawan(object):
print " message start: %s" % self.heap.FormatIntPtr(message_start)
print " stack_start: %s" % self.heap.FormatIntPtr(stack_start )
print ""
+ self.FormatStackTrace(message, print_message)
+ return stack_start
+
+ def FormatStackTrace(self, message, print_message):
if not print_message:
print " Use `dsa` to print the message with annotated addresses."
print ""
- return stack_start
+ return
+ ptr_size = self.reader.PointerSize()
# Annotate all addresses in the dumped message
prog = re.compile("[0-9a-fA-F]{%s}" % ptr_size*2)
addresses = list(set(prog.findall(message)))
@@ -2166,7 +2228,7 @@ class InspectionPadawan(object):
print message
print "="*80
print ""
- return stack_start
+
def TryInferFramePointer(self, slot, address):
""" Assume we have a framepointer if we find 4 consecutive links """
diff --git a/deps/v8/tools/gyp_flag_compare.py b/deps/v8/tools/gyp_flag_compare.py
deleted file mode 100755
index 86fa5c4098..0000000000
--- a/deps/v8/tools/gyp_flag_compare.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Given the output of -t commands from a ninja build for a gyp and GN generated
-build, report on differences between the command lines."""
-
-
-import os
-import shlex
-import subprocess
-import sys
-
-
-# Must be in v8/.
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-os.chdir(BASE_DIR)
-
-
-g_total_differences = 0
-
-
-def FindAndRemoveArgWithValue(command_line, argname):
- """Given a command line as a list, remove and return the value of an option
- that takes a value as a separate entry.
-
- Modifies |command_line| in place.
- """
- if argname not in command_line:
- return ''
- location = command_line.index(argname)
- value = command_line[location + 1]
- command_line[location:location + 2] = []
- return value
-
-
-def MergeSpacedArgs(command_line, argname):
- """Combine all arguments |argname| with their values, separated by a space."""
- i = 0
- result = []
- while i < len(command_line):
- arg = command_line[i]
- if arg == argname:
- result.append(arg + ' ' + command_line[i + 1])
- i += 1
- else:
- result.append(arg)
- i += 1
- return result
-
-
-def NormalizeSymbolArguments(command_line):
- """Normalize -g arguments.
-
- If there's no -g args, it's equivalent to -g0. -g2 is equivalent to -g.
- Modifies |command_line| in place.
- """
- # Strip -g0 if there's no symbols.
- have_some_symbols = False
- for x in command_line:
- if x.startswith('-g') and x != '-g0':
- have_some_symbols = True
- if not have_some_symbols and '-g0' in command_line:
- command_line.remove('-g0')
-
- # Rename -g2 to -g.
- if '-g2' in command_line:
- command_line[command_line.index('-g2')] = '-g'
-
-
-def GetFlags(lines, build_dir):
- """Turn a list of command lines into a semi-structured dict."""
- is_win = sys.platform == 'win32'
- flags_by_output = {}
- for line in lines:
- command_line = shlex.split(line.strip(), posix=not is_win)[1:]
-
- output_name = FindAndRemoveArgWithValue(command_line, '-o')
- dep_name = FindAndRemoveArgWithValue(command_line, '-MF')
-
- NormalizeSymbolArguments(command_line)
-
- command_line = MergeSpacedArgs(command_line, '-Xclang')
-
- cc_file = [x for x in command_line if x.endswith('.cc') or
- x.endswith('.c') or
- x.endswith('.cpp')]
- if len(cc_file) != 1:
- print 'Skipping %s' % command_line
- continue
- assert len(cc_file) == 1
-
- if is_win:
- rsp_file = [x for x in command_line if x.endswith('.rsp')]
- assert len(rsp_file) <= 1
- if rsp_file:
- rsp_file = os.path.join(build_dir, rsp_file[0][1:])
- with open(rsp_file, "r") as open_rsp_file:
- command_line = shlex.split(open_rsp_file, posix=False)
-
- defines = [x for x in command_line if x.startswith('-D')]
- include_dirs = [x for x in command_line if x.startswith('-I')]
- dash_f = [x for x in command_line if x.startswith('-f')]
- warnings = \
- [x for x in command_line if x.startswith('/wd' if is_win else '-W')]
- others = [x for x in command_line if x not in defines and \
- x not in include_dirs and \
- x not in dash_f and \
- x not in warnings and \
- x not in cc_file]
-
- for index, value in enumerate(include_dirs):
- if value == '-Igen':
- continue
- path = value[2:]
- if not os.path.isabs(path):
- path = os.path.join(build_dir, path)
- include_dirs[index] = '-I' + os.path.normpath(path)
-
- # GYP supports paths above the source root like <(DEPTH)/../foo while such
- # paths are unsupported by gn. But gn allows to use system-absolute paths
- # instead (paths that start with single '/'). Normalize all paths.
- cc_file = [os.path.normpath(os.path.join(build_dir, cc_file[0]))]
-
- # Filter for libFindBadConstructs.so having a relative path in one and
- # absolute path in the other.
- others_filtered = []
- for x in others:
- if x.startswith('-Xclang ') and x.endswith('libFindBadConstructs.so'):
- others_filtered.append(
- '-Xclang ' +
- os.path.join(os.getcwd(),
- os.path.normpath(
- os.path.join('out/gn_flags', x.split(' ', 1)[1]))))
- elif x.startswith('-B'):
- others_filtered.append(
- '-B' +
- os.path.join(os.getcwd(),
- os.path.normpath(os.path.join('out/gn_flags', x[2:]))))
- else:
- others_filtered.append(x)
- others = others_filtered
-
- flags_by_output[cc_file[0]] = {
- 'output': output_name,
- 'depname': dep_name,
- 'defines': sorted(defines),
- 'include_dirs': sorted(include_dirs), # TODO(scottmg): This is wrong.
- 'dash_f': sorted(dash_f),
- 'warnings': sorted(warnings),
- 'other': sorted(others),
- }
- return flags_by_output
-
-
-def CompareLists(gyp, gn, name, dont_care_gyp=None, dont_care_gn=None):
- """Return a report of any differences between gyp and gn lists, ignoring
- anything in |dont_care_{gyp|gn}| respectively."""
- global g_total_differences
- if not dont_care_gyp:
- dont_care_gyp = []
- if not dont_care_gn:
- dont_care_gn = []
- output = ''
- if gyp[name] != gn[name]:
- gyp_set = set(gyp[name])
- gn_set = set(gn[name])
- missing_in_gyp = gyp_set - gn_set
- missing_in_gn = gn_set - gyp_set
- missing_in_gyp -= set(dont_care_gyp)
- missing_in_gn -= set(dont_care_gn)
- if missing_in_gyp or missing_in_gn:
- output += ' %s differ:\n' % name
- if missing_in_gyp:
- output += ' In gyp, but not in GN:\n %s' % '\n '.join(
- sorted(missing_in_gyp)) + '\n'
- g_total_differences += len(missing_in_gyp)
- if missing_in_gn:
- output += ' In GN, but not in gyp:\n %s' % '\n '.join(
- sorted(missing_in_gn)) + '\n\n'
- g_total_differences += len(missing_in_gn)
- return output
-
-
-def Run(command_line):
- """Run |command_line| as a subprocess and return stdout. Raises on error."""
- try:
- return subprocess.check_output(command_line, shell=True)
- except subprocess.CalledProcessError as e:
- # Rescue the output we got until the exception happened.
- print '#### Stdout: ####################################################'
- print e.output
- print '#################################################################'
- raise
-
-
-def main():
- if len(sys.argv) < 4:
- print ('usage: %s gn_outdir gyp_outdir gn_target '
- '[gyp_target1, gyp_target2, ...]' % __file__)
- return 1
-
- if len(sys.argv) == 4:
- sys.argv.append(sys.argv[3])
- gn_out_dir = sys.argv[1]
- print >> sys.stderr, 'Expecting gn outdir in %s...' % gn_out_dir
- gn = Run('ninja -C %s -t commands %s' % (gn_out_dir, sys.argv[3]))
- if sys.platform == 'win32':
- # On Windows flags are stored in .rsp files which are created during build.
- print >> sys.stderr, 'Building in %s...' % gn_out_dir
- Run('ninja -C %s -d keeprsp %s' % (gn_out_dir, sys.argv[3]))
-
- gyp_out_dir = sys.argv[2]
- print >> sys.stderr, 'Expecting gyp outdir in %s...' % gyp_out_dir
- gyp = Run('ninja -C %s -t commands %s' % (gyp_out_dir, " ".join(sys.argv[4:])))
- if sys.platform == 'win32':
- # On Windows flags are stored in .rsp files which are created during build.
- print >> sys.stderr, 'Building in %s...' % gyp_out_dir
- Run('ninja -C %s -d keeprsp %s' % (gyp_out_dir, " ".join(sys.argv[4:])))
-
- all_gyp_flags = GetFlags(gyp.splitlines(),
- os.path.join(os.getcwd(), gyp_out_dir))
- all_gn_flags = GetFlags(gn.splitlines(),
- os.path.join(os.getcwd(), gn_out_dir))
- gyp_files = set(all_gyp_flags.keys())
- gn_files = set(all_gn_flags.keys())
- different_source_list = gyp_files != gn_files
- if different_source_list:
- print 'Different set of sources files:'
- print ' In gyp, not in GN:\n %s' % '\n '.join(
- sorted(gyp_files - gn_files))
- print ' In GN, not in gyp:\n %s' % '\n '.join(
- sorted(gn_files - gyp_files))
- print '\nNote that flags will only be compared for files in both sets.\n'
- file_list = gyp_files & gn_files
- files_with_given_differences = {}
- for filename in sorted(file_list):
- gyp_flags = all_gyp_flags[filename]
- gn_flags = all_gn_flags[filename]
- differences = CompareLists(gyp_flags, gn_flags, 'dash_f')
- differences += CompareLists(gyp_flags, gn_flags, 'defines')
- differences += CompareLists(gyp_flags, gn_flags, 'include_dirs',
- ['-I%s' % os.path.dirname(BASE_DIR)])
- differences += CompareLists(gyp_flags, gn_flags, 'warnings',
- # More conservative warnings in GN we consider to be OK.
- dont_care_gyp=[
- '/wd4091', # 'keyword' : ignored on left of 'type' when no variable
- # is declared.
- '/wd4456', # Declaration hides previous local declaration.
- '/wd4457', # Declaration hides function parameter.
- '/wd4458', # Declaration hides class member.
- '/wd4459', # Declaration hides global declaration.
- '/wd4702', # Unreachable code.
- '/wd4800', # Forcing value to bool 'true' or 'false'.
- '/wd4838', # Conversion from 'type' to 'type' requires a narrowing
- # conversion.
- ] if sys.platform == 'win32' else None,
- dont_care_gn=[
- '-Wendif-labels',
- '-Wextra',
- '-Wsign-compare',
- ] if not sys.platform == 'win32' else None)
- differences += CompareLists(gyp_flags, gn_flags, 'other')
- if differences:
- files_with_given_differences.setdefault(differences, []).append(filename)
-
- for diff, files in files_with_given_differences.iteritems():
- print '\n'.join(sorted(files))
- print diff
-
- print 'Total differences:', g_total_differences
- # TODO(scottmg): Return failure on difference once we're closer to identical.
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/deps/v8/tools/heap-stats/README.md b/deps/v8/tools/heap-stats/README.md
index 70083fe257..9cf6e5673c 100644
--- a/deps/v8/tools/heap-stats/README.md
+++ b/deps/v8/tools/heap-stats/README.md
@@ -6,8 +6,9 @@ maintaining internal state versus actually allocated by the user.
The tool consumes log files produced by d8 (or Chromium) by passing
`--trace-gc-object-stats` or a trace captured using Chrome's tracing
-infrastructure. Chrome trace files need to be unpacked before they can
-be used though.
+infrastructure. Chrome trace files can be processed either as gzipped or as
+raw text files.
+
Hosting requires a web server, e.g.:
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index 0a836d5f6c..16a4b53e49 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -6,15 +6,10 @@
const CATEGORIES = new Map([
[
'user', new Set([
- '*FIXED_ARRAY_CONTEXT_SUB_TYPE',
- '*FIXED_ARRAY_COPY_ON_WRITE_SUB_TYPE',
- '*FIXED_ARRAY_DICTIONARY_PROPERTIES_SUB_TYPE',
- '*FIXED_ARRAY_JS_COLLECTION_SUB_TYPE',
- '*FIXED_ARRAY_JS_WEAK_COLLECTION_SUB_TYPE',
- '*FIXED_ARRAY_PACKED_ELEMENTS_SUB_TYPE',
'CONS_ONE_BYTE_STRING_TYPE',
'CONS_STRING_TYPE',
'DESCRIPTOR_ARRAY_TYPE',
+ 'ELEMENTS_TYPE',
'EXTERNAL_INTERNALIZED_STRING_TYPE',
'EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
'EXTERNAL_ONE_BYTE_STRING_TYPE',
@@ -30,6 +25,9 @@ const CATEGORIES = new Map([
'FIXED_UINT32_ARRAY_TYPE',
'FIXED_UINT8_ARRAY_TYPE',
'FIXED_UINT8_CLAMPED_ARRAY_TYPE',
+ 'FUNCTION_CONTEXT_TYPE',
+ 'GLOBAL_ELEMENTS_TYPE',
+ 'GLOBAL_PROPERTIES_TYPE',
'HEAP_NUMBER_TYPE',
'INTERNALIZED_STRING_TYPE',
'JS_ARGUMENTS_TYPE',
@@ -56,13 +54,17 @@ const CATEGORIES = new Map([
'JS_PROMISE_TYPE',
'JS_REGEXP_TYPE',
'JS_SET_TYPE',
+ 'JS_SET_VALUE_ITERATOR_TYPE',
'JS_STRING_ITERATOR_TYPE',
+ 'JS_TO_WASM_FUNCTION',
'JS_TYPED_ARRAY_TYPE',
'JS_VALUE_TYPE',
'JS_WEAK_MAP_TYPE',
'MUTABLE_HEAP_NUMBER_TYPE',
+ 'NATIVE_CONTEXT_TYPE',
'ONE_BYTE_INTERNALIZED_STRING_TYPE',
'ONE_BYTE_STRING_TYPE',
+ 'OBJECT_PROPERTY_DICTIONARY_TYPE',
'PROPERTY_ARRAY_TYPE',
'SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE',
'SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
@@ -74,6 +76,9 @@ const CATEGORIES = new Map([
'SYMBOL_TYPE',
'THIN_ONE_BYTE_STRING_TYPE',
'THIN_STRING_TYPE',
+ 'WASM_INSTANCE_TYPE',
+ 'WASM_MEMORY_TYPE',
+ 'WASM_MODULE_TYPE',
])
],
[
@@ -84,26 +89,16 @@ const CATEGORIES = new Map([
'ALLOCATION_MEMENTO_TYPE',
'ALLOCATION_SITE_TYPE',
'BOILERPLATE_ELEMENTS_TYPE',
- 'BOILERPLATE_NAME_DICTIONARY_TYPE',
'BOILERPLATE_PROPERTY_ARRAY_TYPE',
+ 'BOILERPLATE_PROPERTY_DICTIONARY_TYPE',
'BYTE_ARRAY_TYPE',
'CELL_TYPE',
+ 'CODE_STUBS_TABLE_TYPE',
'CONTEXT_EXTENSION_TYPE',
- '*FIXED_ARRAY_DEPENDENT_CODE_SUB_TYPE',
- '*FIXED_ARRAY_ENUM_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_ENUM_INDICES_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_NUMBER_STRING_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_PROTOTYPE_USERS_SUB_TYPE',
- '*FIXED_ARRAY_REGEXP_MULTIPLE_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_RETAINED_MAPS_SUB_TYPE',
- '*FIXED_ARRAY_SCOPE_INFO_SUB_TYPE',
- '*FIXED_ARRAY_SCRIPT_LIST_SUB_TYPE',
- '*FIXED_ARRAY_SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_STRING_SPLIT_CACHE_SUB_TYPE',
- '*FIXED_ARRAY_TEMPLATE_INFO_SUB_TYPE',
- '*FIXED_ARRAY_WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE',
+ 'ENUM_CACHE_TYPE',
+ 'ENUM_INDICES_CACHE_TYPE',
'FOREIGN_TYPE',
+ 'FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE',
'FUNCTION_TEMPLATE_INFO_TYPE',
'INTERCEPTOR_INFO_TYPE',
'JS_API_OBJECT_TYPE',
@@ -111,41 +106,65 @@ const CATEGORIES = new Map([
'JS_OBJECT_BOILERPLATE_TYPE',
'JS_SPECIAL_API_OBJECT_TYPE',
'MAP_TYPE',
+ 'NUMBER_STRING_CACHE_TYPE',
'OBJECT_TEMPLATE_INFO_TYPE',
+ 'OBJECT_TO_CODE_TYPE',
'ODDBALL_TYPE',
'PROMISE_REACTION_JOB_INFO_TYPE',
'PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE',
'PROPERTY_CELL_TYPE',
'PROTOTYPE_INFO_TYPE',
+ 'PROTOTYPE_USERS_TYPE',
+ 'REGEXP_MULTIPLE_CACHE_TYPE',
+ 'RETAINED_MAPS_TYPE',
+ 'SCOPE_INFO_TYPE',
+ 'SCRIPT_LIST_TYPE',
+ 'SCRIPT_SHARED_FUNCTION_INFOS_TYPE',
+ 'SERIALIZED_OBJECTS_TYPE',
+ 'SINGLE_CHARACTER_STRING_CACHE_TYPE',
'STACK_FRAME_INFO_TYPE',
+ 'STRING_SPLIT_CACHE_TYPE',
+ 'STRING_TABLE_TYPE',
'TRANSITION_ARRAY_TYPE',
'WEAK_CELL_TYPE',
+ 'WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE',
])
],
[
'code', new Set([
- '*CODE_BUILTIN',
- '*CODE_BYTECODE_HANDLER',
- '*CODE_OPTIMIZED_FUNCTION',
- '*CODE_REGEXP',
- '*CODE_STUB',
- '*FIXED_ARRAY_BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE',
- '*FIXED_ARRAY_BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE',
- '*FIXED_ARRAY_CODE_STUBS_TABLE_SUB_TYPE',
- '*FIXED_ARRAY_COMPILATION_CACHE_TABLE_SUB_TYPE',
- '*FIXED_ARRAY_DEOPTIMIZATION_DATA_SUB_TYPE',
- '*FIXED_ARRAY_EMBEDDED_OBJECT_SUB_TYPE',
- '*FIXED_ARRAY_HANDLER_TABLE_SUB_TYPE',
- '*FIXED_ARRAY_NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE',
- '*FIXED_ARRAY_OPTIMIZED_CODE_LITERALS_SUB_TYPE',
- '*FIXED_ARRAY_SHARED_FUNCTION_INFOS_SUB_TYPE',
+ 'BUILTIN',
+ 'BYTECODE_HANDLER',
+ 'OPTIMIZED_FUNCTION',
+ 'REGEXP',
+ 'STUB',
+ 'BYTECODE_ARRAY_CONSTANT_POOL_TYPE',
+ 'BYTECODE_ARRAY_HANDLER_TABLE_TYPE',
'BYTECODE_ARRAY_TYPE',
'CODE_DATA_CONTAINER_TYPE',
+ 'DEOPTIMIZATION_DATA_TYPE',
+ 'EMBEDDED_OBJECT_TYPE',
+ 'FEEDBACK_METADATA_TYPE',
+ 'FEEDBACK_VECTOR_HEADER_TYPE',
+ 'FEEDBACK_VECTOR_ENTRY_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_CALL_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_ENUM_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_LOAD_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_OTHER_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_STORE_TYPE',
+ 'FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE',
'FEEDBACK_VECTOR_TYPE',
'LOAD_HANDLER_TYPE',
+ 'NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE',
+ 'OPTIMIZED_CODE_LITERALS_TYPE',
+ 'SCRIPT_SOURCE_EXTERNAL_TYPE',
+ 'SCRIPT_SOURCE_NON_EXTERNAL_TYPE',
'SCRIPT_TYPE',
'SHARED_FUNCTION_INFO_TYPE',
'STORE_HANDLER_TYPE',
+ 'UNCOMPILED_JS_FUNCTION_TYPE',
+ 'UNCOMPILED_SHARED_FUNCTION_INFO_TYPE',
])
],
['unclassified', new Set()],
@@ -158,10 +177,3 @@ const CATEGORY_NAMES = new Map([
['code', 'Code'],
['unclassified', 'Unclassified'],
]);
-
-// Instance types that are constructed from their sub types and
-// should thus be hidden.
-const IGNORED_INSTANCE_TYPES = new Set([
- 'FIXED_ARRAY_TYPE',
- 'CODE_TYPE',
-]);
diff --git a/deps/v8/tools/heap-stats/details-selection.html b/deps/v8/tools/heap-stats/details-selection.html
index d60aef9669..4680e8e4a1 100644
--- a/deps/v8/tools/heap-stats/details-selection.html
+++ b/deps/v8/tools/heap-stats/details-selection.html
@@ -3,6 +3,10 @@ Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
<template id="details-selection-template">
<style>
+#dataSelectionSection {
+ display: none;
+}
+
.box {
border-left: dashed 1px #666666;
border-right: dashed 1px #666666;
@@ -20,9 +24,20 @@ found in the LICENSE file. -->
border-radius: 0px 0px 5px 5px;
}
-span {
- display: block;
- padding: 5px;
+.box > ul {
+ margin: 0px;
+ padding: 0px;
+}
+
+.box > ul > li {
+ display: inline-block;
+}
+
+.box > ul > li:not(:first-child) {
+ margin-left: 10px;
+}
+
+.box > ul > li:first-child {
font-weight: bold;
}
@@ -38,35 +53,58 @@ span {
#categories {
margin-top: 10px;
}
-</style>
-<h2>Data selection</h2>
-<ul>
- <li>
- <label for="isolate-select">
- Isolate
- </label>
- <select id="isolate-select">
- <option>No data</option>
- </select>
- </li>
- <li>
- <label for="dataset-select">
- Data set
- </label>
- <select id="dataset-select">
- <option>No data</option>
- </select>
- </li>
- <li>
- <input type="checkbox" id="merge-categories" checked=checked />
- <label for="merge-categories">
- Merge categories
- </label>
- </li>
-</ul>
+#category-filter {
+ text-align: right;
+ width: 50px;
+}
+
+</style>
+<section id="dataSelectionSection">
+ <h2>Data selection</h2>
+ <ul>
+ <li>
+ <label for="isolate-select">
+ Isolate
+ </label>
+ <select id="isolate-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <label for="dataset-select">
+ Data set
+ </label>
+ <select id="dataset-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <input type="checkbox" id="merge-categories" checked=checked />
+ <label for="merge-categories">
+ Merge categories
+ </label>
+ </li>
+ <li>
+ <label for="gc-select">
+ Garbage collection (at a specific time in ms)
+ </label>
+ <select id="gc-select">
+ <option>No data</option>
+ </select>
+ </li>
+ <li>
+ <input id="category-filter" type="text" value="0" disabled="disabled" />KB
+ <button id="category-filter-btn" disabled="disabled">Filter categories with less memory</button>
+ </li>
+ <li>
+ <button id="csv-export-btn" disabled="disabled">Export selection as CSV</button>
+ </li>
+ </ul>
-<div id="categories"></div>
+ <div id="categories"></div>
+</section>
</template>
+
<script type="text/javascript" src="categories.js"></script>
-<script type="text/javascript" src="details-selection.js"></script> \ No newline at end of file
+<script type="text/javascript" src="details-selection.js"></script>
diff --git a/deps/v8/tools/heap-stats/details-selection.js b/deps/v8/tools/heap-stats/details-selection.js
index 43c000d3f4..c5117d3165 100644
--- a/deps/v8/tools/heap-stats/details-selection.js
+++ b/deps/v8/tools/heap-stats/details-selection.js
@@ -17,8 +17,14 @@ class DetailsSelection extends HTMLElement {
'change', e => this.handleIsolateChange(e));
this.datasetSelect.addEventListener(
'change', e => this.notifySelectionChanged(e));
+ this.gcSelect.addEventListener(
+ 'change', e => this.notifySelectionChanged(e));
+ this.$('#csv-export-btn')
+ .addEventListener('click', e => this.exportCurrentSelection(e));
this.$('#merge-categories')
.addEventListener('change', e => this.notifySelectionChanged(e));
+ this.$('#category-filter-btn')
+        .addEventListener('click', e => this.filterCurrentSelection(e));
}
connectedCallback() {
@@ -36,19 +42,36 @@ class DetailsSelection extends HTMLElement {
return this._data;
}
+ get selectedData() {
+ console.assert(this.data, 'invalid data');
+ console.assert(this.selection, 'invalid selection');
+ return this.data[this.selection.isolate]
+ .gcs[this.selection.gc][this.selection.data_set];
+ }
+
buildCategory(name) {
const div = document.createElement('div');
div.id = name;
div.classList.add('box');
- const span = document.createElement('span');
- div.appendChild(span);
- span.innerHTML = CATEGORY_NAMES.get(name) + ' ';
+ const ul = document.createElement('ul');
+ div.appendChild(ul);
+ const name_li = document.createElement('li');
+ ul.appendChild(name_li);
+ name_li.innerHTML = CATEGORY_NAMES.get(name);
+ const percent_li = document.createElement('li');
+ ul.appendChild(percent_li);
+ percent_li.innerHTML = '0%';
+ percent_li.id = name + 'PercentContent';
+ const all_li = document.createElement('li');
+ ul.appendChild(all_li);
const all_button = document.createElement('button');
- span.appendChild(all_button);
+ all_li.appendChild(all_button);
all_button.innerHTML = 'All';
all_button.addEventListener('click', e => this.selectCategory(name));
+ const none_li = document.createElement('li');
+ ul.appendChild(none_li);
const none_button = document.createElement('button');
- span.appendChild(none_button);
+ none_li.appendChild(none_button);
none_button.innerHTML = 'None';
none_button.addEventListener('click', e => this.unselectCategory(name));
const innerDiv = document.createElement('div');
@@ -69,17 +92,35 @@ class DetailsSelection extends HTMLElement {
return this.$('#isolate-select');
}
+ get gcSelect() {
+ return this.$('#gc-select');
+ }
+
dataChanged() {
- this.clearUI();
- this.populateSelect('#isolate-select', Object.keys(this.data));
+ this.selection = {categories: {}};
+ this.resetUI(true);
+ this.populateIsolateSelect();
this.handleIsolateChange();
+ this.$('#dataSelectionSection').style.display = 'block';
}
- clearUI() {
- this.selection = {categories: {}};
- removeAllChildren(this.isolateSelect);
+ populateIsolateSelect() {
+ let entries = Object.entries(this.data);
+ // Sorty by peak heap memory consumption.
+ entries.sort((a, b) => b[1].peakMemory - a[1].peakMemory);
+ this.populateSelect(
+ '#isolate-select', entries, (key, isolate) => isolate.getLabel());
+ }
+
+ resetUI(resetIsolateSelect) {
+ if (resetIsolateSelect) removeAllChildren(this.isolateSelect);
+
removeAllChildren(this.datasetSelect);
+ removeAllChildren(this.gcSelect);
this.clearCategories();
+ this.$('#csv-export-btn').disabled = 'disabled';
+ this.$('#category-filter-btn').disabled = 'disabled';
+ this.$('#category-filter').disabled = 'disabled';
}
handleIsolateChange(e) {
@@ -88,9 +129,15 @@ class DetailsSelection extends HTMLElement {
this.selection.isolate = null;
return;
}
-
+ this.resetUI(false);
+ this.populateSelect(
+ '#dataset-select',
+ this.data[this.selection.isolate].data_sets.entries(), null, 'live');
this.populateSelect(
- '#dataset-select', this.data[this.selection.isolate].data_sets, 'live');
+ '#gc-select',
+ Object.keys(this.data[this.selection.isolate].gcs)
+ .map(v => [v, this.data[this.selection.isolate].gcs[v].time]),
+ (_, time) => time + 'ms');
this.populateCategories();
this.notifySelectionChanged();
}
@@ -106,10 +153,53 @@ class DetailsSelection extends HTMLElement {
this.selection.category_names = CATEGORY_NAMES;
this.selection.data_set = this.datasetSelect.value;
this.selection.merge_categories = this.$('#merge-categories').checked;
+ this.selection.gc = this.gcSelect.value;
+ this.$('#csv-export-btn').disabled = false;
+ this.$('#category-filter-btn').disabled = false;
+ this.$('#category-filter').disabled = false;
+ this.updatePercentagesInCategory();
this.dispatchEvent(new CustomEvent(
'change', {bubbles: true, composed: true, detail: this.selection}));
}
+ filterCurrentSelection(e) {
+ const filter_value = this.$('#category-filter').value * KB;
+ if (filter_value === 0) return;
+
+ this.selection.category_names.forEach((_, category) => {
+ for (let checkbox of this.shadowRoot.querySelectorAll(
+ 'input[name=' + category + 'Checkbox]')) {
+ checkbox.checked =
+ this.selectedData.instance_type_data[checkbox.instance_type]
+ .overall > filter_value;
+ }
+ });
+ this.notifySelectionChanged();
+ }
+
+ updatePercentagesInCategory() {
+ const overalls = {};
+ let overall = 0;
+ // Reset all categories.
+ this.selection.category_names.forEach((_, category) => {
+ this.$(`#${category}PercentContent`).innerHTML = '0%';
+ });
+ // Only update categories that have selections.
+ Object.entries(this.selection.categories).forEach(([category, value]) => {
+ overalls[category] =
+ Object.values(value).reduce(
+ (accu, current) =>
+ accu + this.selectedData.instance_type_data[current].overall,
+ 0) /
+ KB;
+ overall += overalls[category];
+ });
+ Object.entries(overalls).forEach(([category, category_overall]) => {
+ this.$(`#${category}PercentContent`).innerHTML =
+ `${(category_overall / overall * 100).toFixed(1)}%`;
+ });
+ }
+
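+ // Example for updatePercentagesInCategory() above (illustrative, not in
+ // the original patch): with 300 KB selected in one category and 100 KB in
+ // another, the percent labels read '75.0%' and '25.0%'.
+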
selectedInCategory(category) {
const selected = this.shadowRoot.querySelectorAll(
'input[name=' + category + 'Checkbox]:checked');
@@ -125,17 +215,19 @@ class DetailsSelection extends HTMLElement {
return 'unclassified';
}
- createOption(text) {
+ createOption(value, text) {
const option = document.createElement('option');
- option.value = text;
+ option.value = value;
option.text = text;
return option;
}
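+ // populateSelect() below now consumes [key, value] pairs plus an optional
+ // labelFn(key, value) for the display text. Illustrative usage (not in
+ // the original patch):
+ // this.populateSelect('#gc-select', [['0', 12]], (_, time) => time + 'ms');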
- populateSelect(id, iterable, autoselect = null) {
- for (let option_value of iterable) {
- const option = this.createOption(option_value);
- if (autoselect === option_value) {
+ populateSelect(id, iterable, labelFn = null, autoselect = null) {
+ if (labelFn == null) labelFn = e => e;
+ for (let [key, value] of iterable) {
+ const label = labelFn(key, value);
+ const option = this.createOption(key, label);
+ if (autoselect === key) {
option.selected = 'selected';
}
this.$(id).appendChild(option);
@@ -160,7 +252,6 @@ class DetailsSelection extends HTMLElement {
for (let instance_type of this.data[this.selection.isolate]
.non_empty_instance_types) {
- if (IGNORED_INSTANCE_TYPES.has(instance_type)) continue;
const category = this.categoryForType(instance_type);
categories[category].push(instance_type);
}
@@ -198,6 +289,7 @@ class DetailsSelection extends HTMLElement {
input.name = category + 'Checkbox';
input.checked = 'checked';
input.id = instance_type + 'Checkbox';
+ input.instance_type = instance_type;
input.value = instance_type;
input.addEventListener('change', e => this.notifySelectionChanged(e));
const label = document.createElement('label');
@@ -206,6 +298,33 @@ class DetailsSelection extends HTMLElement {
label.htmlFor = instance_type + 'Checkbox';
return div;
}
+
+ exportCurrentSelection(e) {
+ const data = [];
+ const selected_data = this.data[this.selection.isolate]
+ .gcs[this.selection.gc][this.selection.data_set]
+ .instance_type_data;
+ Object.values(this.selection.categories).forEach(instance_types => {
+ instance_types.forEach(instance_type => {
+ data.push([instance_type, selected_data[instance_type].overall / KB]);
+ });
+ });
+ const createInlineContent = arrayOfRows => {
+ const content = arrayOfRows.reduce(
+ (accu, rowAsArray) => {return accu + `${rowAsArray.join(',')}\n`},
+ '');
+ return `data:text/csv;charset=utf-8,${content}`;
+ };
+ const encodedUri = encodeURI(createInlineContent(data));
+ const link = document.createElement('a');
+ link.setAttribute('href', encodedUri);
+ link.setAttribute(
+ 'download',
+ `heap_objects_data_${this.selection.isolate}_${this.selection.gc}.csv`);
+ this.shadowRoot.appendChild(link);
+ link.click();
+ this.shadowRoot.removeChild(link);
+ }
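+
+ // Illustrative CSV output (not in the original patch): one
+ // '<instance_type>,<size in KB>' row per selected type, e.g.
+ // JS_OBJECT_TYPE,123.4
+ // FIXED_ARRAY_TYPE,56.7
+ // delivered through a data:text/csv URI instead of a server round trip.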
}
customElements.define('details-selection', DetailsSelection);
diff --git a/deps/v8/tools/heap-stats/global-timeline.js b/deps/v8/tools/heap-stats/global-timeline.js
index 0533f21432..f807d44bae 100644
--- a/deps/v8/tools/heap-stats/global-timeline.js
+++ b/deps/v8/tools/heap-stats/global-timeline.js
@@ -86,7 +86,6 @@ class GlobalTimeline extends HTMLElement {
}
getInstanceTypeData() {
- const categories = Object.keys(this.selection.categories);
const instance_types =
Object.values(this.selection.categories)
.reduce((accu, current) => accu.concat(current), []);
diff --git a/deps/v8/tools/heap-stats/histogram-viewer.html b/deps/v8/tools/heap-stats/histogram-viewer.html
new file mode 100644
index 0000000000..93fe980978
--- /dev/null
+++ b/deps/v8/tools/heap-stats/histogram-viewer.html
@@ -0,0 +1,19 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="histogram-viewer-template">
+<style>
+#chart {
+ width: 100%;
+ height: 800px;
+}
+</style>
+<div id="container" style="display: none;">
+ <h2>Details</h2>
+ <ul>
+ <li><span id="overall"></span></li>
+ </ul>
+ <div id="chart"></div>
+</div>
+</template>
+<script type="text/javascript" src="histogram-viewer.js"></script> \ No newline at end of file
diff --git a/deps/v8/tools/heap-stats/histogram-viewer.js b/deps/v8/tools/heap-stats/histogram-viewer.js
new file mode 100644
index 0000000000..bea1e70800
--- /dev/null
+++ b/deps/v8/tools/heap-stats/histogram-viewer.js
@@ -0,0 +1,152 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const histogram_viewer_template =
+ document.currentScript.ownerDocument.querySelector(
+ '#histogram-viewer-template');
+
+class HistogramViewer extends HTMLElement {
+ constructor() {
+ super();
+ const shadowRoot = this.attachShadow({mode: 'open'});
+ shadowRoot.appendChild(histogram_viewer_template.content.cloneNode(true));
+ }
+
+ $(id) {
+ return this.shadowRoot.querySelector(id);
+ }
+
+ set data(value) {
+ this._data = value;
+ this.stateChanged();
+ }
+
+ get data() {
+ return this._data;
+ }
+
+ set selection(value) {
+ this._selection = value;
+ this.stateChanged();
+ }
+
+ get selection() {
+ return this._selection;
+ }
+
+ isValid() {
+ return this.data && this.selection;
+ }
+
+ hide() {
+ this.$('#container').style.display = 'none';
+ }
+
+ show() {
+ this.$('#container').style.display = 'block';
+ }
+
+ stateChanged() {
+ if (this.isValid()) {
+ const overall_bytes = (this.selection.merge_categories) ?
+ this.getPropertyForCategory('overall') :
+ this.getPropertyForInstanceTypes('overall');
+ this.$('#overall').innerHTML = `Overall: ${overall_bytes / KB} KB`;
+ this.drawChart();
+ } else {
+ this.hide();
+ }
+ }
+
+ get selectedData() {
+ console.assert(this.data, 'invalid data');
+ console.assert(this.selection, 'invalid selection');
+ return this.data[this.selection.isolate]
+ .gcs[this.selection.gc][this.selection.data_set];
+ }
+
+ get selectedInstanceTypes() {
+ console.assert(this.selection, 'invalid selection');
+ return Object.values(this.selection.categories)
+ .reduce((accu, current) => accu.concat(current), []);
+ }
+
+ getPropertyForCategory(property) {
+ return Object.values(this.selection.categories)
+ .reduce(
+ (outer_accu, instance_types) => outer_accu +
+ instance_types.reduce(
+ (inner_accu, instance_type) => inner_accu +
+ this.selectedData
+ .instance_type_data[instance_type][property],
+ 0),
+ 0);
+ }
+
+ getPropertyForInstanceTypes(property) {
+ return this.selectedInstanceTypes.reduce(
+ (accu, instance_type) => accu +
+ this.selectedData.instance_type_data[instance_type][property],
+ 0);
+ }
+
+ getCategoryData() {
+ const labels = [
+ 'Bucket',
+ ...Object.keys(this.selection.categories)
+ .map(k => this.selection.category_names.get(k))
+ ];
+ const data = this.selectedData.bucket_sizes.map(
+ (bucket_size, index) =>
+ [`<${bucket_size}`,
+ ...Object.values(this.selection.categories)
+ .map(
+ instance_types =>
+ instance_types
+ .map(
+ instance_type =>
+ this.selectedData
+ .instance_type_data[instance_type]
+ .histogram[index])
+ .reduce((accu, current) => accu + current, 0))]);
+ // Adjust last histogram bucket label.
+ data[data.length - 1][0] = 'rest';
+ return [labels, ...data];
+ }
+
+ getInstanceTypeData() {
+ const instance_types = this.selectedInstanceTypes;
+ const labels = ['Bucket', ...instance_types];
+ const data = this.selectedData.bucket_sizes.map(
+ (bucket_size, index) =>
+ [`<${bucket_size}`,
+ ...instance_types.map(
+ instance_type =>
+ this.selectedData.instance_type_data[instance_type]
+ .histogram[index])]);
+ // Adjust last histogram bucket label.
+ data[data.length - 1][0] = 'rest';
+ return [labels, ...data];
+ }
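+
+ // Both getters above produce rows for google.visualization.arrayToDataTable,
+ // e.g. (illustrative): [['Bucket', 'JS_OBJECT_TYPE'], ['<32', 10],
+ // ['<64', 4], ['rest', 1]] -- a label row, then one row per bucket.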
+
+ drawChart() {
+ const chart_data = (this.selection.merge_categories) ?
+ this.getCategoryData() :
+ this.getInstanceTypeData();
+ const data = google.visualization.arrayToDataTable(chart_data);
+ const options = {
+ legend: {position: 'top', maxLines: '1'},
+ chartArea: {width: '85%', height: '85%'},
+ bar: {groupWidth: '80%'},
+ explorer: {},
+ };
+ const chart = new google.visualization.BarChart(this.$('#chart'));
+ this.show();
+ chart.draw(data, options);
+ }
+}
+
+customElements.define('histogram-viewer', HistogramViewer);
diff --git a/deps/v8/tools/heap-stats/index.html b/deps/v8/tools/heap-stats/index.html
index 3c2e62b6d0..3762502201 100644
--- a/deps/v8/tools/heap-stats/index.html
+++ b/deps/v8/tools/heap-stats/index.html
@@ -8,15 +8,20 @@ found in the LICENSE file. -->
<head>
<meta charset="UTF-8">
<title>V8 Heap Statistics</title>
- <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet' type='text/css'>
- <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+ <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet'>
+ <script
+ src="https://www.gstatic.com/charts/loader.js"></script>
+ <script
+ src="https://cdnjs.cloudflare.com/ajax/libs/pako/1.0.6/pako_inflate.min.js"
+ integrity="sha256-N1z6ddQzX83fjw8v7uSNe7/MgOmMKdwFUv1+AJMDqNM="
+ crossorigin="anonymous"></script>
<link rel="import" href="details-selection.html">
<link rel="import" href="global-timeline.html">
+ <link rel="import" href="histogram-viewer.html">
<link rel="import" href="trace-file-reader.html">
- <style type="text/css">
-
+ <style>
body {
font-family: 'Roboto', sans-serif;
margin-left: 5%;
@@ -24,11 +29,11 @@ body {
}
</style>
- <script type="text/javascript">
+ <script>
'use strict';
-google.charts.load('current', {'packages':['line', 'corechart']});
+google.charts.load('current', {'packages':['line', 'corechart', 'bar']});
function $(id) { return document.querySelector(id); }
@@ -47,15 +52,16 @@ function globalDataChanged(e) {
state.selection = null;
$('#global-timeline').selection = state.selection;
$('#global-timeline').data = state.data;
- $('#type-details').selection = state.selection;
- $('#type-details').data = state.data;
+ $('#histogram-viewer').selection = state.selection;
+ $('#histogram-viewer').data = state.data;
$('#details-selection').data = state.data;
}
function globalSelectionChangedA(e) {
state.selection = e.detail;
+ console.log(state.selection);
$('#global-timeline').selection = state.selection;
- $('#type-details').selection = state.selection;
+ $('#histogram-viewer').selection = state.selection;
}
</script>
@@ -63,16 +69,20 @@ function globalSelectionChangedA(e) {
<body>
<trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
+
+ <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
+ <global-timeline id="global-timeline"></global-timeline>
+ <histogram-viewer id="histogram-viewer"></histogram-viewer>
+
<h1>V8 Heap Statistics</h1>
<p>Visualize object statistics that have been gathered using</p>
<ul>
- <li><code>--trace-gc-object-stats on V8</code></li>
+ <li><code>--trace-gc-object-stats</code> on V8</li>
<li>
<a
href="https://www.chromium.org/developers/how-tos/trace-event-profiling-tool">Chrome's
tracing infrastructure</a> collecting data for the category
- <code>v8.gc_stats</code>. The trace file needs to be unpacked (e.g. using
- <code>gunzip</code>).
+ <code>v8.gc_stats</code>.
</li>
</ul>
<p>
@@ -80,9 +90,6 @@ function globalSelectionChangedA(e) {
requiring <a
href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
</p>
- <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
- <global-timeline id="global-timeline"></global-timeline>
- <type-details id="type-details"></type-details>
</body>
</html>
diff --git a/deps/v8/tools/heap-stats/model.js b/deps/v8/tools/heap-stats/model.js
new file mode 100644
index 0000000000..1afd10a563
--- /dev/null
+++ b/deps/v8/tools/heap-stats/model.js
@@ -0,0 +1,77 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+class Isolate {
+ constructor(address) {
+ this.address = address;
+ this.start = null;
+ this.end = null;
+ this.samples = Object.create(null);
+ this.non_empty_instance_types = new Set();
+ this.gcs = Object.create(null);
+ this.zonetags = [];
+ this.samples = {zone: {}};
+ this.data_sets = new Set();
+ this.peakMemory = 0;
+ }
+
+ finalize() {
+ Object.values(this.gcs).forEach(gc => this.finalizeGC(gc));
+ }
+
+ getLabel() {
+ let label = `${this.address}: gc=#${Object.keys(this.gcs).length}`;
+ const peakSizeMB = Math.round(this.peakMemory / 1024 / 1024 * 100) / 100;
+ label += ` max=${peakSizeMB}MB`;
+ return label;
+ }
+
+ finalizeGC(gc_data) {
+ this.data_sets.forEach(key => this.finalizeDataSet(gc_data[key]));
+ if ('live' in gc_data) {
+ this.peakMemory = Math.max(this.peakMemory, gc_data['live'].overall);
+ }
+ }
+
+ finalizeDataSet(data_set) {
+ // Create a ranked instance type array that sorts instance types by
+ // memory size (overall).
+ data_set.ranked_instance_types =
+ [...data_set.non_empty_instance_types].sort(function(a, b) {
+ if (data_set.instance_type_data[a].overall >
+ data_set.instance_type_data[b].overall) {
+ return 1;
+ } else if (
+ data_set.instance_type_data[a].overall <
+ data_set.instance_type_data[b].overall) {
+ return -1;
+ }
+ return 0;
+ });
+
+ Object.entries(data_set.instance_type_data).forEach(([name, entry]) => {
+ this.checkHistogram(
+ name, entry, data_set.bucket_sizes, 'histogram', 'overall');
+ this.checkHistogram(
+ name, entry, data_set.bucket_sizes, 'over_allocated_histogram',
+ 'over_allocated');
+ });
+ }
+
+ // Check that a lower bound for histogram memory does not exceed the
+ // overall counter.
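+ // Illustrative check (not in the original patch): with bucket_sizes
+ // [32, 64] and histogram [0, 2, 1], the lower bound is
+ // 2 * 32 + 1 * 64 = 128 bytes, which must stay below 'overall'.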
+ checkHistogram(type, entry, bucket_sizes, histogram, overallProperty) {
+ let sum = 0;
+ for (let i = 1; i < entry[histogram].length; i++) {
+ sum += entry[histogram][i] * bucket_sizes[i - 1];
+ }
+ const overall = entry[overallProperty];
+ if (sum >= overall) {
+ console.error(
+ `${type}: sum('${histogram}') >= overall (${sum} >= ${overall})`);
+ }
+ }
+}
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.html b/deps/v8/tools/heap-stats/trace-file-reader.html
index 98c2ef0c60..73de98ab03 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.html
+++ b/deps/v8/tools/heap-stats/trace-file-reader.html
@@ -10,17 +10,66 @@ found in the LICENSE file. -->
text-align: center;
border: solid 1px #000000;
border-radius: 5px;
+ cursor: pointer;
+}
+
+.loading #fileReader {
+ cursor: wait;
}
#fileReader > input {
display: none;
}
+
+#loader {
+ display: none;
+}
+
+.loading #loader{
+ display: block;
+ position: fixed;
+ top: 0px;
+ left: 0px;
+ width: 100%;
+ height: 100%;
+ background-color: rgba(255, 255, 255, 0.5);
+}
+
+#spinner {
+ position: absolute;
+ width: 100px;
+ height: 100px;
+ top: 40%;
+ left: 50%;
+ margin-left: -50px;
+ border: 30px solid #000;
+ border-top: 30px solid #36E;
+ border-radius: 50%;
+ animation: spin 1s ease-in-out infinite;
+}
+
+@keyframes spin {
+ 0% {
+ transform: rotate(0deg);
+ }
+ 100% {
+ transform: rotate(360deg);
+ }
+}
</style>
-<div id="fileReader">
- <span id="label">
- Drag and drop a trace file into this area, or click to choose from disk.
- </span>
- <input id="file" type="file" name="file" />
-</div>
+
+<section id="fileReaderSection">
+ <div id="fileReader">
+ <span id="label">
+ Drag and drop a trace file into this area, or click to choose from disk.
+ </span>
+ <input id="file" type="file" name="file" />
+ </div>
+ <div id="loader">
+ <div id="spinner"></div>
+ </div>
+</section>
</template>
-<script type="text/javascript" src="trace-file-reader.js"></script>
+<script type="text/javascript" src="model.js"></script>
+
+<script src="trace-file-reader.js"></script>
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
index 59825fe514..ef563a43cb 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.js
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -23,6 +23,10 @@ class TraceFileReader extends HTMLElement {
return this.shadowRoot.querySelector(id);
}
+ get section() {
+ return this.$('#fileReaderSection');
+ }
+
updateLabel(text) {
this.$('#label').innerText = text;
}
@@ -50,34 +54,42 @@ class TraceFileReader extends HTMLElement {
return;
}
- const result = new FileReader();
- result.onload = (e) => {
- let contents = e.target.result.split('\n');
- const return_data = (e.target.result.includes('V8.GC_Objects_Stats')) ?
- this.createModelFromChromeTraceFile(contents) :
- this.createModelFromV8TraceFile(contents);
- this.updateLabel('Finished loading \'' + file.name + '\'.');
- this.dispatchEvent(new CustomEvent(
- 'change', {bubbles: true, composed: true, detail: return_data}));
- };
- result.readAsText(file);
+ this.section.className = 'loading';
+ const reader = new FileReader();
+
+ if (['application/gzip', 'application/x-gzip'].includes(file.type)) {
+ reader.onload = (e) => {
+ try {
+ const textResult = pako.inflate(e.target.result, {to: 'string'});
+ this.processRawText(file, textResult);
+ this.section.className = 'success';
+ } catch (err) {
+ console.error(err);
+ this.section.className = 'failure';
+ }
+ };
+ reader.readAsArrayBuffer(file);
+ } else {
+ reader.onload = (e) => this.processRawText(file, e.target.result);
+ reader.readAsText(file);
+ }
}
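+
+ // Note on the gzip branch above (clarifying comment, not in the original
+ // patch): file.type comes from browser/OS extension mapping and may be
+ // empty, so a '.gz' suffix check on file.name would be a more robust
+ // fallback for detecting compressed traces.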
- createOrUpdateEntryIfNeeded(data, keys, entry) {
+ processRawText(file, result) {
+ let contents = result.split('\n');
+ const return_data = (result.includes('V8.GC_Objects_Stats')) ?
+ this.createModelFromChromeTraceFile(contents) :
+ this.createModelFromV8TraceFile(contents);
+ this.extendAndSanitizeModel(return_data);
+ this.updateLabel('Finished loading \'' + file.name + '\'.');
+ this.dispatchEvent(new CustomEvent(
+ 'change', {bubbles: true, composed: true, detail: return_data}));
+ }
+
+ createOrUpdateEntryIfNeeded(data, entry) {
console.assert(entry.isolate, 'entry should have an isolate');
- if (!(entry.isolate in keys)) {
- keys[entry.isolate] = new Set();
- }
if (!(entry.isolate in data)) {
- data[entry.isolate] = {
- non_empty_instance_types: new Set(),
- gcs: {},
- zonetags: [],
- samples: {zone: {}},
- start: null,
- end: null,
- data_sets: new Set()
- };
+ data[entry.isolate] = new Isolate(entry.isolate);
}
const data_object = data[entry.isolate];
if (('id' in entry) && !(entry.id in data_object.gcs)) {
@@ -91,7 +103,7 @@ class TraceFileReader extends HTMLElement {
}
}
- createDatasetIfNeeded(data, keys, entry, data_set) {
+ createDatasetIfNeeded(data, entry, data_set) {
if (!(data_set in data[entry.isolate].gcs[entry.id])) {
data[entry.isolate].gcs[entry.id][data_set] = {
instance_type_data: {},
@@ -102,9 +114,7 @@ class TraceFileReader extends HTMLElement {
}
}
- addInstanceTypeData(
- data, keys, isolate, gc_id, data_set, instance_type, entry) {
- keys[isolate].add(data_set);
+ addInstanceTypeData(data, isolate, gc_id, data_set, instance_type, entry) {
data[isolate].gcs[gc_id][data_set].instance_type_data[instance_type] = {
overall: entry.overall,
count: entry.count,
@@ -121,117 +131,75 @@ class TraceFileReader extends HTMLElement {
}
}
- extendAndSanitizeModel(data, keys) {
+ extendAndSanitizeModel(data) {
const checkNonNegativeProperty = (obj, property) => {
console.assert(obj[property] >= 0, 'negative property', obj, property);
};
- for (const isolate of Object.keys(data)) {
- for (const gc of Object.keys(data[isolate].gcs)) {
- for (const data_set_key of keys[isolate]) {
- const data_set = data[isolate].gcs[gc][data_set_key];
- // 1. Create a ranked instance type array that sorts instance
- // types by memory size (overall).
- data_set.ranked_instance_types =
- [...data_set.non_empty_instance_types].sort(function(a, b) {
- if (data_set.instance_type_data[a].overall >
- data_set.instance_type_data[b].overall) {
- return 1;
- } else if (
- data_set.instance_type_data[a].overall <
- data_set.instance_type_data[b].overall) {
- return -1;
- }
- return 0;
- });
-
- let known_count = 0;
- let known_overall = 0;
- let known_histogram =
- Array(
- data_set.instance_type_data.FIXED_ARRAY_TYPE.histogram.length)
- .fill(0);
- for (const instance_type in data_set.instance_type_data) {
- if (!instance_type.startsWith('*FIXED_ARRAY')) continue;
- const subtype = data_set.instance_type_data[instance_type];
- known_count += subtype.count;
- known_overall += subtype.count;
- for (let i = 0; i < subtype.histogram.length; i++) {
- known_histogram[i] += subtype.histogram[i];
- }
- }
-
- const fixed_array_data = data_set.instance_type_data.FIXED_ARRAY_TYPE;
- const unknown_entry = {
- count: fixed_array_data.count - known_count,
- overall: fixed_array_data.overall - known_overall,
- histogram: fixed_array_data.histogram.map(
- (value, index) => value - known_histogram[index])
- };
-
- // Check for non-negative values.
- checkNonNegativeProperty(unknown_entry, 'count');
- checkNonNegativeProperty(unknown_entry, 'overall');
- for (let i = 0; i < unknown_entry.histogram.length; i++) {
- checkNonNegativeProperty(unknown_entry.histogram, i);
- }
-
- data_set.instance_type_data['*FIXED_ARRAY_UNKNOWN_SUB_TYPE'] =
- unknown_entry;
- data_set.non_empty_instance_types.add(
- '*FIXED_ARRAY_UNKNOWN_SUB_TYPE');
- }
- }
- }
+ Object.values(data).forEach(isolate => isolate.finalize());
}
createModelFromChromeTraceFile(contents) {
- console.log('Processing log as chrome trace file.');
- const data = Object.create(null); // Final data container.
- const keys = Object.create(null); // Collecting 'keys' per isolate.
+ // Trace files support two formats.
+ // {traceEvents: [ data ]}
+ const kObjectTraceFile = {
+ name: 'object',
+ endToken: ']}',
+ getDataArray: o => o.traceEvents
+ };
+ // [ data ]
+ const kArrayTraceFile = {
+ name: 'array',
+ endToken: ']',
+ getDataArray: o => o
+ };
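+ // Detection sketch (not in the original patch): a first character of '{'
+ // (as in '{"traceEvents": [...') selects the object handler; '[' selects
+ // the array handler.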
+ const handler =
+ (contents[0][0] === '{') ? kObjectTraceFile : kArrayTraceFile;
+ console.log(`Processing log as chrome trace file (${handler.name}).`);
// Pop last line in log as it might be broken.
contents.pop();
// Remove trailing comma.
contents[contents.length - 1] = contents[contents.length - 1].slice(0, -1);
// Terminate JSON.
- const sanitized_contents = [...contents, ']}'].join('');
+ const sanitized_contents = [...contents, handler.endToken].join('');
+
+ const data = Object.create(null); // Final data container.
try {
const raw_data = JSON.parse(sanitized_contents);
- const objects_stats_data =
- raw_data.traceEvents.filter(e => e.name == 'V8.GC_Objects_Stats');
- objects_stats_data.forEach(trace_data => {
- const actual_data = trace_data.args;
- const data_sets = new Set(Object.keys(actual_data));
- Object.keys(actual_data).forEach(data_set => {
- const string_entry = actual_data[data_set];
- try {
- const entry = JSON.parse(string_entry);
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
- this.createDatasetIfNeeded(data, keys, entry, data_set);
- const isolate = entry.isolate;
- const time = entry.time;
- const gc_id = entry.id;
- data[isolate].gcs[gc_id].time = time;
- data[isolate].gcs[gc_id][data_set].bucket_sizes =
- entry.bucket_sizes;
- for (let [instance_type, value] of Object.entries(
- entry.type_data)) {
- // Trace file format uses markers that do not have actual
- // properties.
- if (!('overall' in value)) continue;
- this.addInstanceTypeData(
- data, keys, isolate, gc_id, data_set, instance_type, value);
- }
- } catch (e) {
- console.log('Unable to parse data set entry', e);
- }
- });
- });
+ const raw_array_data = handler.getDataArray(raw_data);
+ raw_array_data.filter(e => e.name === 'V8.GC_Objects_Stats')
+ .forEach(trace_data => {
+ const actual_data = trace_data.args;
+ const data_sets = new Set(Object.keys(actual_data));
+ Object.keys(actual_data).forEach(data_set => {
+ const string_entry = actual_data[data_set];
+ try {
+ const entry = JSON.parse(string_entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
+ this.createDatasetIfNeeded(data, entry, data_set);
+ const isolate = entry.isolate;
+ const time = entry.time;
+ const gc_id = entry.id;
+ data[isolate].gcs[gc_id].time = time;
+ data[isolate].gcs[gc_id][data_set].bucket_sizes =
+ entry.bucket_sizes;
+ for (let [instance_type, value] of Object.entries(
+ entry.type_data)) {
+ // Trace file format uses markers that do not have actual
+ // properties.
+ if (!('overall' in value)) continue;
+ this.addInstanceTypeData(
+ data, isolate, gc_id, data_set, instance_type, value);
+ }
+ } catch (e) {
+ console.log('Unable to parse data set entry', e);
+ }
+ });
+ });
} catch (e) {
- console.log('Unable to parse chrome trace file.', e);
+ console.error('Unable to parse chrome trace file.', e);
}
- this.extendAndSanitizeModel(data, keys);
return data;
}
@@ -249,14 +217,12 @@ class TraceFileReader extends HTMLElement {
});
const data = Object.create(null); // Final data container.
- const keys = Object.create(null); // Collecting 'keys' per isolate.
-
for (var entry of contents) {
if (entry === null || entry.type === undefined) {
continue;
}
if (entry.type === 'zone') {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
const stacktrace = ('stacktrace' in entry) ? entry.stacktrace : [];
data[entry.isolate].samples.zone[entry.time] = {
allocated: entry.allocated,
@@ -265,26 +231,26 @@ class TraceFileReader extends HTMLElement {
};
} else if (
entry.type === 'zonecreation' || entry.type === 'zonedestruction') {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
data[entry.isolate].zonetags.push(
Object.assign({opening: entry.type === 'zonecreation'}, entry));
} else if (entry.type === 'gc_descriptor') {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
+ this.createOrUpdateEntryIfNeeded(data, entry);
data[entry.isolate].gcs[entry.id].time = entry.time;
if ('zone' in entry)
data[entry.isolate].gcs[entry.id].malloced = entry.zone;
} else if (entry.type === 'instance_type_data') {
if (entry.id in data[entry.isolate].gcs) {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
- this.createDatasetIfNeeded(data, keys, entry, entry.key);
+ this.createOrUpdateEntryIfNeeded(data, entry);
+ this.createDatasetIfNeeded(data, entry, entry.key);
this.addInstanceTypeData(
- data, keys, entry.isolate, entry.id, entry.key,
+ data, entry.isolate, entry.id, entry.key,
entry.instance_type_name, entry);
}
} else if (entry.type === 'bucket_sizes') {
if (entry.id in data[entry.isolate].gcs) {
- this.createOrUpdateEntryIfNeeded(data, keys, entry);
- this.createDatasetIfNeeded(data, keys, entry, entry.key);
+ this.createOrUpdateEntryIfNeeded(data, entry);
+ this.createDatasetIfNeeded(data, entry, entry.key);
data[entry.isolate].gcs[entry.id][entry.key].bucket_sizes =
entry.sizes;
}
@@ -292,7 +258,6 @@ class TraceFileReader extends HTMLElement {
console.log('Unknown entry type: ' + entry.type);
}
}
- this.extendAndSanitizeModel(data, keys);
return data;
}
}
diff --git a/deps/v8/tools/isolate_driver.py b/deps/v8/tools/isolate_driver.py
index a6bcfbf71f..32077e236f 100644
--- a/deps/v8/tools/isolate_driver.py
+++ b/deps/v8/tools/isolate_driver.py
@@ -4,7 +4,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""Adaptor script called through build/isolate.gypi.
+"""Adaptor script called through gni/isolate.gni.
Creates a wrapping .isolate which 'includes' the original one, that can be
consumed by tools/swarming_client/isolate.py. Path variables are determined
diff --git a/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py b/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
deleted file mode 100644
index 2925213ced..0000000000
--- a/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import re
-import subprocess
-
-FUZZ_PATH = os.path.dirname(os.path.abspath(__file__))
-SHA1_PATH = os.path.join(FUZZ_PATH, 'jsfunfuzz.tar.gz.sha1')
-
-if re.search(r'\bjsfunfuzz=1', os.environ.get('GYP_DEFINES', '')):
- subprocess.check_call([
- 'download_from_google_storage',
- '-b', 'chrome-v8-jsfunfuzz',
- '-u', '--no_resume',
- '-s', SHA1_PATH,
- '--platform=linux*'
- ])
diff --git a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp b/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
deleted file mode 100644
index 8938e44538..0000000000
--- a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'jsfunfuzz_run',
- 'type': 'none',
- 'dependencies': [
- '../../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../../gypfiles/features.gypi',
- '../../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'jsfunfuzz.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index 9a6600225b..b97ce455c2 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -4,16 +4,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""MB - the Meta-Build wrapper around GYP and GN
+"""MB - the Meta-Build wrapper around GN.
-MB is a wrapper script for GYP and GN that can be used to generate build files
+MB is a wrapper script for GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
-# TODO(thomasanderson): Remove this comment. It is added to
-# workaround https://crbug.com/736215 for CL
-# https://codereview.chromium.org/2974603002/
-
from __future__ import print_function
import argparse
@@ -22,6 +18,7 @@ import errno
import json
import os
import pipes
+import platform
import pprint
import re
import shutil
@@ -95,21 +92,17 @@ class MetaBuildWrapper(object):
help='path to config file '
'(default is %(default)s)')
subp.add_argument('-i', '--isolate-map-file', metavar='PATH',
- default=self.default_isolate_map,
help='path to isolate map file '
- '(default is %(default)s)')
+ '(default is %(default)s)',
+ default=[],
+ action='append',
+ dest='isolate_map_files')
subp.add_argument('-g', '--goma-dir',
help='path to goma directory')
- subp.add_argument('--gyp-script', metavar='PATH',
- default=self.PathJoin('build', 'gyp_chromium'),
- help='path to gyp script relative to project root '
- '(default is %(default)s)')
subp.add_argument('--android-version-code',
- help='Sets GN arg android_default_version_code and '
- 'GYP_DEFINE app_manifest_version_code')
+ help='Sets GN arg android_default_version_code')
subp.add_argument('--android-version-name',
- help='Sets GN arg android_default_version_name and '
- 'GYP_DEFINE app_manifest_version_name')
+ help='Sets GN arg android_default_version_name')
subp.add_argument('-n', '--dryrun', action='store_true',
help='Do a dry run (i.e., do nothing, just print '
'the commands that will run)')
@@ -190,7 +183,6 @@ class MetaBuildWrapper(object):
' --test-launcher-retry-limit=0'
'\n'
)
-
AddCommonOptions(subp)
subp.add_argument('-j', '--jobs', dest='jobs', type=int,
help='Number of jobs to pass to ninja')
@@ -202,6 +194,14 @@ class MetaBuildWrapper(object):
' This can be either a regular path or a '
'GN-style source-relative path like '
'//out/Default.'))
+ subp.add_argument('-s', '--swarmed', action='store_true',
+ help='Run under swarming with the default dimensions')
+ subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
+ dest='dimensions', metavar='FOO bar',
+ help='dimension to filter on')
+ subp.add_argument('--no-default-dimensions', action='store_false',
+ dest='default_dimensions', default=True,
+ help='Do not automatically add dimensions to the task')
subp.add_argument('target', nargs=1,
help='ninja target to build and run')
subp.add_argument('extra_args', nargs='*',
@@ -217,26 +217,6 @@ class MetaBuildWrapper(object):
help='path to config file (default is %(default)s)')
subp.set_defaults(func=self.CmdValidate)
- subp = subps.add_parser('audit',
- help='Audit the config file to track progress')
- subp.add_argument('-f', '--config-file', metavar='PATH',
- default=self.default_config,
- help='path to config file (default is %(default)s)')
- subp.add_argument('-i', '--internal', action='store_true',
- help='check internal masters also')
- subp.add_argument('-m', '--master', action='append',
- help='master to audit (default is all non-internal '
- 'masters in file)')
- subp.add_argument('-u', '--url-template', action='store',
- default='https://build.chromium.org/p/'
- '{master}/json/builders',
- help='URL scheme for JSON APIs to buildbot '
- '(default: %(default)s) ')
- subp.add_argument('-c', '--check-compile', action='store_true',
- help='check whether tbd and master-only bots actually'
- ' do compiles')
- subp.set_defaults(func=self.CmdAudit)
-
subp = subps.add_parser('gerrit-buildbucket-config',
help='Print buildbucket.config for gerrit '
'(see MB user guide)')
@@ -276,11 +256,7 @@ class MetaBuildWrapper(object):
def CmdAnalyze(self):
vals = self.Lookup()
- self.ClobberIfNeeded(vals)
- if vals['type'] == 'gn':
- return self.RunGNAnalyze(vals)
- else:
- return self.RunGYPAnalyze(vals)
+ return self.RunGNAnalyze(vals)
def CmdExport(self):
self.ReadConfigFile()
@@ -312,11 +288,7 @@ class MetaBuildWrapper(object):
def CmdGen(self):
vals = self.Lookup()
- self.ClobberIfNeeded(vals)
- if vals['type'] == 'gn':
- return self.RunGNGen(vals)
- else:
- return self.RunGYPGen(vals)
+ return self.RunGNGen(vals)
def CmdHelp(self):
if self.args.subcommand:
@@ -328,21 +300,14 @@ class MetaBuildWrapper(object):
vals = self.GetConfig()
if not vals:
return 1
-
- if vals['type'] == 'gn':
- return self.RunGNIsolate()
- else:
- return self.Build('%s_run' % self.args.target[0])
+ return self.RunGNIsolate()
def CmdLookup(self):
vals = self.Lookup()
- if vals['type'] == 'gn':
- cmd = self.GNCmd('gen', '_path_')
- gn_args = self.GNArgs(vals)
- self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
- env = None
- else:
- cmd, env = self.GYPCmd('_path_', vals)
+ cmd = self.GNCmd('gen', '_path_')
+ gn_args = self.GNArgs(vals)
+ self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
+ env = None
self.PrintCmd(cmd, env)
return 0
@@ -355,32 +320,86 @@ class MetaBuildWrapper(object):
build_dir = self.args.path[0]
target = self.args.target[0]
- if vals['type'] == 'gn':
- if self.args.build:
- ret = self.Build(target)
- if ret:
- return ret
- ret = self.RunGNIsolate()
+ if self.args.build:
+ ret = self.Build(target)
if ret:
return ret
+ ret = self.RunGNIsolate()
+ if ret:
+ return ret
+
+ if self.args.swarmed:
+ return self._RunUnderSwarming(build_dir, target)
else:
- ret = self.Build('%s_run' % target)
- if ret:
- return ret
+ return self._RunLocallyIsolated(build_dir, target)
+
+ def _RunUnderSwarming(self, build_dir, target):
+ # TODO(dpranke): Look up the information for the target in
+ # the //testing/buildbot.json file, if possible, so that we
+ # can determine the isolate target, command line, and additional
+ # swarming parameters, if possible.
+ #
+ # TODO(dpranke): Also, add support for sharding and merging results.
+ dimensions = []
+ for k, v in self._DefaultDimensions() + self.args.dimensions:
+ dimensions += ['-d', k, v]
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
+ 'archive',
+ '-s',
+ self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
+ '-I', 'isolateserver.appspot.com',
+ ]
+ ret, out, _ = self.Run(cmd, force_verbose=False)
+ if ret:
+ return ret
+
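+ # The first line of 'isolate.py archive' output is assumed to be
+ # '<hash> <target>.isolated'; only the hash is needed to trigger the
+ # swarming task below. (Clarifying comment, not part of the original change.)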
+ isolated_hash = out.splitlines()[0].split()[0]
+ cmd = [
+ self.executable,
+ self.PathJoin('tools', 'swarming_client', 'swarming.py'),
+ 'run',
+ '-s', isolated_hash,
+ '-I', 'isolateserver.appspot.com',
+ '-S', 'chromium-swarm.appspot.com',
+ ] + dimensions
+ if self.args.extra_args:
+ cmd += ['--'] + self.args.extra_args
+ ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+ return ret
+
+ def _RunLocallyIsolated(self, build_dir, target):
+ cmd = [
+ self.executable,
+ self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'run',
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
- ]
+ ]
if self.args.extra_args:
- cmd += ['--'] + self.args.extra_args
+ cmd += ['--'] + self.args.extra_args
+ ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+ return ret
- ret, _, _ = self.Run(cmd, force_verbose=False, buffer_output=False)
+ def _DefaultDimensions(self):
+ if not self.args.default_dimensions:
+ return []
+
+ # This code is naive and just picks reasonable defaults per platform.
+ if self.platform == 'darwin':
+ os_dim = ('os', 'Mac-10.12')
+ elif self.platform.startswith('linux'):
+ os_dim = ('os', 'Ubuntu-14.04')
+ elif self.platform == 'win32':
+ os_dim = ('os', 'Windows-10-14393')
+ else:
+ raise MBErr('unrecognized platform string "%s"' % self.platform)
- return ret
+ return [('pool', 'Chrome'),
+ ('cpu', 'x86-64'),
+ os_dim]
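+
+ # Illustrative result (not part of the original change): on Linux this
+ # returns [('pool', 'Chrome'), ('cpu', 'x86-64'), ('os', 'Ubuntu-14.04')],
+ # which _RunUnderSwarming flattens into '-d pool Chrome -d cpu x86-64 ...'.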
def CmdBuildbucket(self):
self.ReadConfigFile()
@@ -462,154 +481,26 @@ class MetaBuildWrapper(object):
self.Print('mb config file %s looks ok.' % self.args.config_file)
return 0
- def CmdAudit(self):
- """Track the progress of the GYP->GN migration on the bots."""
-
- # First, make sure the config file is okay, but don't print anything
- # if it is (it will throw an error if it isn't).
- self.CmdValidate(print_ok=False)
-
- stats = OrderedDict()
- STAT_MASTER_ONLY = 'Master only'
- STAT_CONFIG_ONLY = 'Config only'
- STAT_TBD = 'Still TBD'
- STAT_GYP = 'Still GYP'
- STAT_DONE = 'Done (on GN)'
- stats[STAT_MASTER_ONLY] = 0
- stats[STAT_CONFIG_ONLY] = 0
- stats[STAT_TBD] = 0
- stats[STAT_GYP] = 0
- stats[STAT_DONE] = 0
-
- def PrintBuilders(heading, builders, notes):
- stats.setdefault(heading, 0)
- stats[heading] += len(builders)
- if builders:
- self.Print(' %s:' % heading)
- for builder in sorted(builders):
- self.Print(' %s%s' % (builder, notes[builder]))
-
- self.ReadConfigFile()
-
- masters = self.args.master or self.masters
- for master in sorted(masters):
- url = self.args.url_template.replace('{master}', master)
-
- self.Print('Auditing %s' % master)
-
- MASTERS_TO_SKIP = (
- 'client.skia',
- 'client.v8.fyi',
- 'tryserver.v8',
- )
- if master in MASTERS_TO_SKIP:
- # Skip these bots because converting them is the responsibility of
- # those teams and out of scope for the Chromium migration to GN.
- self.Print(' Skipped (out of scope)')
- self.Print('')
- continue
-
- INTERNAL_MASTERS = ('official.desktop', 'official.desktop.continuous',
- 'internal.client.kitchensync')
- if master in INTERNAL_MASTERS and not self.args.internal:
- # Skip these because the servers aren't accessible by default ...
- self.Print(' Skipped (internal)')
- self.Print('')
- continue
-
- try:
- # Fetch the /builders contents from the buildbot master. The
- # keys of the dict are the builder names themselves.
- json_contents = self.Fetch(url)
- d = json.loads(json_contents)
- except Exception as e:
- self.Print(str(e))
- return 1
-
- config_builders = set(self.masters[master])
- master_builders = set(d.keys())
- both = master_builders & config_builders
- master_only = master_builders - config_builders
- config_only = config_builders - master_builders
- tbd = set()
- gyp = set()
- done = set()
- notes = {builder: '' for builder in config_builders | master_builders}
-
- for builder in both:
- config = self.masters[master][builder]
- if config == 'tbd':
- tbd.add(builder)
- elif isinstance(config, dict):
- vals = self.FlattenConfig(config.values()[0])
- if vals['type'] == 'gyp':
- gyp.add(builder)
- else:
- done.add(builder)
- elif config.startswith('//'):
- done.add(builder)
- else:
- vals = self.FlattenConfig(config)
- if vals['type'] == 'gyp':
- gyp.add(builder)
- else:
- done.add(builder)
-
- if self.args.check_compile and (tbd or master_only):
- either = tbd | master_only
- for builder in either:
- notes[builder] = ' (' + self.CheckCompile(master, builder) +')'
-
- if master_only or config_only or tbd or gyp:
- PrintBuilders(STAT_MASTER_ONLY, master_only, notes)
- PrintBuilders(STAT_CONFIG_ONLY, config_only, notes)
- PrintBuilders(STAT_TBD, tbd, notes)
- PrintBuilders(STAT_GYP, gyp, notes)
- else:
- self.Print(' All GN!')
-
- stats[STAT_DONE] += len(done)
-
- self.Print('')
-
- fmt = '{:<27} {:>4}'
- self.Print(fmt.format('Totals', str(sum(int(v) for v in stats.values()))))
- self.Print(fmt.format('-' * 27, '----'))
- for stat, count in stats.items():
- self.Print(fmt.format(stat, str(count)))
-
- return 0
-
def GetConfig(self):
build_dir = self.args.path[0]
vals = self.DefaultVals()
if self.args.builder or self.args.master or self.args.config:
vals = self.Lookup()
- if vals['type'] == 'gn':
- # Re-run gn gen in order to ensure the config is consistent with the
- # build dir.
- self.RunGNGen(vals)
+ # Re-run gn gen in order to ensure the config is consistent with the
+ # build dir.
+ self.RunGNGen(vals)
return vals
- mb_type_path = self.PathJoin(self.ToAbsPath(build_dir), 'mb_type')
- if not self.Exists(mb_type_path):
- toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
- 'toolchain.ninja')
- if not self.Exists(toolchain_path):
- self.Print('Must either specify a path to an existing GN build dir '
- 'or pass in a -m/-b pair or a -c flag to specify the '
- 'configuration')
- return {}
- else:
- mb_type = 'gn'
- else:
- mb_type = self.ReadFile(mb_type_path).strip()
-
- if mb_type == 'gn':
- vals['gn_args'] = self.GNArgsFromDir(build_dir)
- vals['type'] = mb_type
+ toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
+ 'toolchain.ninja')
+ if not self.Exists(toolchain_path):
+ self.Print('Must either specify a path to an existing GN build dir '
+ 'or pass in a -m/-b pair or a -c flag to specify the '
+ 'configuration')
+ return {}
+ vals['gn_args'] = self.GNArgsFromDir(build_dir)
return vals
def GNArgsFromDir(self, build_dir):
@@ -641,14 +532,6 @@ class MetaBuildWrapper(object):
raise MBErr('Config "%s" not found in %s' %
(config, self.args.config_file))
vals = self.FlattenConfig(config)
-
- # Do some basic sanity checking on the config so that we
- # don't have to do this in every caller.
- if 'type' not in vals:
- vals['type'] = 'gn'
- assert vals['type'] in ('gn', 'gyp'), (
- 'Unknown meta-build type "%s"' % vals['gn_args'])
-
return vals
def ReadIOSBotConfig(self):
@@ -660,17 +543,10 @@ class MetaBuildWrapper(object):
return {}
contents = json.loads(self.ReadFile(path))
- gyp_vals = contents.get('GYP_DEFINES', {})
- if isinstance(gyp_vals, dict):
- gyp_defines = ' '.join('%s=%s' % (k, v) for k, v in gyp_vals.items())
- else:
- gyp_defines = ' '.join(gyp_vals)
gn_args = ' '.join(contents.get('gn_args', []))
vals = self.DefaultVals()
vals['gn_args'] = gn_args
- vals['gyp_defines'] = gyp_defines
- vals['type'] = contents.get('mb_type', 'gn')
return vals
def ReadConfigFile(self):
@@ -689,14 +565,26 @@ class MetaBuildWrapper(object):
self.mixins = contents['mixins']
def ReadIsolateMap(self):
- if not self.Exists(self.args.isolate_map_file):
- raise MBErr('isolate map file not found at %s' %
- self.args.isolate_map_file)
- try:
- return ast.literal_eval(self.ReadFile(self.args.isolate_map_file))
- except SyntaxError as e:
- raise MBErr('Failed to parse isolate map file "%s": %s' %
- (self.args.isolate_map_file, e))
+ if not self.args.isolate_map_files:
+ self.args.isolate_map_files = [self.default_isolate_map]
+
+ for f in self.args.isolate_map_files:
+ if not self.Exists(f):
+ raise MBErr('isolate map file not found at %s' % f)
+ isolate_maps = {}
+ for isolate_map in self.args.isolate_map_files:
+ try:
+ isolate_map = ast.literal_eval(self.ReadFile(isolate_map))
+ duplicates = set(isolate_map).intersection(isolate_maps)
+ if duplicates:
+ raise MBErr(
+ 'Duplicate targets in isolate map files: %s.' %
+ ', '.join(duplicates))
+ isolate_maps.update(isolate_map)
+ except SyntaxError as e:
+ raise MBErr(
+ 'Failed to parse isolate map file "%s": %s' % (isolate_map, e))
+ return isolate_maps
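+
+ # Illustrative behaviour (not part of the original change): given
+ # '-i a.pyl -i b.pyl' where both define 'foo_unittests', the intersection
+ # check above raises MBErr; disjoint maps are merged in argument order.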
def ConfigFromArgs(self):
if self.args.config:
@@ -747,9 +635,6 @@ class MetaBuildWrapper(object):
'args_file': '',
'cros_passthrough': False,
'gn_args': '',
- 'gyp_defines': '',
- 'gyp_crosscompile': False,
- 'type': 'gn',
}
def FlattenMixins(self, mixins, vals, visited):
@@ -773,50 +658,11 @@ class MetaBuildWrapper(object):
vals['gn_args'] += ' ' + mixin_vals['gn_args']
else:
vals['gn_args'] = mixin_vals['gn_args']
- if 'gyp_crosscompile' in mixin_vals:
- vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile']
- if 'gyp_defines' in mixin_vals:
- if vals['gyp_defines']:
- vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
- else:
- vals['gyp_defines'] = mixin_vals['gyp_defines']
- if 'type' in mixin_vals:
- vals['type'] = mixin_vals['type']
if 'mixins' in mixin_vals:
self.FlattenMixins(mixin_vals['mixins'], vals, visited)
return vals
- def ClobberIfNeeded(self, vals):
- path = self.args.path[0]
- build_dir = self.ToAbsPath(path)
- mb_type_path = self.PathJoin(build_dir, 'mb_type')
- needs_clobber = False
- new_mb_type = vals['type']
- if self.Exists(build_dir):
- if self.Exists(mb_type_path):
- old_mb_type = self.ReadFile(mb_type_path)
- if old_mb_type != new_mb_type:
- self.Print("Build type mismatch: was %s, will be %s, clobbering %s" %
- (old_mb_type, new_mb_type, path))
- needs_clobber = True
- else:
- # There is no 'mb_type' file in the build directory, so this probably
- # means that the prior build(s) were not done through mb, and we
- # have no idea if this was a GYP build or a GN build. Clobber it
- # to be safe.
- self.Print("%s/mb_type missing, clobbering to be safe" % path)
- needs_clobber = True
-
- if self.args.dryrun:
- return
-
- if needs_clobber:
- self.RemoveDirectory(build_dir)
-
- self.MaybeMakeDirectory(build_dir)
- self.WriteFile(mb_type_path, new_mb_type)
-
def RunGNGen(self, vals, compute_grit_inputs_for_analyze=False):
build_dir = self.args.path[0]
@@ -861,6 +707,7 @@ class MetaBuildWrapper(object):
return ret
android = 'target_os="android"' in vals['gn_args']
+ fuchsia = 'target_os="fuchsia"' in vals['gn_args']
for target in swarming_targets:
if android:
# Android targets may be either android_apk or executable. The former
@@ -870,6 +717,11 @@ class MetaBuildWrapper(object):
runtime_deps_targets = [
target + '.runtime_deps',
'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
+ elif fuchsia:
+ # Only emit a runtime deps file for the group() target on Fuchsia.
+ label = isolate_map[target]['label']
+ runtime_deps_targets = [
+ 'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
elif (isolate_map[target]['type'] == 'script' or
isolate_map[target].get('label_type') == 'group'):
# For script targets, the build target is usually a group,
@@ -1023,38 +875,6 @@ class MetaBuildWrapper(object):
gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
return gn_args
- def RunGYPGen(self, vals):
- path = self.args.path[0]
-
- output_dir = self.ParseGYPConfigPath(path)
- cmd, env = self.GYPCmd(output_dir, vals)
- ret, _, _ = self.Run(cmd, env=env)
- return ret
-
- def RunGYPAnalyze(self, vals):
- output_dir = self.ParseGYPConfigPath(self.args.path[0])
- if self.args.verbose:
- inp = self.ReadInputJSON(['files', 'test_targets',
- 'additional_compile_targets'])
- self.Print()
- self.Print('analyze input:')
- self.PrintJSON(inp)
- self.Print()
-
- cmd, env = self.GYPCmd(output_dir, vals)
- cmd.extend(['-f', 'analyzer',
- '-G', 'config_path=%s' % self.args.input_path[0],
- '-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
- ret, _, _ = self.Run(cmd, env=env)
- if not ret and self.args.verbose:
- outp = json.loads(self.ReadFile(self.args.output_path[0]))
- self.Print()
- self.Print('analyze output:')
- self.PrintJSON(outp)
- self.Print()
-
- return ret
-
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
self.ToSrcRelPath(build_path),
@@ -1066,86 +886,6 @@ class MetaBuildWrapper(object):
return path[2:].replace('/', self.sep)
return self.RelPath(path, self.chromium_src_dir)
- def ParseGYPConfigPath(self, path):
- rpath = self.ToSrcRelPath(path)
- output_dir, _, _ = rpath.rpartition(self.sep)
- return output_dir
-
- def GYPCmd(self, output_dir, vals):
- if vals['cros_passthrough']:
- if not 'GYP_DEFINES' in os.environ:
- raise MBErr('MB is expecting GYP_DEFINES to be in the environment')
- gyp_defines = os.environ['GYP_DEFINES']
- if not 'chromeos=1' in gyp_defines:
- raise MBErr('GYP_DEFINES is missing chromeos=1: (GYP_DEFINES=%s)' %
- gyp_defines)
- else:
- gyp_defines = vals['gyp_defines']
-
- goma_dir = self.args.goma_dir
-
- # GYP uses shlex.split() to split the gyp defines into separate arguments,
- # so we can support backslashes and and spaces in arguments by quoting
- # them, even on Windows, where this normally wouldn't work.
- if goma_dir and ('\\' in goma_dir or ' ' in goma_dir):
- goma_dir = "'%s'" % goma_dir
-
- if goma_dir:
- gyp_defines += ' gomadir=%s' % goma_dir
-
- android_version_code = self.args.android_version_code
- if android_version_code:
- gyp_defines += ' app_manifest_version_code=%s' % android_version_code
-
- android_version_name = self.args.android_version_name
- if android_version_name:
- gyp_defines += ' app_manifest_version_name=%s' % android_version_name
-
- cmd = [
- self.executable,
- self.args.gyp_script,
- '-G',
- 'output_dir=' + output_dir,
- ]
-
- # Ensure that we have an environment that only contains
- # the exact values of the GYP variables we need.
- env = os.environ.copy()
-
- # This is a terrible hack to work around the fact that
- # //tools/clang/scripts/update.py is invoked by GYP and GN but
- # currently relies on an environment variable to figure out
- # what revision to embed in the command line #defines.
- # For GN, we've made this work via a gn arg that will cause update.py
- # to get an additional command line arg, but getting that to work
- # via GYP_DEFINES has proven difficult, so we rewrite the GYP_DEFINES
- # to get rid of the arg and add the old var in, instead.
- # See crbug.com/582737 for more on this. This can hopefully all
- # go away with GYP.
- m = re.search('llvm_force_head_revision=1\s*', gyp_defines)
- if m:
- env['LLVM_FORCE_HEAD_REVISION'] = '1'
- gyp_defines = gyp_defines.replace(m.group(0), '')
-
- # This is another terrible hack to work around the fact that
- # GYP sets the link concurrency to use via the GYP_LINK_CONCURRENCY
- # environment variable, and not via a proper GYP_DEFINE. See
- # crbug.com/611491 for more on this.
- m = re.search('gyp_link_concurrency=(\d+)(\s*)', gyp_defines)
- if m:
- env['GYP_LINK_CONCURRENCY'] = m.group(1)
- gyp_defines = gyp_defines.replace(m.group(0), '')
-
- env['GYP_GENERATORS'] = 'ninja'
- if 'GYP_CHROMIUM_NO_ACTION' in env:
- del env['GYP_CHROMIUM_NO_ACTION']
- if 'GYP_CROSSCOMPILE' in env:
- del env['GYP_CROSSCOMPILE']
- env['GYP_DEFINES'] = gyp_defines
- if vals['gyp_crosscompile']:
- env['GYP_CROSSCOMPILE'] = '1'
- return cmd, env
-
def RunGNAnalyze(self, vals):
# Analyze runs before 'gn gen' now, so we need to run gn gen
# in order to ensure that we have a build directory.
@@ -1347,9 +1087,6 @@ class MetaBuildWrapper(object):
if env and var in env:
self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
- print_env('GYP_CROSSCOMPILE')
- print_env('GYP_DEFINES')
- print_env('GYP_LINK_CONCURRENCY')
print_env('LLVM_FORCE_HEAD_REVISION')
if cmd[0] == self.executable:
@@ -1486,7 +1223,6 @@ def QuoteForSet(arg):
def QuoteForCmd(arg):
# First, escape the arg so that CommandLineToArgvW will parse it properly.
- # From //tools/gyp/pylib/gyp/msvs_emulation.py:23.
if arg == '' or ' ' in arg or '"' in arg:
quote_re = re.compile(r'(\\*)"')
arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index 15763750da..0413457eab 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -65,8 +65,6 @@ class FakeMBW(mb.MetaBuildWrapper):
self.files[path] = contents
def Call(self, cmd, env=None, buffer_output=True):
- if env:
- self.cross_compile = env.get('GYP_CROSSCOMPILE')
self.calls.append(cmd)
if self.cmds:
return self.cmds.pop(0)
@@ -112,13 +110,10 @@ TEST_CONFIG = """\
'masters': {
'chromium': {},
'fake_master': {
- 'fake_builder': 'gyp_rel_bot',
- 'fake_gn_builder': 'gn_rel_bot',
- 'fake_gyp_crosscompile_builder': 'gyp_crosscompile',
- 'fake_gn_debug_builder': 'gn_debug_goma',
- 'fake_gyp_builder': 'gyp_debug',
- 'fake_gn_args_bot': '//build/args/bots/fake_master/fake_gn_args_bot.gn',
- 'fake_multi_phase': { 'phase_1': 'gn_phase_1', 'phase_2': 'gn_phase_2'},
+ 'fake_builder': 'rel_bot',
+ 'fake_debug_builder': 'debug_goma',
+ 'fake_args_bot': '//build/args/bots/fake_master/fake_args_bot.gn',
+ 'fake_multi_phase': { 'phase_1': 'phase_1', 'phase_2': 'phase_2'},
'fake_args_file': 'args_file_goma',
'fake_args_file_twice': 'args_file_twice',
},
@@ -126,38 +121,26 @@ TEST_CONFIG = """\
'configs': {
'args_file_goma': ['args_file', 'goma'],
'args_file_twice': ['args_file', 'args_file'],
- 'gyp_rel_bot': ['gyp', 'rel', 'goma'],
- 'gn_debug_goma': ['gn', 'debug', 'goma'],
- 'gyp_debug': ['gyp', 'debug', 'fake_feature1'],
- 'gn_rel_bot': ['gn', 'rel', 'goma'],
- 'gyp_crosscompile': ['gyp', 'crosscompile'],
- 'gn_phase_1': ['gn', 'phase_1'],
- 'gn_phase_2': ['gn', 'phase_2'],
+ 'rel_bot': ['rel', 'goma', 'fake_feature1'],
+ 'debug_goma': ['debug', 'goma'],
+ 'phase_1': ['phase_1'],
+ 'phase_2': ['phase_2'],
},
'mixins': {
- 'crosscompile': {
- 'gyp_crosscompile': True,
- },
'fake_feature1': {
'gn_args': 'enable_doom_melon=true',
- 'gyp_defines': 'doom_melon=1',
},
- 'gyp': {'type': 'gyp'},
- 'gn': {'type': 'gn'},
'goma': {
'gn_args': 'use_goma=true',
- 'gyp_defines': 'goma=1',
},
'args_file': {
'args_file': '//build/args/fake.gn',
},
'phase_1': {
'gn_args': 'phase=1',
- 'gyp_args': 'phase=1',
},
'phase_2': {
'gn_args': 'phase=2',
- 'gyp_args': 'phase=2',
},
'rel': {
'gn_args': 'is_debug=false',
@@ -169,28 +152,6 @@ TEST_CONFIG = """\
}
"""
-GYP_HACKS_CONFIG = """\
-{
- 'masters': {
- 'chromium': {},
- 'fake_master': {
- 'fake_builder': 'fake_config',
- },
- },
- 'configs': {
- 'fake_config': ['fake_mixin'],
- },
- 'mixins': {
- 'fake_mixin': {
- 'type': 'gyp',
- 'gn_args': '',
- 'gyp_defines':
- ('foo=bar llvm_force_head_revision=1 '
- 'gyp_link_concurrency=1 baz=1'),
- },
- },
-}
-"""
TRYSERVER_CONFIG = """\
{
@@ -229,7 +190,7 @@ class UnitTest(unittest.TestCase):
},
}''')
mbw.files.setdefault(
- mbw.ToAbsPath('//build/args/bots/fake_master/fake_gn_args_bot.gn'),
+ mbw.ToAbsPath('//build/args/bots/fake_master/fake_args_bot.gn'),
'is_debug = false\n')
if files:
for path, contents in files.items():
@@ -249,37 +210,6 @@ class UnitTest(unittest.TestCase):
self.assertEqual(mbw.err, err)
return mbw
- def test_clobber(self):
- files = {
- '/fake_src/out/Debug': None,
- '/fake_src/out/Debug/mb_type': None,
- }
- mbw = self.fake_mbw(files)
-
- # The first time we run this, the build dir doesn't exist, so no clobber.
- self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
- self.assertEqual(mbw.rmdirs, [])
- self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
-
- # The second time we run this, the build dir exists and matches, so no
- # clobber.
- self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
- self.assertEqual(mbw.rmdirs, [])
- self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
-
- # Now we switch build types; this should result in a clobber.
- self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
- self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug'])
- self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
-
- # Now we delete mb_type; this checks the case where the build dir
- # exists but wasn't populated by mb; this should also result in a clobber.
- del mbw.files['/fake_src/out/Debug/mb_type']
- self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
- self.assertEqual(mbw.rmdirs,
- ['/fake_src/out/Debug', '/fake_src/out/Debug'])
- self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
-
def test_analyze(self):
files = {'/tmp/in.json': '''{\
"files": ["foo/foo_unittest.cc"],
@@ -295,7 +225,7 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
self.assertEqual(out, {
@@ -319,7 +249,7 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
@@ -342,7 +272,7 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
@@ -369,7 +299,7 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw(files)
mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
- self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['analyze', '-c', 'debug_goma', '//out/Default',
'/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
out = json.loads(mbw.files['/tmp/out.json'])
@@ -379,9 +309,9 @@ class UnitTest(unittest.TestCase):
# test_targets and additional_compile_targets.
self.assertEqual(['all', 'foo_unittests'], out['compile_targets'])
- def test_gn_gen(self):
+ def test_gen(self):
mbw = self.fake_mbw()
- self.check(['gen', '-c', 'gn_debug_goma', '//out/Default', '-g', '/goma'],
+ self.check(['gen', '-c', 'debug_goma', '//out/Default', '-g', '/goma'],
mbw=mbw, ret=0)
self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
('goma_dir = "/goma"\n'
@@ -394,7 +324,7 @@ class UnitTest(unittest.TestCase):
mbw.out)
mbw = self.fake_mbw(win32=True)
- self.check(['gen', '-c', 'gn_debug_goma', '-g', 'c:\\goma', '//out/Debug'],
+ self.check(['gen', '-c', 'debug_goma', '-g', 'c:\\goma', '//out/Debug'],
mbw=mbw, ret=0)
self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
('goma_dir = "c:\\\\goma"\n'
@@ -404,14 +334,14 @@ class UnitTest(unittest.TestCase):
'--check\n', mbw.out)
mbw = self.fake_mbw()
- self.check(['gen', '-m', 'fake_master', '-b', 'fake_gn_args_bot',
+ self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_bot',
'//out/Debug'],
mbw=mbw, ret=0)
self.assertEqual(
mbw.files['/fake_src/out/Debug/args.gn'],
- 'import("//build/args/bots/fake_master/fake_gn_args_bot.gn")\n')
+ 'import("//build/args/bots/fake_master/fake_args_bot.gn")\n')
- def test_gn_gen_args_file_mixins(self):
+ def test_gen_args_file_mixins(self):
mbw = self.fake_mbw()
self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file',
'//out/Debug'], mbw=mbw, ret=0)
@@ -425,14 +355,14 @@ class UnitTest(unittest.TestCase):
self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file_twice',
'//out/Debug'], mbw=mbw, ret=1)
- def test_gn_gen_fails(self):
+ def test_gen_fails(self):
mbw = self.fake_mbw()
mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
- self.check(['gen', '-c', 'gn_debug_goma', '//out/Default'], mbw=mbw, ret=1)
+ self.check(['gen', '-c', 'debug_goma', '//out/Default'], mbw=mbw, ret=1)
# TODO(machenbach): Comment back in after swarming file parameter is used.
"""
- def test_gn_gen_swarming(self):
+ def test_gen_swarming(self):
files = {
'/tmp/swarming_targets': 'base_unittests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -448,7 +378,7 @@ class UnitTest(unittest.TestCase):
}
mbw = self.fake_mbw(files)
self.check(['gen',
- '-c', 'gn_debug_goma',
+ '-c', 'debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'//out/Default'], mbw=mbw, ret=0)
self.assertIn('/fake_src/out/Default/base_unittests.isolate',
@@ -456,7 +386,7 @@ class UnitTest(unittest.TestCase):
self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
mbw.files)
- def test_gn_gen_swarming_script(self):
+ def test_gen_swarming_script(self):
files = {
'/tmp/swarming_targets': 'cc_perftests\n',
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -473,7 +403,7 @@ class UnitTest(unittest.TestCase):
}
mbw = self.fake_mbw(files=files, win32=True)
self.check(['gen',
- '-c', 'gn_debug_goma',
+ '-c', 'debug_goma',
'--swarming-targets-file', '/tmp/swarming_targets',
'--isolate-map-file',
'/fake_src/testing/buildbot/gn_isolate_map.pyl',
@@ -482,9 +412,77 @@ class UnitTest(unittest.TestCase):
mbw.files)
self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
mbw.files)
- """ # pylint: disable=pointless-string-statement
- def test_gn_isolate(self):
+
+ def test_multiple_isolate_maps(self):
+ files = {
+ '/tmp/swarming_targets': 'cc_perftests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
+ "{'cc_perftests2': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
+ "cc_perftests\n"
+ ),
+ }
+ mbw = self.fake_mbw(files=files, win32=True)
+ self.check(['gen',
+ '-c', 'debug_goma',
+ '--swarming-targets-file', '/tmp/swarming_targets',
+ '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+ '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
+ '//out/Default'], mbw=mbw, ret=0)
+ self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
+ mbw.files)
+ self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
+ mbw.files)
+
+
+ def test_duplicate_isolate_maps(self):
+ files = {
+ '/tmp/swarming_targets': 'cc_perftests\n',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
+ "{'cc_perftests': {"
+ " 'label': '//cc:cc_perftests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ 'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
+ "cc_perftests\n"
+ ),
+ }
+ mbw = self.fake_mbw(files=files, win32=True)
+ # Check that passing duplicate targets into mb fails.
+ self.check(['gen',
+ '-c', 'debug_goma',
+ '--swarming-targets-file', '/tmp/swarming_targets',
+ '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+ '--isolate-map-file',
+ '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
+ '//out/Default'], mbw=mbw, ret=1)
+
+ def test_isolate(self):
files = {
'/fake_src/out/Default/toolchain.ninja': "",
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -498,7 +496,7 @@ class UnitTest(unittest.TestCase):
"base_unittests\n"
),
}
- self.check(['isolate', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['isolate', '-c', 'debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
# test running isolate on an existing build_dir
@@ -506,11 +504,10 @@ class UnitTest(unittest.TestCase):
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
- files['/fake_src/out/Default/mb_type'] = 'gn\n'
self.check(['isolate', '//out/Default', 'base_unittests'],
files=files, ret=0)
- def test_gn_run(self):
+ def test_run(self):
files = {
'/fake_src/testing/buildbot/gn_isolate_map.pyl': (
"{'base_unittests': {"
@@ -523,55 +520,51 @@ class UnitTest(unittest.TestCase):
"base_unittests\n"
),
}
- self.check(['run', '-c', 'gn_debug_goma', '//out/Default',
+ self.check(['run', '-c', 'debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
- def test_gn_lookup(self):
- self.check(['lookup', '-c', 'gn_debug_goma'], ret=0)
+ def test_run_swarmed(self):
+ files = {
+ '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+ "{'base_unittests': {"
+ " 'label': '//base:base_unittests',"
+ " 'type': 'raw',"
+ " 'args': [],"
+ "}}\n"
+ ),
+ '/fake_src/out/Default/base_unittests.runtime_deps': (
+ "base_unittests\n"
+ ),
+ }
+
+ def run_stub(cmd, **_kwargs):
+ if 'isolate.py' in cmd[1]:
+ return 0, 'fake_hash base_unittests', ''
+ else:
+ return 0, '', ''
+
+ mbw = self.fake_mbw(files=files)
+ mbw.Run = run_stub
+ self.check(['run', '-s', '-c', 'debug_goma', '//out/Default',
+ 'base_unittests'], mbw=mbw, ret=0)
+ self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7',
+ '//out/Default', 'base_unittests'], mbw=mbw, ret=0)
+ """ # pylint: disable=pointless-string-statement
- def test_gn_lookup_goma_dir_expansion(self):
- self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0,
+ def test_lookup(self):
+ self.check(['lookup', '-c', 'debug_goma'], ret=0)
+
+ def test_lookup_goma_dir_expansion(self):
+ self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
out=('\n'
'Writing """\\\n'
+ 'enable_doom_melon = true\n'
'goma_dir = "/foo"\n'
'is_debug = false\n'
'use_goma = true\n'
'""" to _path_/args.gn.\n\n'
'/fake_src/buildtools/linux64/gn gen _path_\n'))
- def test_gyp_analyze(self):
- mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release',
- '/tmp/in.json', '/tmp/out.json'], ret=0)
- self.assertIn('analyzer', mbw.calls[0])
-
- def test_gyp_crosscompile(self):
- mbw = self.fake_mbw()
- self.check(['gen', '-c', 'gyp_crosscompile', '//out/Release'],
- mbw=mbw, ret=0)
- self.assertTrue(mbw.cross_compile)
-
- def test_gyp_gen(self):
- self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'],
- ret=0,
- out=("GYP_DEFINES='goma=1 gomadir=/goma'\n"
- "python build/gyp_chromium -G output_dir=out\n"))
-
- mbw = self.fake_mbw(win32=True)
- self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'],
- mbw=mbw, ret=0,
- out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n"
- "python build\\gyp_chromium -G output_dir=out\n"))
-
- def test_gyp_gen_fails(self):
- mbw = self.fake_mbw()
- mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
- self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1)
-
- def test_gyp_lookup_goma_dir_expansion(self):
- self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0,
- out=("GYP_DEFINES='goma=1 gomadir=/foo'\n"
- "python build/gyp_chromium -G output_dir=_path_\n"))
-
def test_help(self):
orig_stdout = sys.stdout
try:
@@ -589,7 +582,7 @@ class UnitTest(unittest.TestCase):
self.assertIn('Must specify a build --phase', mbw.out)
# Check that passing a --phase to a single-phase builder fails.
- mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_gn_builder',
+ mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_builder',
'--phase', 'phase_1'], ret=1)
self.assertIn('Must not specify a build --phase', mbw.out)
@@ -611,16 +604,6 @@ class UnitTest(unittest.TestCase):
mbw = self.fake_mbw()
self.check(['validate'], mbw=mbw, ret=0)
- def test_gyp_env_hacks(self):
- mbw = self.fake_mbw()
- mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
- self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
- ret=0,
- out=("GYP_DEFINES='foo=bar baz=1'\n"
- "GYP_LINK_CONCURRENCY=1\n"
- "LLVM_FORCE_HEAD_REVISION=1\n"
- "python build/gyp_chromium -G output_dir=_path_\n"))
-
def test_buildbucket(self):
mbw = self.fake_mbw()
mbw.files[mbw.default_config] = TRYSERVER_CONFIG
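
The rewritten TEST_CONFIG reflects MB losing its GYP mode: the 'type' and 'gyp_defines'/'gyp_args' mixin keys disappear, builders map to flat config names, and a config is now just a list of mixins whose gn_args get concatenated. A minimal sketch of that resolution step, assuming mixins contribute only gn_args (the real MB also supports nested mixins and args files):

    def resolve_gn_args(config_name, configs, mixins):
        """Flatten a config's mixin list into one gn_args string."""
        args = []
        for mixin_name in configs[config_name]:
            mixin = mixins[mixin_name]
            if 'gn_args' in mixin:
                args.append(mixin['gn_args'])
        return ' '.join(args)

    mixins = {
        'rel': {'gn_args': 'is_debug=false'},
        'goma': {'gn_args': 'use_goma=true'},
        'fake_feature1': {'gn_args': 'enable_doom_melon=true'},
    }
    configs = {'rel_bot': ['rel', 'goma', 'fake_feature1']}

    print resolve_gn_args('rel_bot', configs, mixins)
    # is_debug=false use_goma=true enable_doom_melon=true
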
diff --git a/deps/v8/tools/mingw-generate-makefiles.sh b/deps/v8/tools/mingw-generate-makefiles.sh
deleted file mode 100755
index 67715fc15b..0000000000
--- a/deps/v8/tools/mingw-generate-makefiles.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/sh
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Monkey-patch GYP.
-cat > tools/gyp/gyp.mingw << EOF
-#!/usr/bin/env python
-
-# Copyright (c) 2009 Google Inc. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import sys
-
-# TODO(mark): sys.path manipulation is some temporary testing stuff.
-try:
- import gyp
-except ImportError, e:
- import os.path
- sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
- import gyp
-
-def MonkeyBuildFileTargets(target_list, build_file):
- """From a target_list, returns the subset from the specified build_file.
- """
- build_file = build_file.replace('/', '\\\\')
- return [p for p in target_list if gyp.common.BuildFile(p) == build_file]
-gyp.common.BuildFileTargets = MonkeyBuildFileTargets
-
-import gyp.generator.make
-import os
-def Monkey_ITIP(self):
- """Returns the location of the final output for an installable target."""
- sep = os.path.sep
- # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
- # rely on this. Emulate this behavior for mac.
- if (self.type == 'shared_library' and
- (self.flavor != 'mac' or self.toolset != 'target')):
- # Install all shared libs into a common directory (per toolset) for
- # convenient access with LD_LIBRARY_PATH.
- return '\$(builddir)%slib.%s%s%s' % (sep, self.toolset, sep, self.alias)
- return '\$(builddir)' + sep + self.alias
-gyp.generator.make.MakefileWriter._InstallableTargetInstallPath = Monkey_ITIP
-
-if __name__ == '__main__':
- sys.exit(gyp.main(sys.argv[1:]))
-EOF
-
-# Delete old generated Makefiles.
-find out -name '*.mk' -or -name 'Makefile*' -exec rm {} \;
-
-# Generate fresh Makefiles.
-mv tools/gyp/gyp tools/gyp/gyp.original
-mv tools/gyp/gyp.mingw tools/gyp/gyp
-make out/Makefile.ia32
-mv tools/gyp/gyp tools/gyp/gyp.mingw
-mv tools/gyp/gyp.original tools/gyp/gyp
-
-# Patch generated Makefiles: replace most backslashes with forward slashes,
-# fix library names in linker flags.
-FILES=$(find out -name '*.mk' -or -name 'Makefile*')
-for F in $FILES ; do
- echo "Patching $F..."
- cp $F $F.orig
- cat $F.orig \
- | sed -e 's|\([)a-zA-Z0-9]\)\\\([a-zA-Z]\)|\1/\2|g' \
- -e 's|\([)a-zA-Z0-9]\)\\\\\([a-zA-Z]\)|\1/\2|g' \
- -e 's|'%s/n'|'%s\\\\n'|g' \
- -e 's|-lwinmm\.lib|-lwinmm|g' \
- -e 's|-lws2_32\.lib|-lws2_32|g' \
- > $F
- rm $F.orig
-done
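
The deleted MinGW helper monkey-patched GYP, regenerated the Makefiles, and then post-processed them with sed: backslash path separators were turned back into forward slashes, and the MSVC-style .lib suffixes were stripped from MinGW linker flags. Roughly the same per-line rewrite expressed in Python, purely for reference (patch_makefile_line is hypothetical, not part of the patch):

    import re

    def patch_makefile_line(line):
        # Backslash-separated paths become forward slashes...
        line = re.sub(r'([)a-zA-Z0-9])\\([a-zA-Z])', r'\1/\2', line)
        # ...and MSVC-style library names become MinGW ones.
        line = line.replace('-lwinmm.lib', '-lwinmm')
        return line.replace('-lws2_32.lib', '-lws2_32')

    print patch_makefile_line('out\\Release\\obj_target -lwinmm.lib')
    # out/Release/obj_target -lwinmm
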
diff --git a/deps/v8/tools/mips_toolchain.tar.gz.sha1 b/deps/v8/tools/mips_toolchain.tar.gz.sha1
new file mode 100644
index 0000000000..8d4572336a
--- /dev/null
+++ b/deps/v8/tools/mips_toolchain.tar.gz.sha1
@@ -0,0 +1 @@
+d51b5d903340262d8d13ecd51054c16a901b3cf3 \ No newline at end of file
diff --git a/deps/v8/tools/node/build_gn.py b/deps/v8/tools/node/build_gn.py
index 8ab2a635ea..e95c3491e7 100755
--- a/deps/v8/tools/node/build_gn.py
+++ b/deps/v8/tools/node/build_gn.py
@@ -17,6 +17,7 @@ are represented as 1/0. E.g.
v8_enable_disassembler=0
"""
+import argparse
import os
import subprocess
import sys
@@ -31,52 +32,71 @@ GN_ARGS = [
"use_sysroot = false",
]
-BUILD_SUBDIR = "gn"
+BUILD_TARGET = "v8_monolith"
-# TODO: make this cross-platform.
-GN_SUBDIR = ["buildtools", "linux64", "gn"]
+def FindGn(options):
+ if options.host_os == "linux":
+ os_path = "linux64"
+ elif options.host_os == "mac":
+ os_path = "mac"
+ elif options.host_os == "win":
+ os_path = "win"
+ else:
+ raise "Operating system not supported by GN"
+ return os.path.join(options.v8_path, "buildtools", os_path, "gn")
-def Build(v8_path, build_path, depot_tools, is_debug, build_flags):
+def GenerateBuildFiles(options):
print "Setting GN args."
- lines = []
- lines.extend(GN_ARGS)
- for flag in build_flags:
+ gn = FindGn(options)
+ gn_args = []
+ gn_args.extend(GN_ARGS)
+ for flag in options.flag:
flag = flag.replace("=1", "=true")
flag = flag.replace("=0", "=false")
flag = flag.replace("target_cpu=ia32", "target_cpu=\"x86\"")
- lines.append(flag)
- lines.append("is_debug = %s" % ("true" if is_debug else "false"))
- with open(os.path.join(build_path, "args.gn"), "w") as args_file:
- args_file.write("\n".join(lines))
- gn = os.path.join(v8_path, *GN_SUBDIR)
- subprocess.check_call([gn, "gen", "-C", build_path], cwd=v8_path)
- ninja = os.path.join(depot_tools, "ninja")
+ gn_args.append(flag)
+ if options.mode == "DEBUG":
+ gn_args.append("is_debug = true")
+ else:
+ gn_args.append("is_debug = false")
+
+ if not os.path.isdir(options.build_path):
+ os.makedirs(options.build_path)
+ with open(os.path.join(options.build_path, "args.gn"), "w") as args_file:
+ args_file.write("\n".join(gn_args))
+ subprocess.check_call([gn, "gen", "-C", options.build_path],
+ cwd=options.v8_path)
+
+def Build(options):
print "Building."
- subprocess.check_call([ninja, "-v", "-C", build_path, "v8_monolith"],
- cwd=v8_path)
+ depot_tools = node_common.EnsureDepotTools(options.v8_path, False)
+ ninja = os.path.join(depot_tools, "ninja")
+ subprocess.check_call([ninja, "-v", "-C", options.build_path, BUILD_TARGET],
+ cwd=options.v8_path)
+
+def ParseOptions(args):
+ parser = argparse.ArgumentParser(
+ description="Build %s with GN" % BUILD_TARGET)
+ parser.add_argument("--mode", help="Build mode (Release/Debug)")
+ parser.add_argument("--v8_path", help="Path to V8")
+ parser.add_argument("--build_path", help="Path to build result")
+ parser.add_argument("--flag", help="Translate GYP flag to GN",
+ action="append")
+ parser.add_argument("--host_os", help="Current operating system")
+ options = parser.parse_args(args)
-def Main(v8_path, build_path, is_debug, build_flags):
- # Verify paths.
- v8_path = os.path.abspath(v8_path)
- assert os.path.isdir(v8_path)
- build_path = os.path.abspath(build_path)
- build_path = os.path.join(build_path, BUILD_SUBDIR)
- if not os.path.isdir(build_path):
- os.makedirs(build_path)
+ assert options.host_os
+ assert options.mode == "Debug" or options.mode == "Release"
- # Check that we have depot tools.
- depot_tools = node_common.EnsureDepotTools(v8_path, False)
+ assert options.v8_path
+ options.v8_path = os.path.abspath(options.v8_path)
+ assert os.path.isdir(options.v8_path)
- # Build with GN.
- Build(v8_path, build_path, depot_tools, is_debug, build_flags)
+ assert options.build_path
+ options.build_path = os.path.abspath(options.build_path)
+ return options
if __name__ == "__main__":
- # TODO: use argparse to parse arguments.
- build_mode = sys.argv[1]
- v8_path = sys.argv[2]
- build_path = sys.argv[3]
- assert build_mode == "Debug" or build_mode == "Release"
- is_debug = build_mode == "Debug"
- # TODO: introduce "--" flag for pass-through flags.
- build_flags = sys.argv[4:]
- Main(v8_path, build_path, is_debug, build_flags)
+ options = ParseOptions(sys.argv[1:])
+ GenerateBuildFiles(options)
+ Build(options)
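
After the argparse conversion, build_gn.py is driven entirely by named options rather than positional arguments, with --flag repeatable for each GYP-style definition to translate. A hedged usage sketch (all paths here are placeholders):

    import subprocess

    subprocess.check_call([
        "python", "deps/v8/tools/node/build_gn.py",
        "--mode", "Release",                  # asserted to be "Debug" or "Release"
        "--v8_path", "deps/v8",               # placeholder V8 checkout
        "--build_path", "out/v8build",        # args.gn and ninja output go here
        "--host_os", "linux",                 # picks buildtools/linux64/gn
        "--flag", "v8_enable_disassembler=1", # repeatable; =1/=0 become true/false
    ])
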
diff --git a/deps/v8/tools/node/fetch_deps.py b/deps/v8/tools/node/fetch_deps.py
index a3e6d74917..09a4e6cb97 100755
--- a/deps/v8/tools/node/fetch_deps.py
+++ b/deps/v8/tools/node/fetch_deps.py
@@ -32,7 +32,6 @@ GCLIENT_SOLUTION = [
"v8/test/mozilla/data" : None,
"v8/test/test262/data" : None,
"v8/test/test262/harness" : None,
- "v8/test/wasm-js" : None,
"v8/third_party/android_tools" : None,
"v8/third_party/catapult" : None,
"v8/third_party/colorama/src" : None,
@@ -41,9 +40,6 @@ GCLIENT_SOLUTION = [
"v8/tools/luci-go" : None,
"v8/tools/swarming_client" : None,
},
- "custom_vars": {
- "build_for_node" : True,
- },
},
]
@@ -56,6 +52,8 @@ def EnsureGit(v8_path):
return False
print "Initializing temporary git repository in v8."
subprocess.check_call(["git", "init"], cwd=v8_path)
+ subprocess.check_call(["git", "config", "user.name", "\"Ada Lovelace\""], cwd=v8_path)
+ subprocess.check_call(["git", "config", "user.email", "\"ada@lovela.ce\""], cwd=v8_path)
subprocess.check_call(["git", "commit", "--allow-empty", "-m", "init"],
cwd=v8_path)
return True
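
The two git config calls exist because EnsureGit commits in a directory that was git init-ed moments earlier: without a configured user.name and user.email, git commit refuses to run on pristine bot checkouts, and the dummy identity never leaves the temporary repository. The essential sequence (a sketch; the identity strings are arbitrary placeholders):

    import subprocess

    def init_and_commit(repo_path):
        # A fresh `git init` carries no identity, and `git commit`
        # aborts with "Please tell me who you are" without one.
        for args in [["init"],
                     ["config", "user.name", "Ada Lovelace"],
                     ["config", "user.email", "ada@lovela.ce"],
                     ["commit", "--allow-empty", "-m", "init"]]:
            subprocess.check_call(["git"] + args, cwd=repo_path)
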
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
index ebd953a903..5d7e4daff4 100755
--- a/deps/v8/tools/node/update_node.py
+++ b/deps/v8/tools/node/update_node.py
@@ -54,6 +54,9 @@ ADD_TO_GITIGNORE = [ "/testing/gtest/*",
"!/third_party/jinja2",
"!/third_party/markupsafe" ]
+# Node.js owns deps/v8/gypfiles in its downstream repository.
+FILES_TO_KEEP = [ "gypfiles" ]
+
def RunGclient(path):
assert os.path.isdir(path)
print ">> Running gclient sync"
@@ -73,7 +76,7 @@ def CommitPatch(options):
cwd=options.v8_path,
)
-def UpdateTarget(repository, options):
+def UpdateTarget(repository, options, files_to_keep):
source = os.path.join(options.v8_path, *repository)
target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
print ">> Updating target directory %s" % target
@@ -83,16 +86,24 @@ def UpdateTarget(repository, options):
# Remove possible remnants of previous incomplete runs.
node_common.UninitGit(target)
- git_commands = [
- ["git", "init"], # initialize target repo
- ["git", "remote", "add", "origin", source], # point to the source repo
- ["git", "fetch", "origin", "HEAD"], # sync to the current branch
- ["git", "reset", "--hard", "FETCH_HEAD"], # reset to the current branch
- ["git", "clean", "-fd"], # delete removed files
- ]
+ git_args = []
+ git_args.append(["init"]) # initialize target repo
+
+ if files_to_keep:
+ git_args.append(["add"] + files_to_keep) # add and commit
+ git_args.append(["commit", "-m", "keep files"]) # files we want to keep
+
+ git_args.append(["remote", "add", "source", source]) # point to source repo
+ git_args.append(["fetch", "source", "HEAD"]) # sync to current branch
+ git_args.append(["checkout", "-f", "FETCH_HEAD"]) # switch to that branch
+ git_args.append(["clean", "-fd"]) # delete removed files
+
+ if files_to_keep:
+ git_args.append(["cherry-pick", "master"]) # restore kept files
+
try:
- for command in git_commands:
- subprocess.check_call(command, cwd=target)
+ for args in git_args:
+ subprocess.check_call(["git"] + args, cwd=target)
except:
raise
finally:
@@ -155,11 +166,11 @@ def Main(args):
if options.with_patch:
CommitPatch(options)
# Update main V8 repository.
- UpdateTarget([""], options)
+ UpdateTarget([""], options, FILES_TO_KEEP)
# Patch .gitignore before updating sub-repositories.
UpdateGitIgnore(options)
for repo in SUB_REPOSITORIES:
- UpdateTarget(repo, options)
+ UpdateTarget(repo, options, None)
if options.commit:
CreateCommit(options)
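
UpdateTarget's new git sequence is what lets Node.js keep ownership of deps/v8/gypfiles across a sync: the kept paths are committed first in the freshly initialized target repo, the tree is then forced to match the source checkout, and finally that initial commit is cherry-picked back on top. Condensed into a sketch (assumes the kept paths already exist in the target and that the initial commit lands on master, as it did with git defaults of the time):

    import subprocess

    def sync_keeping(target, source, files_to_keep):
        def git(*args):
            subprocess.check_call(["git"] + list(args), cwd=target)

        git("init")
        if files_to_keep:
            git("add", *files_to_keep)         # snapshot the files Node.js owns
            git("commit", "-m", "keep files")  # becomes the local master tip
        git("remote", "add", "source", source)
        git("fetch", "source", "HEAD")
        git("checkout", "-f", "FETCH_HEAD")    # tree now mirrors the source
        git("clean", "-fd")                    # drop anything the source removed
        if files_to_keep:
            git("cherry-pick", "master")       # re-apply the kept snapshot
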
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index 1a49223996..bcee2b8258 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -39,7 +39,6 @@
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
-#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/parsing/scanner-character-streams.h"
@@ -59,9 +58,9 @@ class StringResource8 : public v8::String::ExternalOneByteStringResource {
int length_;
};
-std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
- const char* fname, Encoding encoding, int repeat, v8::Isolate* isolate,
- v8::Local<v8::Context> context) {
+v8::base::TimeDelta RunBaselineParser(const char* fname, Encoding encoding,
+ int repeat, v8::Isolate* isolate,
+ v8::Local<v8::Context> context) {
int length = 0;
const byte* source = ReadFileAndRepeat(fname, &length, repeat);
v8::Local<v8::String> source_handle;
@@ -87,42 +86,21 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
break;
}
}
- v8::base::TimeDelta parse_time1, parse_time2;
+ v8::base::TimeDelta parse_time1;
Handle<Script> script =
reinterpret_cast<i::Isolate*>(isolate)->factory()->NewScript(
v8::Utils::OpenHandle(*source_handle));
- i::ScriptData* cached_data_impl = NULL;
- // First round of parsing (produce data to cache).
- {
- ParseInfo info(script);
- info.set_cached_data(&cached_data_impl);
- info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
- v8::base::ElapsedTimer timer;
- timer.Start();
- bool success =
- parsing::ParseProgram(&info, reinterpret_cast<i::Isolate*>(isolate));
- parse_time1 = timer.Elapsed();
- if (!success) {
- fprintf(stderr, "Parsing failed\n");
- return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta());
- }
- }
- // Second round of parsing (consume cached data).
- {
- ParseInfo info(script);
- info.set_cached_data(&cached_data_impl);
- info.set_compile_options(v8::ScriptCompiler::kConsumeParserCache);
- v8::base::ElapsedTimer timer;
- timer.Start();
- bool success =
- parsing::ParseProgram(&info, reinterpret_cast<i::Isolate*>(isolate));
- parse_time2 = timer.Elapsed();
- if (!success) {
- fprintf(stderr, "Parsing failed\n");
- return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta());
- }
+ ParseInfo info(script);
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ bool success =
+ parsing::ParseProgram(&info, reinterpret_cast<i::Isolate*>(isolate));
+ parse_time1 = timer.Elapsed();
+ if (!success) {
+ fprintf(stderr, "Parsing failed\n");
+ return v8::base::TimeDelta();
}
- return std::make_pair(parse_time1, parse_time2);
+ return parse_time1;
}
@@ -167,19 +145,14 @@ int main(int argc, char* argv[]) {
{
v8::Context::Scope scope(context);
double first_parse_total = 0;
- double second_parse_total = 0;
for (size_t i = 0; i < fnames.size(); i++) {
- std::pair<v8::base::TimeDelta, v8::base::TimeDelta> time =
- RunBaselineParser(fnames[i].c_str(), encoding, repeat, isolate,
- context);
- first_parse_total += time.first.InMillisecondsF();
- second_parse_total += time.second.InMillisecondsF();
+ v8::base::TimeDelta time = RunBaselineParser(
+ fnames[i].c_str(), encoding, repeat, isolate, context);
+ first_parse_total += time.InMillisecondsF();
}
if (benchmark.empty()) benchmark = "Baseline";
- printf("%s(FirstParseRunTime): %.f ms\n", benchmark.c_str(),
+ printf("%s(ParseRunTime): %.f ms\n", benchmark.c_str(),
first_parse_total);
- printf("%s(SecondParseRunTime): %.f ms\n", benchmark.c_str(),
- second_parse_total);
}
}
v8::V8::Dispose();
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 9ac26ddb16..917b6e2383 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -52,6 +52,7 @@ from testrunner.local import utils
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
+# We now run our own header guard check in PRESUBMIT.py.
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
@@ -280,8 +281,7 @@ class SourceProcessor(SourceFileProcessor):
Check that all files include a copyright notice and no trailing whitespaces.
"""
- RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
- '.status', '.gyp', '.gypi']
+ RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', '.status']
def __init__(self):
self.runtime_function_call_pattern = self.CreateRuntimeFunctionCallMatcher()
@@ -331,7 +331,7 @@ class SourceProcessor(SourceFileProcessor):
def IgnoreDir(self, name):
return (super(SourceProcessor, self).IgnoreDir(name) or
- name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))
+ name in ('third_party', 'out', 'obj', 'DerivedSources'))
IGNORE_COPYRIGHTS = ['box2d.js',
'cpplint.py',
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index d295e37d64..5dd60df459 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -51,6 +51,7 @@ PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("include", "v8-version.h")
WATCHLISTS_FILE = "WATCHLISTS"
+RELEASE_WORKDIR = "/tmp/v8-release-scripts-work-dir/"
# V8 base directory.
V8_BASE = os.path.dirname(
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index 877d121b49..bf526bf5d8 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -241,11 +241,12 @@ class MergeToBranch(ScriptsBase):
def _Config(self):
return {
"BRANCHNAME": "prepare-merge",
- "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+ "PERSISTFILE_BASENAME": RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
"ALREADY_MERGING_SENTINEL_FILE":
- "/tmp/v8-merge-to-branch-tempfile-already-merging",
- "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
- "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+ RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
+ "TEMPORARY_PATCH_FILE":
+ RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
+ "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
}
def _Steps(self):
diff --git a/deps/v8/tools/release/roll_merge.py b/deps/v8/tools/release/roll_merge.py
index 2dd43eae3a..44ed858f7e 100755
--- a/deps/v8/tools/release/roll_merge.py
+++ b/deps/v8/tools/release/roll_merge.py
@@ -262,11 +262,13 @@ class RollMerge(ScriptsBase):
def _Config(self):
return {
"BRANCHNAME": "prepare-merge",
- "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+ "PERSISTFILE_BASENAME":
+ RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
"ALREADY_MERGING_SENTINEL_FILE":
- "/tmp/v8-merge-to-branch-tempfile-already-merging",
- "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
- "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+ RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
+ "TEMPORARY_PATCH_FILE":
+ RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
+ "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
}
def _Steps(self):
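
Both merge scripts stop writing straight into /tmp and instead derive every temp path from the RELEASE_WORKDIR constant added to common_includes.py, so all release-script state lives under one prefix that can be inspected or wiped in a single step. The pattern, reduced (a sketch; the cleanup helper is illustrative, not from the patch):

    import os
    import shutil

    RELEASE_WORKDIR = "/tmp/v8-release-scripts-work-dir/"

    CONFIG = {
        "PERSISTFILE_BASENAME": RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
        "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
    }

    def wipe_release_state():
        # One recursive delete clears every script's leftovers.
        if os.path.isdir(RELEASE_WORKDIR):
            shutil.rmtree(RELEASE_WORKDIR)
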
diff --git a/deps/v8/tools/release/update_node.py b/deps/v8/tools/release/update_node.py
deleted file mode 100755
index d060e5c615..0000000000
--- a/deps/v8/tools/release/update_node.py
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Use this script to update V8 in a Node.js checkout.
-
-Requirements:
- - Node.js checkout in which V8 should be updated.
- - V8 checkout at the commit to which Node.js should be updated.
-
-Usage:
- $ update_node.py <path_to_v8> <path_to_node>
-
- This will synchronize the content of <path_to_node>/deps/v8 with <path_to_v8>,
- and a few V8 dependencies required in Node.js. It will also update .gitignore
- appropriately.
-
-Optional flags:
- --gclient Run `gclient sync` on the V8 checkout before updating.
- --commit Create commit with the updated V8 in the Node.js checkout.
- --with-patch Also include currently staged files in the V8 checkout.
-"""
-
-import argparse
-import os
-import shutil
-import subprocess
-import sys
-import stat
-
-TARGET_SUBDIR = os.path.join("deps", "v8")
-
-SUB_REPOSITORIES = [ ["base", "trace_event", "common"],
- ["testing", "gtest"],
- ["third_party", "jinja2"],
- ["third_party", "markupsafe"] ]
-
-DELETE_FROM_GITIGNORE = [ "/base",
- "/testing/gtest",
- "/third_party/jinja2",
- "/third_party/markupsafe" ]
-
-# Node.js requires only a single header file from gtest to build V8.
-# Both jinja2 and markupsafe are required to generate part of the inspector.
-ADD_TO_GITIGNORE = [ "/testing/gtest/*",
- "!/testing/gtest/include",
- "/testing/gtest/include/*",
- "!/testing/gtest/include/gtest",
- "/testing/gtest/include/gtest/*",
- "!/testing/gtest/include/gtest/gtest_prod.h",
- "!/third_party/jinja2",
- "!/third_party/markupsafe" ]
-
-def RunGclient(path):
- assert os.path.isdir(path)
- print ">> Running gclient sync"
- subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path)
-
-def UninitGit(path):
- target = os.path.join(path, ".git")
- if os.path.isdir(target):
- print ">> Cleaning up %s" % path
- def OnRmError(func, path, exec_info):
- # This might happen on Windows
- os.chmod(path, stat.S_IWRITE)
- os.unlink(path)
- shutil.rmtree(target, onerror=OnRmError)
-
-def CommitPatch(options):
- """Makes a dummy commit for the changes in the index.
-
- On trybots, bot_update applies the patch to the index. We commit it to make
- the fake git clone fetch it into node.js. We can leave the commit, as
- bot_update will ensure a clean state on each run.
- """
- print ">> Committing patch"
- subprocess.check_call(
- ["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
- "commit", "--allow-empty", "-m", "placeholder-commit"],
- cwd=options.v8_path,
- )
-
-def UpdateTarget(repository, options):
- source = os.path.join(options.v8_path, *repository)
- target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
- print ">> Updating target directory %s" % target
- print ">> from active branch at %s" % source
- if not os.path.exists(target):
- os.makedirs(target)
- # Remove possible remnants of previous incomplete runs.
- UninitGit(target)
-
- git_commands = [
- ["git", "init"], # initialize target repo
- ["git", "remote", "add", "origin", source], # point to the source repo
- ["git", "fetch", "origin", "HEAD"], # sync to the current branch
- ["git", "reset", "--hard", "FETCH_HEAD"], # reset to the current branch
- ["git", "clean", "-fd"], # delete removed files
- ]
- try:
- for command in git_commands:
- subprocess.check_call(command, cwd=target)
- except:
- raise
- finally:
- UninitGit(target)
-
-def UpdateGitIgnore(options):
- file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore")
- assert os.path.isfile(file_name)
- print ">> Updating .gitignore with lines"
- with open(file_name) as gitignore:
- content = gitignore.readlines()
- content = [x.strip() for x in content]
- for x in DELETE_FROM_GITIGNORE:
- if x in content:
- print "- %s" % x
- content.remove(x)
- for x in ADD_TO_GITIGNORE:
- if x not in content:
- print "+ %s" % x
- content.append(x)
- content.sort(key=lambda x: x[1:] if x.startswith("!") else x)
- with open(file_name, "w") as gitignore:
- for x in content:
- gitignore.write("%s\n" % x)
-
-def CreateCommit(options):
- print ">> Creating commit."
- # Find git hash from source.
- githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
- cwd=options.v8_path).strip()
- # Create commit at target.
- git_commands = [
- ["git", "checkout", "-b", "update_v8_to_%s" % githash], # new branch
- ["git", "add", "."], # add files
- ["git", "commit", "-m", "Update V8 to %s" % githash] # new commit
- ]
- for command in git_commands:
- subprocess.check_call(command, cwd=options.node_path)
-
-def ParseOptions(args):
- parser = argparse.ArgumentParser(description="Update V8 in Node.js")
- parser.add_argument("v8_path", help="Path to V8 checkout")
- parser.add_argument("node_path", help="Path to Node.js checkout")
- parser.add_argument("--gclient", action="store_true", help="Run gclient sync")
- parser.add_argument("--commit", action="store_true", help="Create commit")
- parser.add_argument("--with-patch", action="store_true",
- help="Apply also staged files")
- options = parser.parse_args(args)
- assert os.path.isdir(options.v8_path)
- options.v8_path = os.path.abspath(options.v8_path)
- assert os.path.isdir(options.node_path)
- options.node_path = os.path.abspath(options.node_path)
- return options
-
-def Main(args):
- options = ParseOptions(args)
- if options.gclient:
- RunGclient(options.v8_path)
- # Commit patch on trybots to main V8 repository.
- if options.with_patch:
- CommitPatch(options)
- # Update main V8 repository.
- UpdateTarget([""], options)
- # Patch .gitignore before updating sub-repositories.
- UpdateGitIgnore(options)
- for repo in SUB_REPOSITORIES:
- UpdateTarget(repo, options)
- if options.commit:
- CreateCommit(options)
-
-if __name__ == "__main__":
- Main(sys.argv[1:])
diff --git a/deps/v8/tools/run-deopt-fuzzer.gyp b/deps/v8/tools/run-deopt-fuzzer.gyp
deleted file mode 100644
index 9eb6b538bc..0000000000
--- a/deps/v8/tools/run-deopt-fuzzer.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'run_deopt_fuzzer_run',
- 'type': 'none',
- 'dependencies': [
- '../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'run-deopt-fuzzer.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/run-deopt-fuzzer.isolate b/deps/v8/tools/run-deopt-fuzzer.isolate
deleted file mode 100644
index 196fb5dbbc..0000000000
--- a/deps/v8/tools/run-deopt-fuzzer.isolate
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'variables': {
- 'command': [
- 'run-deopt-fuzzer.py',
- ],
- 'files': [
- 'run-deopt-fuzzer.py',
- ],
- },
- 'includes': [
- 'testrunner/testrunner.isolate',
- '../src/d8.isolate',
- '../test/mjsunit/mjsunit.isolate',
- '../test/webkit/webkit.isolate',
- ],
-}
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
deleted file mode 100755
index ac2344b530..0000000000
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import sys
-
-from testrunner import deopt_fuzzer
-
-
-if __name__ == "__main__":
- sys.exit(deopt_fuzzer.DeoptFuzzer().execute())
diff --git a/deps/v8/tools/run-num-fuzzer.gyp b/deps/v8/tools/run-num-fuzzer.gyp
deleted file mode 100644
index bd3b9d6423..0000000000
--- a/deps/v8/tools/run-num-fuzzer.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'run_num_fuzzer_run',
- 'type': 'none',
- 'dependencies': [
- '../src/d8.gyp:d8_run',
- ],
- 'includes': [
- '../gypfiles/features.gypi',
- '../gypfiles/isolate.gypi',
- ],
- 'sources': [
- 'run-num-fuzzer.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/tools/run-num-fuzzer.isolate b/deps/v8/tools/run-num-fuzzer.isolate
index d0aca421a7..e9acbd4cb0 100644
--- a/deps/v8/tools/run-num-fuzzer.isolate
+++ b/deps/v8/tools/run-num-fuzzer.isolate
@@ -4,11 +4,10 @@
{
'variables': {
'command': [
- 'run-deopt-fuzzer.py',
+ 'run-num-fuzzer.py',
],
'files': [
- 'run-deopt-fuzzer.py',
- 'run-gc-fuzzer.py',
+ 'run-num-fuzzer.py',
],
},
'includes': [
diff --git a/deps/v8/tools/run-gc-fuzzer.py b/deps/v8/tools/run-num-fuzzer.py
index 6311d4fd29..9b5a065158 100755
--- a/deps/v8/tools/run-gc-fuzzer.py
+++ b/deps/v8/tools/run-num-fuzzer.py
@@ -1,14 +1,14 @@
#!/usr/bin/env python
#
-# Copyright 2017 the V8 project authors. All rights reserved.
+# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
-from testrunner import gc_fuzzer
+from testrunner import num_fuzzer
if __name__ == "__main__":
- sys.exit(gc_fuzzer.GCFuzzer().execute())
+ sys.exit(num_fuzzer.NumFuzzer().execute())
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 3823eb510c..e19f6a056b 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -1116,7 +1116,8 @@ def Main(args):
# Traverse graph/trace tree and iterate over all runnables.
for runnable in FlattenRunnables(root, NodeCB):
runnable_name = "/".join(runnable.graphs)
- if not runnable_name.startswith(options.filter):
+ if (not runnable_name.startswith(options.filter) and
+ runnable_name + "/" != options.filter):
continue
print ">>> Running suite: %s" % runnable_name
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 8fc09eed7b..7721360e2a 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -5,8 +5,10 @@
from collections import OrderedDict
import json
+import multiprocessing
import optparse
import os
+import shlex
import sys
@@ -17,10 +19,14 @@ sys.path.insert(
os.path.dirname(os.path.abspath(__file__))))
-from local import testsuite
-from local import utils
-
-from testproc.shard import ShardProc
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.test_config import TestConfig
+from testrunner.testproc import progress
+from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.shard import ShardProc
+from testrunner.testproc.sigproc import SignalProc
+from testrunner.testproc.timeout import TimeoutProc
BASE_DIR = (
@@ -31,8 +37,6 @@ BASE_DIR = (
DEFAULT_OUT_GN = 'out.gn'
-ARCH_GUESS = utils.DefaultArch()
-
# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
@@ -90,6 +94,16 @@ TEST_MAP = {
],
}
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+ "mips",
+ "mipsel",
+ "mips64",
+ "mips64el",
+ "s390",
+ "s390x",
+ "arm64"]
+
class ModeConfig(object):
def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
@@ -138,6 +152,12 @@ MODES = {
),
}
+PROGRESS_INDICATORS = {
+ 'verbose': progress.VerboseProgressIndicator,
+ 'dots': progress.DotsProgressIndicator,
+ 'color': progress.ColorProgressIndicator,
+ 'mono': progress.MonochromeProgressIndicator,
+}
class TestRunnerError(Exception):
pass
@@ -162,6 +182,10 @@ class BuildConfig(object):
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
self.ubsan_vptr = build_config['is_ubsan_vptr']
+ # Export only for MIPS target
+ if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
+ self.mips_arch_variant = build_config['mips_arch_variant']
+ self.mips_use_msa = build_config['mips_use_msa']
def __str__(self):
detected_options = []
@@ -204,6 +228,10 @@ class BaseTestRunner(object):
try:
parser = self._create_parser()
options, args = self._parse_args(parser, sys_args)
+ if options.swarming:
+ # Swarming doesn't print how isolated commands are called. Let's make
+ # this less cryptic by printing it ourselves.
+ print ' '.join(sys.argv)
self._load_build_config(options)
@@ -215,14 +243,19 @@ class BaseTestRunner(object):
raise
args = self._parse_test_args(args)
- suites = self._get_suites(args, options.verbose)
+ suites = self._get_suites(args, options)
+ self._prepare_suites(suites, options)
self._setup_env()
- return self._do_execute(suites, args, options)
+
+ print(">>> Running tests for %s.%s" % (self.build_config.arch,
+ self.mode_name))
+ tests = [t for s in suites for t in s.tests]
+ return self._do_execute(tests, args, options)
except TestRunnerError:
- return 1
+ return utils.EXIT_CODE_INTERNAL_ERROR
except KeyboardInterrupt:
- return 2
+ return utils.EXIT_CODE_INTERRUPTED
def _create_parser(self):
parser = optparse.OptionParser()
@@ -247,14 +280,63 @@ class BaseTestRunner(object):
" and buildbot builds): %s" % MODES.keys())
parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
"directory will be used")
- parser.add_option("-v", "--verbose", help="Verbose output",
- default=False, action="store_true")
- parser.add_option("--shard-count",
- help="Split tests into this number of shards",
- default=1, type="int")
- parser.add_option("--shard-run",
- help="Run this shard from the split up tests.",
- default=1, type="int")
+ parser.add_option("--total-timeout-sec", default=0, type="int",
+ help="How long should fuzzer run")
+ parser.add_option("--swarming", default=False, action="store_true",
+ help="Indicates running test driver on swarming.")
+
+ parser.add_option("-j", help="The number of parallel tasks to run",
+ default=0, type=int)
+
+ # Shard
+ parser.add_option("--shard-count", default=1, type=int,
+ help="Split tests into this number of shards")
+ parser.add_option("--shard-run", default=1, type=int,
+ help="Run this shard from the split up tests.")
+
+ # Progress
+ parser.add_option("-p", "--progress",
+ choices=PROGRESS_INDICATORS.keys(), default="mono",
+ help="The style of progress indicator (verbose, dots, "
+ "color, mono)")
+ parser.add_option("--json-test-results",
+ help="Path to a file for storing json results.")
+ parser.add_option("--junitout", help="File name of the JUnit output")
+ parser.add_option("--junittestsuite", default="v8tests",
+ help="The testsuite name in the JUnit output file")
+
+ # Rerun
+ parser.add_option("--rerun-failures-count", default=0, type=int,
+ help="Number of times to rerun each failing test case. "
+ "Very slow tests will be rerun only once.")
+ parser.add_option("--rerun-failures-max", default=100, type=int,
+ help="Maximum number of failing test cases to rerun")
+
+ # Test config
+ parser.add_option("--command-prefix", default="",
+ help="Prepended to each shell command used to run a test")
+ parser.add_option("--extra-flags", action="append", default=[],
+ help="Additional flags to pass to each test command")
+ parser.add_option("--isolates", action="store_true", default=False,
+ help="Whether to test isolates")
+ parser.add_option("--no-harness", "--noharness",
+ default=False, action="store_true",
+ help="Run without test harness of a given suite")
+ parser.add_option("--random-seed", default=0, type=int,
+ help="Default seed for initializing random generator")
+ parser.add_option("-t", "--timeout", default=60, type=int,
+ help="Timeout for single test in seconds")
+ parser.add_option("-v", "--verbose", default=False, action="store_true",
+ help="Verbose output")
+
+ # TODO(machenbach): Temporary options for rolling out new test runner
+ # features.
+ parser.add_option("--mastername", default='',
+ help="Mastername property from infrastructure. Not "
+ "setting this option indicates manual usage.")
+ parser.add_option("--buildername", default='',
+ help="Buildername property from infrastructure. Not "
+ "setting this option indicates manual usage.")
def _add_parser_options(self, parser):
pass
@@ -378,6 +460,12 @@ class BaseTestRunner(object):
print('Warning: --shell-dir is deprecated. Searching for executables in '
'build directory (%s) instead.' % self.outdir)
+ if options.j == 0:
+ options.j = multiprocessing.cpu_count()
+
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+
def _buildbot_to_v8_mode(self, config):
"""Convert buildbot build configs to configs understood by the v8 runner.
@@ -471,9 +559,9 @@ class BaseTestRunner(object):
return reduce(list.__add__, map(expand_test_group, args), [])
- def _get_suites(self, args, verbose=False):
+ def _get_suites(self, args, options):
names = self._args_to_suite_names(args)
- return self._load_suites(names, verbose)
+ return self._load_suites(names, options)
def _args_to_suite_names(self, args):
# Use default tests if no test configuration was provided at the cmd line.
@@ -484,21 +572,100 @@ class BaseTestRunner(object):
def _get_default_suite_names(self):
return []
- def _expand_test_group(self, name):
- return TEST_MAP.get(name, [name])
-
- def _load_suites(self, names, verbose=False):
+ def _load_suites(self, names, options):
+ test_config = self._create_test_config(options)
def load_suite(name):
- if verbose:
+ if options.verbose:
print '>>> Loading test suite: %s' % name
return testsuite.TestSuite.LoadTestSuite(
- os.path.join(self.basedir, 'test', name))
+ os.path.join(self.basedir, 'test', name),
+ test_config)
return map(load_suite, names)
+ def _prepare_suites(self, suites, options):
+ self._load_status_files(suites, options)
+ for s in suites:
+ s.ReadTestCases()
+
+ def _load_status_files(self, suites, options):
+ # simd_mips is true if SIMD is fully supported on MIPS
+ variables = self._get_statusfile_variables(options)
+ for s in suites:
+ s.ReadStatusFile(variables)
+
+ def _get_statusfile_variables(self, options):
+ simd_mips = (
+ self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
+ self.build_config.mips_arch_variant == "r6" and
+ self.build_config.mips_use_msa)
+
+ # TODO(all): Combine "simulator" and "simulator_run".
+ # TODO(machenbach): In GN we can derive simulator run from
+ # target_arch != v8_target_arch in the dumped build config.
+ return {
+ "arch": self.build_config.arch,
+ "asan": self.build_config.asan,
+ "byteorder": sys.byteorder,
+ "dcheck_always_on": self.build_config.dcheck_always_on,
+ "deopt_fuzzer": False,
+ "endurance_fuzzer": False,
+ "gc_fuzzer": False,
+ "gc_stress": False,
+ "gcov_coverage": self.build_config.gcov_coverage,
+ "isolates": options.isolates,
+ "mode": self.mode_options.status_mode,
+ "msan": self.build_config.msan,
+ "no_harness": options.no_harness,
+ "no_i18n": self.build_config.no_i18n,
+ "no_snap": self.build_config.no_snap,
+ "novfp3": False,
+ "predictable": self.build_config.predictable,
+ "simd_mips": simd_mips,
+ "simulator": utils.UseSimulator(self.build_config.arch),
+ "simulator_run": False,
+ "system": utils.GuessOS(),
+ "tsan": self.build_config.tsan,
+ "ubsan_vptr": self.build_config.ubsan_vptr,
+ }
+
+ def _create_test_config(self, options):
+ timeout = options.timeout * self._timeout_scalefactor(options)
+ return TestConfig(
+ command_prefix=options.command_prefix,
+ extra_flags=options.extra_flags,
+ isolates=options.isolates,
+ mode_flags=self.mode_options.flags,
+ no_harness=options.no_harness,
+ noi18n=self.build_config.no_i18n,
+ random_seed=options.random_seed,
+ shell_dir=self.outdir,
+ timeout=timeout,
+ verbose=options.verbose,
+ )
+
+ def _timeout_scalefactor(self, options):
+ factor = self.mode_options.timeout_scalefactor
+
+ # Simulators are slow, therefore allow a longer timeout.
+ if self.build_config.arch in SLOW_ARCHS:
+ factor *= 2
+
+ # Predictable mode is slower.
+ if self.build_config.predictable:
+ factor *= 2
+
+ return factor
+
# TODO(majeski): remove options & args parameters
def _do_execute(self, suites, args, options):
raise NotImplementedError()
+ def _prepare_procs(self, procs):
+ procs = filter(None, procs)
+ for i in xrange(0, len(procs) - 1):
+ procs[i].connect_to(procs[i + 1])
+ procs[0].setup()
+
def _create_shard_proc(self, options):
myid, count = self._get_shard_info(options)
if count == 1:
@@ -541,3 +708,29 @@ class BaseTestRunner(object):
return 1, 1
return shard_run, shard_count
+
+ def _create_progress_indicators(self, options):
+ procs = [PROGRESS_INDICATORS[options.progress]()]
+ if options.junitout:
+ procs.append(progress.JUnitTestProgressIndicator(options.junitout,
+ options.junittestsuite))
+ if options.json_test_results:
+ procs.append(progress.JsonTestProgressIndicator(
+ options.json_test_results,
+ self.build_config.arch,
+ self.mode_options.execution_mode))
+ return procs
+
+ def _create_timeout_proc(self, options):
+ if not options.total_timeout_sec:
+ return None
+ return TimeoutProc(options.total_timeout_sec)
+
+ def _create_signal_proc(self):
+ return SignalProc()
+
+ def _create_rerun_proc(self, options):
+ if not options.rerun_failures_count:
+ return None
+ return RerunProc(options.rerun_failures_count,
+ options.rerun_failures_max)
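
base_runner.py now centralizes the plumbing each runner used to duplicate: shared option parsing, status-file variables, and the test config, including a timeout scale factor in which slow (simulated) architectures and predictable mode each double the per-test timeout on top of the mode's own factor. The arithmetic, spelled out (a sketch using the factors from the patch):

    SLOW_ARCHS = ["arm", "arm64", "mips", "mipsel", "mips64", "mips64el",
                  "s390", "s390x"]

    def timeout_scalefactor(mode_factor, arch, predictable):
        factor = mode_factor      # e.g. 1 for release, larger for debug modes
        if arch in SLOW_ARCHS:
            factor *= 2           # simulators are slow
        if predictable:
            factor *= 2           # predictable mode is slower still
        return factor

    # A 60s base timeout on simulated arm in predictable mode, mode factor 2:
    print 60 * timeout_scalefactor(2, "arm", True)   # 480 seconds
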
diff --git a/deps/v8/tools/testrunner/deopt_fuzzer.py b/deps/v8/tools/testrunner/deopt_fuzzer.py
deleted file mode 100755
index 5e6b79f5e9..0000000000
--- a/deps/v8/tools/testrunner/deopt_fuzzer.py
+++ /dev/null
@@ -1,336 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from os.path import join
-import json
-import math
-import multiprocessing
-import os
-import random
-import shlex
-import sys
-import time
-
-# Adds testrunner to the path, hence it has to be imported at the beginning.
-import base_runner
-
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.local import verbose
-from testrunner.objects import context
-
-
-DEFAULT_SUITES = ["mjsunit", "webkit"]
-TIMEOUT_DEFAULT = 60
-
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
- "mipsel"]
-MAX_DEOPT = 1000000000
-DISTRIBUTION_MODES = ["smooth", "random"]
-
-
-class DeoptFuzzer(base_runner.BaseTestRunner):
- def __init__(self, *args, **kwargs):
- super(DeoptFuzzer, self).__init__(*args, **kwargs)
-
- class RandomDistribution:
- def __init__(self, seed=None):
- seed = seed or random.randint(1, sys.maxint)
- print "Using random distribution with seed %d" % seed
- self._random = random.Random(seed)
-
- def Distribute(self, n, m):
- if n > m:
- n = m
- return self._random.sample(xrange(1, m + 1), n)
-
- class SmoothDistribution:
- """Distribute n numbers into the interval [1:m].
- F1: Factor of the first derivation of the distribution function.
- F2: Factor of the second derivation of the distribution function.
- With F1 and F2 set to 0, the distribution will be equal.
- """
- def __init__(self, factor1=2.0, factor2=0.2):
- self._factor1 = factor1
- self._factor2 = factor2
-
- def Distribute(self, n, m):
- if n > m:
- n = m
- if n <= 1:
- return [ 1 ]
-
- result = []
- x = 0.0
- dx = 1.0
- ddx = self._factor1
- dddx = self._factor2
- for i in range(0, n):
- result += [ x ]
- x += dx
- dx += ddx
- ddx += dddx
-
- # Project the distribution into the interval [0:M].
- result = [ x * m / result[-1] for x in result ]
-
- # Equalize by n. The closer n is to m, the more equal will be the
- # distribution.
- for (i, x) in enumerate(result):
- # The value of x if it was equally distributed.
- equal_x = i / float(n - 1) * float(m - 1) + 1
-
- # Difference factor between actual and equal distribution.
- diff = 1 - (x / equal_x)
-
- # Equalize x dependent on the number of values to distribute.
- result[i] = int(x + (i + 1) * diff)
- return result
-
-
- def _distribution(self, options):
- if options.distribution_mode == "random":
- return self.RandomDistribution(options.seed)
- if options.distribution_mode == "smooth":
- return self.SmoothDistribution(options.distribution_factor1,
- options.distribution_factor2)
-
-
- def _add_parser_options(self, parser):
- parser.add_option("--command-prefix",
- help="Prepended to each shell command used to run a test",
- default="")
- parser.add_option("--coverage", help=("Exponential test coverage "
- "(range 0.0, 1.0) - 0.0: one test, 1.0 all tests (slow)"),
- default=0.4, type="float")
- parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
- "with a small number of deopt points (range 0, inf)"),
- default=20, type="int")
- parser.add_option("--distribution-factor1", help=("Factor of the first "
- "derivation of the distribution function"), default=2.0,
- type="float")
- parser.add_option("--distribution-factor2", help=("Factor of the second "
- "derivation of the distribution function"), default=0.7,
- type="float")
- parser.add_option("--distribution-mode", help=("How to select deopt points "
- "for a given test (smooth|random)"),
- default="smooth")
- parser.add_option("--dump-results-file", help=("Dump maximum number of "
- "deopt points per test to a file"))
- parser.add_option("--extra-flags",
- help="Additional flags to pass to each test command",
- default="")
- parser.add_option("--isolates", help="Whether to test isolates",
- default=False, action="store_true")
- parser.add_option("-j", help="The number of parallel tasks to run",
- default=0, type="int")
- parser.add_option("-p", "--progress",
- help=("The style of progress indicator"
- " (verbose, dots, color, mono)"),
- choices=progress.PROGRESS_INDICATORS.keys(),
- default="mono")
- parser.add_option("--seed", help="The seed for the random distribution",
- type="int")
- parser.add_option("-t", "--timeout", help="Timeout in seconds",
-                      default=-1, type="int")
- parser.add_option("--random-seed", default=0, dest="random_seed",
- help="Default seed for initializing random generator")
- parser.add_option("--fuzzer-random-seed", default=0,
- help="Default seed for initializing fuzzer random "
- "generator")
- return parser
-
-
- def _process_options(self, options):
- # Special processing of other options, sorted alphabetically.
- options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = shlex.split(options.extra_flags)
- if options.j == 0:
- options.j = multiprocessing.cpu_count()
- while options.random_seed == 0:
- options.random_seed = random.SystemRandom().randint(-2147483648,
- 2147483647)
- if not options.distribution_mode in DISTRIBUTION_MODES:
- print "Unknown distribution mode %s" % options.distribution_mode
- return False
- if options.distribution_factor1 < 0.0:
- print ("Distribution factor1 %s is out of range. Defaulting to 0.0"
- % options.distribution_factor1)
- options.distribution_factor1 = 0.0
- if options.distribution_factor2 < 0.0:
- print ("Distribution factor2 %s is out of range. Defaulting to 0.0"
- % options.distribution_factor2)
- options.distribution_factor2 = 0.0
- if options.coverage < 0.0 or options.coverage > 1.0:
- print ("Coverage %s is out of range. Defaulting to 0.4"
- % options.coverage)
- options.coverage = 0.4
- if options.coverage_lift < 0:
- print ("Coverage lift %s is out of range. Defaulting to 0"
- % options.coverage_lift)
- options.coverage_lift = 0
- return True
-
- def _calculate_n_tests(self, m, options):
- """Calculates the number of tests from m deopt points with exponential
- coverage.
- The coverage is expected to be between 0.0 and 1.0.
- The 'coverage lift' lifts the coverage for tests with smaller m values.
- """
- c = float(options.coverage)
- l = float(options.coverage_lift)
- return int(math.pow(m, (m * c + l) / (m + l)))
-
- def _get_default_suite_names(self):
- return DEFAULT_SUITES
-
- def _do_execute(self, suites, args, options):
- print(">>> Running tests for %s.%s" % (self.build_config.arch,
- self.mode_name))
-
- dist = self._distribution(options)
-
- # Populate context object.
- timeout = options.timeout
- if timeout == -1:
-      # Simulators are slow, so allow a longer default timeout.
-      if self.build_config.arch in SLOW_ARCHS:
-        timeout = 2 * TIMEOUT_DEFAULT
-      else:
-        timeout = TIMEOUT_DEFAULT
-
- timeout *= self.mode_options.timeout_scalefactor
- ctx = context.Context(self.build_config.arch,
- self.mode_options.execution_mode,
- self.outdir,
- self.mode_options.flags, options.verbose,
- timeout, options.isolates,
- options.command_prefix,
- options.extra_flags,
- False, # Keep i18n on by default.
- options.random_seed,
- True, # No sorting of test cases.
- 0, # Don't rerun failing tests.
- 0, # No use of a rerun-failing-tests maximum.
- False, # No no_harness mode.
- False, # Don't use perf data.
- False) # Coverage not supported.
-
- # Find available test suites and read test cases from them.
- variables = {
- "arch": self.build_config.arch,
- "asan": self.build_config.asan,
- "byteorder": sys.byteorder,
- "dcheck_always_on": self.build_config.dcheck_always_on,
- "deopt_fuzzer": True,
- "gc_fuzzer": False,
- "gc_stress": False,
- "gcov_coverage": self.build_config.gcov_coverage,
- "isolates": options.isolates,
- "mode": self.mode_options.status_mode,
- "msan": self.build_config.msan,
- "no_harness": False,
- "no_i18n": self.build_config.no_i18n,
- "no_snap": self.build_config.no_snap,
- "novfp3": False,
- "predictable": self.build_config.predictable,
- "simulator": utils.UseSimulator(self.build_config.arch),
- "simulator_run": False,
- "system": utils.GuessOS(),
- "tsan": self.build_config.tsan,
- "ubsan_vptr": self.build_config.ubsan_vptr,
- }
- num_tests = 0
- test_id = 0
-
- # Remember test case prototypes for the fuzzing phase.
- test_backup = dict((s, []) for s in suites)
-
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
- if len(args) > 0:
- s.FilterTestCasesByArgs(args)
- s.FilterTestCasesByStatus(False)
-
- test_backup[s] = s.tests
- analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
- "--print-deopt-stress"]
- s.tests = [t.create_variant(t.variant, analysis_flags, 'analysis')
- for t in s.tests]
- num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- t.cmd = t.get_command(ctx)
- test_id += 1
-
- if num_tests == 0:
- print "No tests to run."
- return 0
-
- print(">>> Collection phase")
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
-
- exit_code = runner.Run(options.j)
-
- print(">>> Analysis phase")
- num_tests = 0
- test_id = 0
- for s in suites:
- test_results = {}
- for t in s.tests:
- for line in runner.outputs[t].stdout.splitlines():
- if line.startswith("=== Stress deopt counter: "):
- test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
- for t in s.tests:
- if t.path not in test_results:
- print "Missing results for %s" % t.path
- if options.dump_results_file:
- results_dict = dict((t.path, n) for (t, n) in test_results.iteritems())
- with file("%s.%d.txt" % (options.dump_results_file, time.time()),
- "w") as f:
- f.write(json.dumps(results_dict))
-
- # Reset tests and redistribute the prototypes from the collection phase.
- s.tests = []
- if options.verbose:
- print "Test distributions:"
- for t in test_backup[s]:
- max_deopt = test_results.get(t.path, 0)
- if max_deopt == 0:
- continue
- n_deopt = self._calculate_n_tests(max_deopt, options)
- distribution = dist.Distribute(n_deopt, max_deopt)
- if options.verbose:
- print "%s %s" % (t.path, distribution)
- for n, d in enumerate(distribution):
- fuzzing_flags = ["--deopt-every-n-times", "%d" % d]
- s.tests.append(t.create_variant(t.variant, fuzzing_flags, n))
- num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- t.cmd = t.get_command(ctx)
- test_id += 1
-
- if num_tests == 0:
- print "No tests to run."
- return exit_code
-
- print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
-
- code = runner.Run(options.j)
- return exit_code or code
-
-
-if __name__ == '__main__':
- sys.exit(DeoptFuzzer().execute())
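For reference, the exponential coverage formula in _calculate_n_tests above can be exercised on its own; a short sketch using the option defaults (coverage 0.4, lift 20):

import math

def n_tests(m, coverage=0.4, coverage_lift=20):
    # Exponential coverage: lifted towards full coverage for small m.
    c = float(coverage)
    l = float(coverage_lift)
    return int(math.pow(m, (m * c + l) / (m + l)))

# 10 deopt points yield 10 ** 0.8 ~= 6 subtests; 1000 points yield
# 1000 ** 0.41 ~= 17. Coverage grows sublinearly with m.
print("%d %d" % (n_tests(10), n_tests(1000)))  # 6 17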
diff --git a/deps/v8/tools/testrunner/gc_fuzzer.py b/deps/v8/tools/testrunner/gc_fuzzer.py
deleted file mode 100755
index 18be227d98..0000000000
--- a/deps/v8/tools/testrunner/gc_fuzzer.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from os.path import join
-import itertools
-import json
-import math
-import multiprocessing
-import os
-import random
-import shlex
-import sys
-import time
-
-# Adds testrunner to the path, hence it has to be imported at the beginning.
-import base_runner
-
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.local import verbose
-from testrunner.objects import context
-
-
-DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
-TIMEOUT_DEFAULT = 60
-
-# Double the timeout for these:
-SLOW_ARCHS = ["arm", "mipsel"]
-
-
-class GCFuzzer(base_runner.BaseTestRunner):
- def __init__(self, *args, **kwargs):
- super(GCFuzzer, self).__init__(*args, **kwargs)
-
- self.fuzzer_rng = None
-
- def _add_parser_options(self, parser):
- parser.add_option("--command-prefix",
- help="Prepended to each shell command used to run a test",
- default="")
- parser.add_option("--coverage", help=("Exponential test coverage "
- "(range 0.0, 1.0) - 0.0: one test, 1.0 all tests (slow)"),
- default=0.4, type="float")
- parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
- "with a low memory size reached (range 0, inf)"),
- default=20, type="int")
- parser.add_option("--dump-results-file", help="Dump maximum limit reached")
- parser.add_option("--extra-flags",
- help="Additional flags to pass to each test command",
- default="")
- parser.add_option("--isolates", help="Whether to test isolates",
- default=False, action="store_true")
- parser.add_option("-j", help="The number of parallel tasks to run",
- default=0, type="int")
- parser.add_option("-p", "--progress",
- help=("The style of progress indicator"
- " (verbose, dots, color, mono)"),
- choices=progress.PROGRESS_INDICATORS.keys(),
- default="mono")
- parser.add_option("-t", "--timeout", help="Timeout in seconds",
-                      default=-1, type="int")
- parser.add_option("--random-seed", default=0,
- help="Default seed for initializing random generator")
- parser.add_option("--fuzzer-random-seed", default=0,
- help="Default seed for initializing fuzzer random "
- "generator")
- parser.add_option("--stress-compaction", default=False, action="store_true",
- help="Enable stress_compaction_percentage flag")
-
- parser.add_option("--distribution-factor1", help="DEPRECATED")
- parser.add_option("--distribution-factor2", help="DEPRECATED")
- parser.add_option("--distribution-mode", help="DEPRECATED")
- parser.add_option("--seed", help="DEPRECATED")
- return parser
-
-
- def _process_options(self, options):
- # Special processing of other options, sorted alphabetically.
- options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = shlex.split(options.extra_flags)
- if options.j == 0:
- options.j = multiprocessing.cpu_count()
- while options.random_seed == 0:
- options.random_seed = random.SystemRandom().randint(-2147483648,
- 2147483647)
- while options.fuzzer_random_seed == 0:
- options.fuzzer_random_seed = random.SystemRandom().randint(-2147483648,
- 2147483647)
- self.fuzzer_rng = random.Random(options.fuzzer_random_seed)
- return True
-
- def _calculate_n_tests(self, m, options):
- """Calculates the number of tests from m points with exponential coverage.
- The coverage is expected to be between 0.0 and 1.0.
- The 'coverage lift' lifts the coverage for tests with smaller m values.
- """
- c = float(options.coverage)
- l = float(options.coverage_lift)
- return int(math.pow(m, (m * c + l) / (m + l)))
-
- def _get_default_suite_names(self):
- return DEFAULT_SUITES
-
- def _do_execute(self, suites, args, options):
- print(">>> Running tests for %s.%s" % (self.build_config.arch,
- self.mode_name))
-
- # Populate context object.
- timeout = options.timeout
- if timeout == -1:
-      # Simulators are slow, so allow a longer default timeout.
-      if self.build_config.arch in SLOW_ARCHS:
-        timeout = 2 * TIMEOUT_DEFAULT
-      else:
-        timeout = TIMEOUT_DEFAULT
-
- timeout *= self.mode_options.timeout_scalefactor
- ctx = context.Context(self.build_config.arch,
- self.mode_options.execution_mode,
- self.outdir,
- self.mode_options.flags, options.verbose,
- timeout, options.isolates,
- options.command_prefix,
- options.extra_flags,
- False, # Keep i18n on by default.
- options.random_seed,
- True, # No sorting of test cases.
- 0, # Don't rerun failing tests.
- 0, # No use of a rerun-failing-tests maximum.
- False, # No no_harness mode.
- False, # Don't use perf data.
- False) # Coverage not supported.
-
- num_tests = self._load_tests(args, options, suites, ctx)
- if num_tests == 0:
- print "No tests to run."
- return 0
-
- test_backup = dict(map(lambda s: (s, s.tests), suites))
-
- print('>>> Collection phase')
- for s in suites:
- analysis_flags = ['--fuzzer-gc-analysis']
- s.tests = map(lambda t: t.create_variant(t.variant, analysis_flags,
- 'analysis'),
- s.tests)
- for t in s.tests:
- t.cmd = t.get_command(ctx)
-
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
- exit_code = runner.Run(options.j)
-
- print('>>> Analysis phase')
- test_results = dict()
- for s in suites:
- for t in s.tests:
- # Skip failed tests.
- if t.output_proc.has_unexpected_output(runner.outputs[t]):
- print '%s failed, skipping' % t.path
- continue
- max_limit = self._get_max_limit_reached(runner.outputs[t])
- if max_limit:
- test_results[t.path] = max_limit
-
- runner = None
-
- if options.dump_results_file:
- with file("%s.%d.txt" % (options.dump_results_file, time.time()),
- "w") as f:
- f.write(json.dumps(test_results))
-
- num_tests = 0
- for s in suites:
- s.tests = []
- for t in test_backup[s]:
- max_percent = test_results.get(t.path, 0)
- if not max_percent or max_percent < 1.0:
- continue
- max_percent = int(max_percent)
-
- subtests_count = self._calculate_n_tests(max_percent, options)
-
- if options.verbose:
- print ('%s [x%d] (max marking limit=%.02f)' %
- (t.path, subtests_count, max_percent))
- for i in xrange(0, subtests_count):
- fuzzer_seed = self._next_fuzzer_seed()
- fuzzing_flags = [
- '--stress_marking', str(max_percent),
- '--fuzzer_random_seed', str(fuzzer_seed),
- ]
- if options.stress_compaction:
- fuzzing_flags.append('--stress_compaction_random')
- s.tests.append(t.create_variant(t.variant, fuzzing_flags, i))
- for t in s.tests:
- t.cmd = t.get_command(ctx)
- num_tests += len(s.tests)
-
- if num_tests == 0:
- print "No tests to run."
- return exit_code
-
- print(">>> Fuzzing phase (%d test cases)" % num_tests)
- progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
- runner = execution.Runner(suites, progress_indicator, ctx)
-
- return runner.Run(options.j) or exit_code
-
- def _load_tests(self, args, options, suites, ctx):
- # Find available test suites and read test cases from them.
- variables = {
- "arch": self.build_config.arch,
- "asan": self.build_config.asan,
- "byteorder": sys.byteorder,
- "dcheck_always_on": self.build_config.dcheck_always_on,
- "deopt_fuzzer": False,
- "gc_fuzzer": True,
- "gc_stress": False,
- "gcov_coverage": self.build_config.gcov_coverage,
- "isolates": options.isolates,
- "mode": self.mode_options.status_mode,
- "msan": self.build_config.msan,
- "no_harness": False,
- "no_i18n": self.build_config.no_i18n,
- "no_snap": self.build_config.no_snap,
- "novfp3": False,
- "predictable": self.build_config.predictable,
- "simulator": utils.UseSimulator(self.build_config.arch),
- "simulator_run": False,
- "system": utils.GuessOS(),
- "tsan": self.build_config.tsan,
- "ubsan_vptr": self.build_config.ubsan_vptr,
- }
-
- num_tests = 0
- test_id = 0
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
- if len(args) > 0:
- s.FilterTestCasesByArgs(args)
- s.FilterTestCasesByStatus(False)
-
- num_tests += len(s.tests)
- for t in s.tests:
- t.id = test_id
- test_id += 1
-
- return num_tests
-
- # Parses test stdout and returns what was the highest reached percent of the
- # incremental marking limit (0-100).
- @staticmethod
- def _get_max_limit_reached(output):
- if not output.stdout:
- return None
-
- for l in reversed(output.stdout.splitlines()):
- if l.startswith('### Maximum marking limit reached ='):
- return float(l.split()[6])
-
- return None
-
- def _next_fuzzer_seed(self):
- fuzzer_seed = None
- while not fuzzer_seed:
- fuzzer_seed = self.fuzzer_rng.randint(-2147483648, 2147483647)
- return fuzzer_seed
-
-
-if __name__ == '__main__':
- sys.exit(GCFuzzer().execute())
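The analysis phase above keys off a single marker line in each test's stdout; a self-contained sketch of the reverse scan performed by _get_max_limit_reached:

def max_limit_reached(stdout):
    # Scan from the end so the last occurrence of the marker wins.
    marker = '### Maximum marking limit reached ='
    for line in reversed(stdout.splitlines()):
        if line.startswith(marker):
            # Token 6 is the number after the '=' sign.
            return float(line.split()[6])
    return None

print(max_limit_reached('noise\n### Maximum marking limit reached = 87.5\n'))
# -> 87.5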
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index 93b1ac9497..adc9c2e452 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -4,6 +4,7 @@
import os
+import signal
import subprocess
import sys
import threading
@@ -17,6 +18,19 @@ SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
+def setup_testing():
+ """For testing only: We use threading under the hood instead of
+ multiprocessing to make coverage work. Signal handling is only supported
+ in the main thread, so we disable it for testing.
+ """
+ signal.signal = lambda *_: None
+
+
+class AbortException(Exception):
+ """Indicates early abort on SIGINT, SIGTERM or internal hard timeout."""
+ pass
+
+
class BaseCommand(object):
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
verbose=False):
@@ -35,10 +49,16 @@ class BaseCommand(object):
process = self._start_process(**additional_popen_kwargs)
+ # Variable to communicate with the signal handler.
+ abort_occured = [False]
+ def handler(signum, frame):
+ self._abort(process, abort_occured)
+ signal.signal(signal.SIGTERM, handler)
+
# Variable to communicate with the timer.
timeout_occured = [False]
timer = threading.Timer(
- self.timeout, self._on_timeout, [process, timeout_occured])
+ self.timeout, self._abort, [process, timeout_occured])
timer.start()
start_time = time.time()
@@ -47,6 +67,9 @@ class BaseCommand(object):
timer.cancel()
+ if abort_occured[0]:
+ raise AbortException()
+
return output.Output(
process.returncode,
timeout_occured[0],
@@ -85,12 +108,12 @@ class BaseCommand(object):
def _kill_process(self, process):
raise NotImplementedError()
- def _on_timeout(self, process, timeout_occured):
- timeout_occured[0] = True
+ def _abort(self, process, abort_called):
+ abort_called[0] = True
try:
self._kill_process(process)
except OSError:
- sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+ pass
def __str__(self):
return self.to_string()
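The command.py change above routes both the watchdog timer and SIGTERM through the single _abort path. A minimal sketch of that pattern in isolation (the function and variable names here are illustrative, not the real BaseCommand API):

import signal
import subprocess
import threading

def run_with_abort(args, timeout):
    process = subprocess.Popen(args)
    aborted = [False]  # Mutable cell shared with the handler and timer.

    def abort(*_):
        aborted[0] = True
        try:
            process.kill()
        except OSError:
            pass  # Process already ended; nothing to clean up.

    # SIGTERM and the timer funnel into the same abort path.
    signal.signal(signal.SIGTERM, abort)
    timer = threading.Timer(timeout, abort)
    timer.start()
    process.wait()
    timer.cancel()
    return process.returncode, aborted[0]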
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
deleted file mode 100644
index d6d0725365..0000000000
--- a/deps/v8/tools/testrunner/local/execution.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import collections
-import os
-import re
-import shutil
-import sys
-import traceback
-
-from . import command
-from . import perfdata
-from . import statusfile
-from . import utils
-from . pool import Pool
-from ..objects import predictable
-
-
-# Base dir of the v8 checkout.
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__)))))
-TEST_DIR = os.path.join(BASE_DIR, "test")
-
-
-# Structure that keeps global information per worker process.
-ProcessContext = collections.namedtuple(
- 'process_context', ['sancov_dir'])
-
-
-TestJobResult = collections.namedtuple(
- 'TestJobResult', ['id', 'outproc_result'])
-
-def MakeProcessContext(sancov_dir):
- return ProcessContext(sancov_dir)
-
-
-# Global function for multiprocessing, because pickling a static method doesn't
-# work on Windows.
-def run_job(job, process_context):
- return job.run(process_context)
-
-
-class Job(object):
- """Stores data to be sent over the multi-process boundary.
-
- All contained fields will be pickled/unpickled.
- """
-
- def run(self, process_context):
- raise NotImplementedError()
-
-
-class TestJob(Job):
- def __init__(self, test_id, cmd, outproc, run_num):
- self.test_id = test_id
- self.cmd = cmd
- self.outproc = outproc
- self.run_num = run_num
-
- def _rename_coverage_data(self, out, sancov_dir):
- """Rename coverage data.
-
- Rename files with PIDs to files with unique test IDs, because the number
- of tests might be higher than pid_max. E.g.:
-    d8.1234.sancov -> d8.test.42.1.sancov, where 1234 is the process's PID,
-    42 is the test ID and 1 is the attempt number (the same test might be
-    rerun on failures).
- """
- if sancov_dir and out.pid is not None:
-      # Sancov doesn't work on Windows, so basename suffices for the shell name.
- shell = os.path.basename(self.cmd.shell)
- sancov_file = os.path.join(sancov_dir, "%s.%d.sancov" % (shell, out.pid))
-
- # Some tests are expected to fail and don't produce coverage data.
- if os.path.exists(sancov_file):
- parts = sancov_file.split(".")
- new_sancov_file = ".".join(
- parts[:-2] +
- ["test", str(self.test_id), str(self.run_num)] +
- parts[-1:]
- )
- assert not os.path.exists(new_sancov_file)
- os.rename(sancov_file, new_sancov_file)
-
- def run(self, context):
- output = self.cmd.execute()
- self._rename_coverage_data(output, context.sancov_dir)
- return TestJobResult(self.test_id, self.outproc.process(output))
-
-
-class Runner(object):
-
- def __init__(self, suites, progress_indicator, context, outproc_factory=None):
- self.datapath = os.path.join("out", "testrunner_data")
- self.perf_data_manager = perfdata.GetPerfDataManager(
- context, self.datapath)
- self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
- self.perf_failures = False
- self.printed_allocations = False
- self.outproc_factory = outproc_factory or (lambda test: test.output_proc)
- self.tests = [t for s in suites for t in s.tests]
-
- # TODO(majeski): Pass dynamically instead of keeping them in the runner.
- # Maybe some observer?
- self.outputs = {t: None for t in self.tests}
-
- self.suite_names = [s.name for s in suites]
-
- # Always pre-sort by status file, slowest tests first.
- self.tests.sort(key=lambda t: t.is_slow, reverse=True)
-
- # Sort by stored duration if not opted out.
- if not context.no_sorting:
- self.tests.sort(key=lambda t: self.perfdata.FetchPerfData(t) or 1.0,
- reverse=True)
-
- self._CommonInit(suites, progress_indicator, context)
-
- def _CommonInit(self, suites, progress_indicator, context):
- self.total = 0
- for s in suites:
- for t in s.tests:
- t.id = self.total
- self.total += 1
- self.indicator = progress_indicator
- progress_indicator.SetRunner(self)
- self.context = context
- self.succeeded = 0
- self.remaining = self.total
- self.failed = []
- self.crashed = 0
- self.reran_tests = 0
-
- def _RunPerfSafe(self, fun):
- try:
- fun()
- except Exception, e:
- print("PerfData exception: %s" % e)
- self.perf_failures = True
-
- def _MaybeRerun(self, pool, test, result):
- if test.run <= self.context.rerun_failures_count:
-      # Possibly rerun this test if its run count is below the per-test
-      # maximum. We use <= because the flag counts reruns, excluding the
-      # first run.
- if test.run == 1:
- # Count the overall number of reran tests on the first rerun.
- if self.reran_tests < self.context.rerun_failures_max:
- self.reran_tests += 1
- else:
- # Don't rerun this if the overall number of rerun tests has been
- # reached.
- return
- if (test.run >= 2 and
- result.output.duration > self.context.timeout / 20.0):
- # Rerun slow tests at most once.
- return
-
- # Rerun this test.
- test.run += 1
- pool.add([
- TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
- ])
- self.remaining += 1
- self.total += 1
-
- def _ProcessTest(self, test, result, pool):
- self.outputs[test] = result.output
- has_unexpected_output = result.has_unexpected_output
- if has_unexpected_output:
- self.failed.append(test)
- if result.output.HasCrashed():
- self.crashed += 1
- else:
- self.succeeded += 1
- self.remaining -= 1
-    # For the indicator, everything after the first run is treated as
-    # unexpected, even if it flakily passes, so that it is included in the
-    # output.
- self.indicator.HasRun(test, result.output,
- has_unexpected_output or test.run > 1)
- if has_unexpected_output:
- # Rerun test failures after the indicator has processed the results.
- self._VerbosePrint("Attempting to rerun test after failure.")
- self._MaybeRerun(pool, test, result)
- # Update the perf database if the test succeeded.
- return not has_unexpected_output
-
- def Run(self, jobs):
- self.indicator.Starting()
- self._RunInternal(jobs)
- self.indicator.Done()
- if self.failed:
- return 1
- elif self.remaining:
- return 2
- return 0
-
- def _RunInternal(self, jobs):
- pool = Pool(jobs)
- test_map = {}
- queued_exception = [None]
- def gen_tests():
- for test in self.tests:
- assert test.id >= 0
- test_map[test.id] = test
- try:
- yield [
- TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
- ]
- except Exception, e:
- # If this failed, save the exception and re-raise it later (after
- # all other tests have had a chance to run).
- queued_exception[0] = e, traceback.format_exc()
- continue
- try:
- it = pool.imap_unordered(
- fn=run_job,
- gen=gen_tests(),
- process_context_fn=MakeProcessContext,
- process_context_args=[self.context.sancov_dir],
- )
- for result in it:
- if result.heartbeat:
- self.indicator.Heartbeat()
- continue
-
- job_result = result.value
- test_id = job_result.id
- outproc_result = job_result.outproc_result
-
- test = test_map[test_id]
- update_perf = self._ProcessTest(test, outproc_result, pool)
- if update_perf:
- self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(
- test, outproc_result.output.duration))
- except KeyboardInterrupt:
- raise
- except:
- traceback.print_exc()
- raise
- finally:
- self._VerbosePrint("Closing process pool.")
- pool.terminate()
- self._VerbosePrint("Closing database connection.")
- self._RunPerfSafe(self.perf_data_manager.close)
- if self.perf_failures:
- # Nuke perf data in case of failures. This might not work on windows as
- # some files might still be open.
- print "Deleting perf test data due to db corruption."
- shutil.rmtree(self.datapath)
- if queued_exception[0]:
- e, stacktrace = queued_exception[0]
- print stacktrace
- raise e
-
- def _VerbosePrint(self, text):
- if self.context.verbose:
- print text
- sys.stdout.flush()
-
-
-class BreakNowException(Exception):
- def __init__(self, value):
- super(BreakNowException, self).__init__()
- self.value = value
-
- def __str__(self):
- return repr(self.value)
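The sancov rename scheme documented in _rename_coverage_data above can be shown in isolation; a sketch of just the path rewrite:

def rename_sancov(sancov_file, test_id, run_num):
    # d8.1234.sancov -> d8.test.42.1.sancov: swap the PID for a stable
    # test ID plus attempt number.
    parts = sancov_file.split(".")
    return ".".join(
        parts[:-2] + ["test", str(test_id), str(run_num)] + parts[-1:])

print(rename_sancov("d8.1234.sancov", 42, 1))  # d8.test.42.1.sancov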
diff --git a/deps/v8/tools/testrunner/local/perfdata.py b/deps/v8/tools/testrunner/local/perfdata.py
deleted file mode 100644
index 4cb618b0be..0000000000
--- a/deps/v8/tools/testrunner/local/perfdata.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import shelve
-import threading
-
-
-class PerfDataEntry(object):
- def __init__(self):
- self.avg = 0.0
- self.count = 0
-
- def AddResult(self, result):
- kLearnRateLimiter = 99 # Greater value means slower learning.
- # We use an approximation of the average of the last 100 results here:
- # The existing average is weighted with kLearnRateLimiter (or less
- # if there are fewer data points).
- effective_count = min(self.count, kLearnRateLimiter)
- self.avg = self.avg * effective_count + result
- self.count = effective_count + 1
- self.avg /= self.count
-
-
-class PerfDataStore(object):
- def __init__(self, datadir, arch, mode):
- filename = os.path.join(datadir, "%s.%s.perfdata" % (arch, mode))
- self.database = shelve.open(filename, protocol=2)
- self.closed = False
- self.lock = threading.Lock()
-
- def __del__(self):
- self.close()
-
- def close(self):
- if self.closed: return
- self.database.close()
- self.closed = True
-
- def FetchPerfData(self, test):
- """Returns the observed duration for |test| as read from the store."""
- key = test.get_id()
- if key in self.database:
- return self.database[key].avg
- return None
-
- def UpdatePerfData(self, test, duration):
- """Updates the persisted value in the store with duration."""
- testkey = test.get_id()
- self.RawUpdatePerfData(testkey, duration)
-
- def RawUpdatePerfData(self, testkey, duration):
- with self.lock:
- if testkey in self.database:
- entry = self.database[testkey]
- else:
- entry = PerfDataEntry()
- entry.AddResult(duration)
- self.database[testkey] = entry
-
-
-class PerfDataManager(object):
- def __init__(self, datadir):
- self.datadir = os.path.abspath(datadir)
- if not os.path.exists(self.datadir):
- os.makedirs(self.datadir)
- self.stores = {} # Keyed by arch, then mode.
- self.closed = False
- self.lock = threading.Lock()
-
- def __del__(self):
- self.close()
-
- def close(self):
- if self.closed: return
- for arch in self.stores:
- modes = self.stores[arch]
- for mode in modes:
- store = modes[mode]
- store.close()
- self.closed = True
-
- def GetStore(self, arch, mode):
- with self.lock:
- if not arch in self.stores:
- self.stores[arch] = {}
- modes = self.stores[arch]
- if not mode in modes:
- modes[mode] = PerfDataStore(self.datadir, arch, mode)
- return modes[mode]
-
-
-class NullPerfDataStore(object):
- def UpdatePerfData(self, test, duration):
- pass
-
- def FetchPerfData(self, test):
- return None
-
-
-class NullPerfDataManager(object):
- def __init__(self):
- pass
-
- def GetStore(self, *args, **kwargs):
- return NullPerfDataStore()
-
- def close(self):
- pass
-
-
-def GetPerfDataManager(context, datadir):
- if context.use_perf_data:
- return PerfDataManager(datadir)
- else:
- return NullPerfDataManager()
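The learn-rate-limited average in PerfDataEntry.AddResult above behaves like a plain mean for the first hundred samples and like an exponentially weighted average afterwards; a standalone sketch:

class RunningAverage(object):
    LEARN_RATE_LIMITER = 99  # Greater value means slower learning.

    def __init__(self):
        self.avg = 0.0
        self.count = 0

    def add(self, result):
        # Cap the weight of history at LEARN_RATE_LIMITER samples.
        effective_count = min(self.count, self.LEARN_RATE_LIMITER)
        self.avg = (self.avg * effective_count + result) / (effective_count + 1.0)
        self.count = effective_count + 1

avg = RunningAverage()
for duration in [1.0, 2.0, 3.0]:
    avg.add(duration)
print(avg.avg)  # 2.0 -- still the plain mean while count <= 99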
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index 9199b62d8a..7c9a250bc3 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -4,42 +4,38 @@
# found in the LICENSE file.
from Queue import Empty
-from multiprocessing import Event, Process, Queue
+from contextlib import contextmanager
+from multiprocessing import Process, Queue
+import os
+import signal
+import time
import traceback
+from . import command
+
def setup_testing():
"""For testing only: Use threading under the hood instead of multiprocessing
to make coverage work.
"""
global Queue
- global Event
global Process
del Queue
- del Event
del Process
from Queue import Queue
- from threading import Event
from threading import Thread as Process
+ # Monkeypatch threading Queue to look like multiprocessing Queue.
+ Queue.cancel_join_thread = lambda self: None
class NormalResult():
def __init__(self, result):
self.result = result
- self.exception = False
- self.break_now = False
-
+ self.exception = None
class ExceptionResult():
- def __init__(self):
- self.exception = True
- self.break_now = False
-
-
-class BreakResult():
- def __init__(self):
- self.exception = False
- self.break_now = True
+ def __init__(self, exception):
+ self.exception = exception
class MaybeResult():
@@ -56,26 +52,43 @@ class MaybeResult():
return MaybeResult(False, value)
-def Worker(fn, work_queue, done_queue, done,
+def Worker(fn, work_queue, done_queue,
process_context_fn=None, process_context_args=None):
"""Worker to be run in a child process.
- The worker stops on two conditions. 1. When the poison pill "STOP" is
- reached or 2. when the event "done" is set."""
+ The worker stops when the poison pill "STOP" is reached.
+ """
try:
kwargs = {}
if process_context_fn and process_context_args is not None:
kwargs.update(process_context=process_context_fn(*process_context_args))
for args in iter(work_queue.get, "STOP"):
- if done.is_set():
- break
try:
done_queue.put(NormalResult(fn(*args, **kwargs)))
+ except command.AbortException:
+ # SIGINT, SIGTERM or internal hard timeout.
+ break
except Exception, e:
traceback.print_exc()
print(">>> EXCEPTION: %s" % e)
- done_queue.put(ExceptionResult())
+ done_queue.put(ExceptionResult(e))
+ # When we reach here on normal tear down, all items have been pulled from
+ # the done_queue before and this should have no effect. On fast abort, it's
+ # possible that a fast worker left items on the done_queue in memory, which
+ # will never be pulled. This call purges those to avoid a deadlock.
+ done_queue.cancel_join_thread()
except KeyboardInterrupt:
- done_queue.put(BreakResult())
+ assert False, 'Unreachable'
+
+
+@contextmanager
+def without_sig():
+ int_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
+ term_handler = signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ try:
+ yield
+ finally:
+ signal.signal(signal.SIGINT, int_handler)
+ signal.signal(signal.SIGTERM, term_handler)
class Pool():
@@ -88,24 +101,28 @@ class Pool():
# Necessary to not overflow the queue's pipe if a keyboard interrupt happens.
BUFFER_FACTOR = 4
- def __init__(self, num_workers, heartbeat_timeout=30):
+ def __init__(self, num_workers, heartbeat_timeout=1):
self.num_workers = num_workers
self.processes = []
self.terminated = False
+ self.abort_now = False
- # Invariant: count >= #work_queue + #done_queue. It is greater when a
- # worker takes an item from the work_queue and before the result is
+ # Invariant: processing_count >= #work_queue + #done_queue. It is greater
+ # when a worker takes an item from the work_queue and before the result is
# submitted to the done_queue. It is equal when no worker is working,
# e.g. when all workers have finished, and when no results are processed.
# Count is only accessed by the parent process. Only the parent process is
# allowed to remove items from the done_queue and to add items to the
# work_queue.
- self.count = 0
- self.work_queue = Queue()
- self.done_queue = Queue()
- self.done = Event()
+ self.processing_count = 0
self.heartbeat_timeout = heartbeat_timeout
+ # Disable sigint and sigterm to prevent subprocesses from capturing the
+ # signals.
+ with without_sig():
+ self.work_queue = Queue()
+ self.done_queue = Queue()
+
def imap_unordered(self, fn, gen,
process_context_fn=None, process_context_args=None):
"""Maps function "fn" to items in generator "gen" on the worker processes
@@ -123,58 +140,63 @@ class Pool():
process_context_fn. All arguments will be pickled and sent beyond the
process boundary.
"""
+ if self.terminated:
+ return
try:
internal_error = False
gen = iter(gen)
self.advance = self._advance_more
- for w in xrange(self.num_workers):
- p = Process(target=Worker, args=(fn,
- self.work_queue,
- self.done_queue,
- self.done,
- process_context_fn,
- process_context_args))
- p.start()
- self.processes.append(p)
+ # Disable sigint and sigterm to prevent subprocesses from capturing the
+ # signals.
+ with without_sig():
+ for w in xrange(self.num_workers):
+ p = Process(target=Worker, args=(fn,
+ self.work_queue,
+ self.done_queue,
+ process_context_fn,
+ process_context_args))
+ p.start()
+ self.processes.append(p)
self.advance(gen)
- while self.count > 0:
+ while self.processing_count > 0:
while True:
try:
- result = self.done_queue.get(timeout=self.heartbeat_timeout)
- break
- except Empty:
- # Indicate a heartbeat. The iterator will continue fetching the
- # next result.
- yield MaybeResult.create_heartbeat()
- self.count -= 1
- if result.exception:
- # TODO(machenbach): Handle a few known types of internal errors
- # gracefully, e.g. missing test files.
- internal_error = True
- continue
- elif result.break_now:
- # A keyboard interrupt happened in one of the worker processes.
- raise KeyboardInterrupt
- else:
- yield MaybeResult.create_result(result.result)
+ # Read from result queue in a responsive fashion. If available,
+ # this will return a normal result immediately or a heartbeat on
+ # heartbeat timeout (default 1 second).
+ result = self._get_result_from_queue()
+ except:
+ # TODO(machenbach): Handle a few known types of internal errors
+ # gracefully, e.g. missing test files.
+ internal_error = True
+ continue
+
+ if self.abort_now:
+ # SIGINT, SIGTERM or internal hard timeout.
+ return
+
+ yield result
+ break
+
self.advance(gen)
except KeyboardInterrupt:
- raise
+ assert False, 'Unreachable'
except Exception as e:
traceback.print_exc()
print(">>> EXCEPTION: %s" % e)
finally:
- self.terminate()
+ self._terminate()
+
if internal_error:
raise Exception("Internal error in a worker process.")
def _advance_more(self, gen):
- while self.count < self.num_workers * self.BUFFER_FACTOR:
+ while self.processing_count < self.num_workers * self.BUFFER_FACTOR:
try:
self.work_queue.put(gen.next())
- self.count += 1
+ self.processing_count += 1
except StopIteration:
self.advance = self._advance_empty
break
@@ -185,27 +207,51 @@ class Pool():
def add(self, args):
"""Adds an item to the work queue. Can be called dynamically while
processing the results from imap_unordered."""
+ assert not self.terminated
+
self.work_queue.put(args)
- self.count += 1
+ self.processing_count += 1
+
+ def abort(self):
+ """Schedules abort on next queue read.
+
+ This is safe to call when handling SIGINT, SIGTERM or when an internal
+ hard timeout is reached.
+ """
+ self.abort_now = True
- def terminate(self):
+ def _terminate(self):
+ """Terminates execution and cleans up the queues.
+
+ If abort() was called before termination, this also terminates the
+ subprocesses and doesn't wait for ongoing tests.
+ """
if self.terminated:
return
self.terminated = True
- # For exceptional tear down set the "done" event to stop the workers before
- # they empty the queue buffer.
- self.done.set()
+    # Drain any remaining tests out of the work queue.
+ try:
+ while True:
+ self.work_queue.get(True, 0.1)
+ except Empty:
+ pass
- for p in self.processes:
+ # Make sure all processes stop
+ for _ in self.processes:
# During normal tear down the workers block on get(). Feed a poison pill
# per worker to make them stop.
self.work_queue.put("STOP")
+ if self.abort_now:
+ for p in self.processes:
+ os.kill(p.pid, signal.SIGTERM)
+
for p in self.processes:
p.join()
- # Drain the queues to prevent failures when queues are garbage collected.
+ # Drain the queues to prevent stderr chatter when queues are garbage
+ # collected.
try:
while True: self.work_queue.get(False)
except:
@@ -214,3 +260,22 @@ class Pool():
while True: self.done_queue.get(False)
except:
pass
+
+ def _get_result_from_queue(self):
+ """Attempts to get the next result from the queue.
+
+ Returns: A wrapped result if one was available within heartbeat timeout,
+ a heartbeat result otherwise.
+ Raises:
+      Exception: If an exception occurred when processing the task on the
+ worker side, it is reraised here.
+ """
+ while True:
+ try:
+ result = self.done_queue.get(timeout=self.heartbeat_timeout)
+ self.processing_count -= 1
+ if result.exception:
+ raise result.exception
+ return MaybeResult.create_result(result.result)
+ except Empty:
+ return MaybeResult.create_heartbeat()
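Two mechanisms carry the rewritten pool: the "STOP" poison pill that ends each worker's iter(get, ...) loop, and queue-read timeouts surfaced as heartbeats. A minimal sketch with threads standing in for processes (as setup_testing arranges above):

from Queue import Queue, Empty  # Python 2, matching the module above
from threading import Thread

def worker(work_queue, done_queue):
    # iter(get, "STOP"): block on the queue until the poison pill arrives.
    for item in iter(work_queue.get, "STOP"):
        done_queue.put(item * item)

work, done = Queue(), Queue()
t = Thread(target=worker, args=(work, done))
t.start()
for i in range(3):
    work.put(i)
received = 0
while received < 3:
    try:
        # A timeout is not an error: yield a heartbeat and retry, keeping
        # the consumer loop responsive.
        print(done.get(timeout=1))
        received += 1
    except Empty:
        print("heartbeat")
work.put("STOP")
t.join()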
diff --git a/deps/v8/tools/testrunner/local/pool_unittest.py b/deps/v8/tools/testrunner/local/pool_unittest.py
index 235eca6393..240cd563f8 100644..100755
--- a/deps/v8/tools/testrunner/local/pool_unittest.py
+++ b/deps/v8/tools/testrunner/local/pool_unittest.py
@@ -3,9 +3,16 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import os
+import sys
import unittest
-from pool import Pool
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.local.pool import Pool
def Run(x):
if x == 10:
@@ -17,6 +24,9 @@ class PoolTest(unittest.TestCase):
results = set()
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+ if result.heartbeat:
+ # Any result can be a heartbeat due to timings.
+ continue
results.add(result.value)
self.assertEquals(set(range(0, 10)), results)
@@ -25,6 +35,9 @@ class PoolTest(unittest.TestCase):
pool = Pool(3)
with self.assertRaises(Exception):
for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
+ if result.heartbeat:
+ # Any result can be a heartbeat due to timings.
+ continue
# Item 10 will not appear in results due to an internal exception.
results.add(result.value)
expect = set(range(0, 12))
@@ -35,8 +48,15 @@ class PoolTest(unittest.TestCase):
results = set()
pool = Pool(3)
for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+ if result.heartbeat:
+ # Any result can be a heartbeat due to timings.
+ continue
results.add(result.value)
if result.value < 30:
pool.add([result.value + 20])
self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
results)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
deleted file mode 100644
index f6ebddf2e5..0000000000
--- a/deps/v8/tools/testrunner/local/progress.py
+++ /dev/null
@@ -1,452 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-from functools import wraps
-import json
-import os
-import sys
-import time
-
-from . import junit_output
-from . import statusfile
-from ..testproc import progress as progress_proc
-
-
-class ProgressIndicator(object):
-
- def __init__(self):
- self.runner = None
-
- def SetRunner(self, runner):
- self.runner = runner
-
- def Starting(self):
- pass
-
- def Done(self):
- pass
-
- def HasRun(self, test, output, has_unexpected_output):
- pass
-
- def Heartbeat(self):
- pass
-
- def PrintFailureHeader(self, test):
- if test.output_proc.negative:
- negative_marker = '[negative] '
- else:
- negative_marker = ''
- print "=== %(label)s %(negative)s===" % {
- 'label': test,
- 'negative': negative_marker,
- }
-
- def ToProgressIndicatorProc(self):
- print ('Warning: %s is not available as a processor' %
- self.__class__.__name__)
- return None
-
-
-class IndicatorNotifier(object):
- """Holds a list of progress indicators and notifies them all on events."""
- def __init__(self):
- self.indicators = []
-
- def Register(self, indicator):
- self.indicators.append(indicator)
-
- def ToProgressIndicatorProcs(self):
- return [i.ToProgressIndicatorProc() for i in self.indicators]
-
-
-# Forge all generic event-dispatching methods in IndicatorNotifier, which are
-# part of the ProgressIndicator interface.
-for func_name in ProgressIndicator.__dict__:
- func = getattr(ProgressIndicator, func_name)
- if callable(func) and not func.__name__.startswith('_'):
- def wrap_functor(f):
- @wraps(f)
- def functor(self, *args, **kwargs):
- """Generic event dispatcher."""
- for indicator in self.indicators:
- getattr(indicator, f.__name__)(*args, **kwargs)
- return functor
- setattr(IndicatorNotifier, func_name, wrap_functor(func))
-
-
-class SimpleProgressIndicator(ProgressIndicator):
- """Abstract base class for {Verbose,Dots}ProgressIndicator"""
-
- def Starting(self):
- print 'Running %i tests' % self.runner.total
-
- def Done(self):
- print
- for failed in self.runner.failed:
- output = self.runner.outputs[failed]
- self.PrintFailureHeader(failed)
- if output.stderr:
- print "--- stderr ---"
- print output.stderr.strip()
- if output.stdout:
- print "--- stdout ---"
- print output.stdout.strip()
- print "Command: %s" % failed.cmd.to_string()
- if output.HasCrashed():
- print "exit code: %d" % output.exit_code
- print "--- CRASHED ---"
- if output.HasTimedOut():
- print "--- TIMEOUT ---"
- if len(self.runner.failed) == 0:
- print "==="
- print "=== All tests succeeded"
- print "==="
- else:
- print
- print "==="
- print "=== %i tests failed" % len(self.runner.failed)
- if self.runner.crashed > 0:
- print "=== %i tests CRASHED" % self.runner.crashed
- print "==="
-
-
-class VerboseProgressIndicator(SimpleProgressIndicator):
-
- def HasRun(self, test, output, has_unexpected_output):
- if has_unexpected_output:
- if output.HasCrashed():
- outcome = 'CRASH'
- else:
- outcome = 'FAIL'
- else:
- outcome = 'pass'
- print 'Done running %s: %s' % (test, outcome)
- sys.stdout.flush()
-
- def Heartbeat(self):
- print 'Still working...'
- sys.stdout.flush()
-
- def ToProgressIndicatorProc(self):
- return progress_proc.VerboseProgressIndicator()
-
-
-class DotsProgressIndicator(SimpleProgressIndicator):
-
- def HasRun(self, test, output, has_unexpected_output):
- total = self.runner.succeeded + len(self.runner.failed)
- if (total > 1) and (total % 50 == 1):
- sys.stdout.write('\n')
- if has_unexpected_output:
- if output.HasCrashed():
- sys.stdout.write('C')
- sys.stdout.flush()
- elif output.HasTimedOut():
- sys.stdout.write('T')
- sys.stdout.flush()
- else:
- sys.stdout.write('F')
- sys.stdout.flush()
- else:
- sys.stdout.write('.')
- sys.stdout.flush()
-
- def ToProgressIndicatorProc(self):
- return progress_proc.DotsProgressIndicator()
-
-
-class CompactProgressIndicator(ProgressIndicator):
- """Abstract base class for {Color,Monochrome}ProgressIndicator"""
-
- def __init__(self, templates):
- super(CompactProgressIndicator, self).__init__()
- self.templates = templates
- self.last_status_length = 0
- self.start_time = time.time()
-
- def Done(self):
- self.PrintProgress('Done')
- print "" # Line break.
-
- def HasRun(self, test, output, has_unexpected_output):
- self.PrintProgress(str(test))
- if has_unexpected_output:
- self.ClearLine(self.last_status_length)
- self.PrintFailureHeader(test)
- stdout = output.stdout.strip()
- if len(stdout):
- print self.templates['stdout'] % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- print self.templates['stderr'] % stderr
- print "Command: %s" % test.cmd.to_string()
- if output.HasCrashed():
- print "exit code: %d" % output.exit_code
- print "--- CRASHED ---"
- if output.HasTimedOut():
- print "--- TIMEOUT ---"
-
- def Truncate(self, string, length):
- if length and (len(string) > (length - 3)):
- return string[:(length - 3)] + "..."
- else:
- return string
-
- def PrintProgress(self, name):
- self.ClearLine(self.last_status_length)
- elapsed = time.time() - self.start_time
- progress = 0 if not self.runner.total else (
- ((self.runner.total - self.runner.remaining) * 100) //
- self.runner.total)
- status = self.templates['status_line'] % {
- 'passed': self.runner.succeeded,
- 'progress': progress,
- 'failed': len(self.runner.failed),
- 'test': name,
- 'mins': int(elapsed) / 60,
- 'secs': int(elapsed) % 60
- }
- status = self.Truncate(status, 78)
- self.last_status_length = len(status)
- print status,
- sys.stdout.flush()
-
-
-class ColorProgressIndicator(CompactProgressIndicator):
-
- def __init__(self):
- templates = {
- 'status_line': ("[%(mins)02i:%(secs)02i|"
- "\033[34m%%%(progress) 4d\033[0m|"
- "\033[32m+%(passed) 4d\033[0m|"
- "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
- 'stdout': "\033[1m%s\033[0m",
- 'stderr': "\033[31m%s\033[0m",
- }
- super(ColorProgressIndicator, self).__init__(templates)
-
- def ClearLine(self, last_line_length):
- print "\033[1K\r",
-
- def ToProgressIndicatorProc(self):
- return progress_proc.ColorProgressIndicator()
-
-
-class MonochromeProgressIndicator(CompactProgressIndicator):
-
- def __init__(self):
- templates = {
- 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
- "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
- 'stdout': '%s',
- 'stderr': '%s',
- }
- super(MonochromeProgressIndicator, self).__init__(templates)
-
- def ClearLine(self, last_line_length):
- print ("\r" + (" " * last_line_length) + "\r"),
-
- def ToProgressIndicatorProc(self):
- return progress_proc.MonochromeProgressIndicator()
-
-
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self.junitout = junitout
-    self.junittestsuite = junittestsuite
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def Done(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
- def HasRun(self, test, output, has_unexpected_output):
- fail_text = ""
- if has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % test.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=test.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def ToProgressIndicatorProc(self):
- if self.outfile != sys.stdout:
- self.outfile.close()
- return progress_proc.JUnitTestProgressIndicator(self.junitout,
- self.junittestsuite)
-
-
-class JsonTestProgressIndicator(ProgressIndicator):
-
- def __init__(self, json_test_results, arch, mode, random_seed):
- super(JsonTestProgressIndicator, self).__init__()
- self.json_test_results = json_test_results
- self.arch = arch
- self.mode = mode
- self.random_seed = random_seed
- self.results = []
- self.tests = []
-
- def ToProgressIndicatorProc(self):
- return progress_proc.JsonTestProgressIndicator(
- self.json_test_results, self.arch, self.mode, self.random_seed)
-
- def Done(self):
- complete_results = []
- if os.path.exists(self.json_test_results):
- with open(self.json_test_results, "r") as f:
- # Buildbot might start out with an empty file.
- complete_results = json.loads(f.read() or "[]")
-
- duration_mean = None
- if self.tests:
- # Get duration mean.
- duration_mean = (
- sum(duration for (_, duration) in self.tests) /
- float(len(self.tests)))
-
- # Sort tests by duration.
- self.tests.sort(key=lambda (_, duration): duration, reverse=True)
- slowest_tests = [
- {
- "name": str(test),
- "flags": test.cmd.args,
- "command": test.cmd.to_string(relative=True),
- "duration": duration,
- "marked_slow": test.is_slow,
- } for (test, duration) in self.tests[:20]
- ]
-
- complete_results.append({
- "arch": self.arch,
- "mode": self.mode,
- "results": self.results,
- "slowest_tests": slowest_tests,
- "duration_mean": duration_mean,
- "test_total": len(self.tests),
- })
-
- with open(self.json_test_results, "w") as f:
- f.write(json.dumps(complete_results))
-
- def HasRun(self, test, output, has_unexpected_output):
-    # Buffer all tests for sorting the durations at the end.
- self.tests.append((test, output.duration))
- if not has_unexpected_output:
-      # Omit tests that ran as expected. Passing tests from reruns after
-      # failures still count as unexpected output and are reported here as
-      # well.
- return
-
- self.results.append({
- "name": str(test),
- "flags": test.cmd.args,
- "command": test.cmd.to_string(relative=True),
- "run": test.run,
- "stdout": output.stdout,
- "stderr": output.stderr,
- "exit_code": output.exit_code,
- "result": test.output_proc.get_outcome(output),
- "expected": test.expected_outcomes,
- "duration": output.duration,
-
- # TODO(machenbach): This stores only the global random seed from the
- # context and not possible overrides when using random-seed stress.
- "random_seed": self.random_seed,
- "target_name": test.get_shell(),
- "variant": test.variant,
- })
-
-
-class FlakinessTestProgressIndicator(ProgressIndicator):
-
- def __init__(self, json_test_results):
- super(FlakinessTestProgressIndicator, self).__init__()
- self.json_test_results = json_test_results
- self.results = {}
- self.summary = {
- "PASS": 0,
- "FAIL": 0,
- "CRASH": 0,
- "TIMEOUT": 0,
- }
- self.seconds_since_epoch = time.time()
-
- def Done(self):
- with open(self.json_test_results, "w") as f:
- json.dump({
- "interrupted": False,
- "num_failures_by_type": self.summary,
- "path_delimiter": "/",
- "seconds_since_epoch": self.seconds_since_epoch,
- "tests": self.results,
- "version": 3,
- }, f)
-
- def HasRun(self, test, output, has_unexpected_output):
- key = test.get_id()
- outcome = test.output_proc.get_outcome(output)
- assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
- if test.run == 1:
- # First run of this test.
- self.results[key] = {
- "actual": outcome,
- "expected": " ".join(test.expected_outcomes),
- "times": [output.duration],
- }
- self.summary[outcome] = self.summary[outcome] + 1
- else:
- # This is a rerun and a previous result exists.
- result = self.results[key]
- result["actual"] = "%s %s" % (result["actual"], outcome)
- result["times"].append(output.duration)
-
-
-PROGRESS_INDICATORS = {
- 'verbose': VerboseProgressIndicator,
- 'dots': DotsProgressIndicator,
- 'color': ColorProgressIndicator,
- 'mono': MonochromeProgressIndicator
-}
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 988750d6b4..e3adaa298a 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -34,8 +34,8 @@ from utils import Freeze
# Possible outcomes
FAIL = "FAIL"
PASS = "PASS"
-TIMEOUT = "TIMEOUT" # TODO(majeski): unused in status files
-CRASH = "CRASH" # TODO(majeski): unused in status files
+TIMEOUT = "TIMEOUT"
+CRASH = "CRASH"
# Outcomes only for status file, need special handling
FAIL_OK = "FAIL_OK"
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 6a9e9831ce..4bdfd008fe 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -34,32 +34,12 @@ from . import command
from . import statusfile
from . import utils
from ..objects.testcase import TestCase
-from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+from .variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
STANDARD_VARIANT = set(["default"])
-class LegacyVariantsGenerator(object):
- def __init__(self, suite, variants):
- self.suite = suite
- self.all_variants = ALL_VARIANTS & variants
- self.standard_variant = STANDARD_VARIANT & variants
-
- def FilterVariantsByTest(self, test):
- if test.only_standard_variant:
- return self.standard_variant
- return self.all_variants
-
- def GetFlagSets(self, test, variant):
- return ALL_VARIANT_FLAGS[variant]
-
-
-class StandardLegacyVariantsGenerator(LegacyVariantsGenerator):
- def FilterVariantsByTest(self, testcase):
- return self.standard_variant
-
-
class VariantsGenerator(object):
def __init__(self, variants):
self._all_variants = [v for v in variants if v in ALL_VARIANTS]
@@ -80,45 +60,58 @@ class VariantsGenerator(object):
return self._all_variants
+class TestCombiner(object):
+ def get_group_key(self, test):
+    """To indicate which tests can be combined with each other we define a
+    group key for each test. Tests with the same group key can be combined. A
+    test without a group key (None) cannot be combined with any other test.
+ """
+ raise NotImplementedError()
+
+ def combine(self, name, tests):
+    """Returns a test combined from `tests`. Since tests are identified by
+    their suite and name, the `name` parameter should be unique within one
+    suite.
+ """
+ return self._combined_test_class()(name, tests)
+
+ def _combined_test_class(self):
+ raise NotImplementedError()
+
+
class TestSuite(object):
@staticmethod
- def LoadTestSuite(root):
+ def LoadTestSuite(root, test_config):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module(name + "_testcfg", f, pathname, description)
- return module.GetSuite(name, root)
+ return module.GetSuite(name, root, test_config)
finally:
if f:
f.close()
- def __init__(self, name, root):
- # Note: This might be called concurrently from different processes.
+ def __init__(self, name, root, test_config):
self.name = name # string
self.root = root # string containing path
+ self.test_config = test_config
self.tests = None # list of TestCase objects
self.statusfile = None
+ self.suppress_internals = False
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
- def ListTests(self, context):
- raise NotImplementedError
-
- def _LegacyVariantsGeneratorFactory(self):
- """The variant generator class to be used."""
- return LegacyVariantsGenerator
+ def do_suppress_internals(self):
+ """Specifies if this test suite should suppress asserts based on internals.
- def CreateLegacyVariantsGenerator(self, variants):
- """Return a generator for the testing variants of this suite.
-
- Args:
- variants: List of variant names to be run as specified by the test
- runner.
- Returns: An object of type LegacyVariantsGenerator.
+    Internals are, e.g., assertions about the outcome of native runtime
+    functions. This is switched off on some fuzzers that violate these
+    contracts.
"""
- return self._LegacyVariantsGeneratorFactory()(self, set(variants))
+ self.suppress_internals = True
+
+ def ListTests(self):
+ raise NotImplementedError
def get_variants_gen(self, variants):
return self._variants_gen_class()(variants)
@@ -126,11 +119,26 @@ class TestSuite(object):
def _variants_gen_class(self):
return VariantsGenerator
+ def test_combiner_available(self):
+ return bool(self._test_combiner_class())
+
+ def get_test_combiner(self):
+ cls = self._test_combiner_class()
+ if cls:
+ return cls()
+ return None
+
+ def _test_combiner_class(self):
+    """Returns the TestCombiner subclass, or None if the suite doesn't
+    support combining tests.
+ """
+ return None
+
def ReadStatusFile(self, variables):
self.statusfile = statusfile.StatusFile(self.status_file(), variables)
- def ReadTestCases(self, context):
- self.tests = self.ListTests(context)
+ def ReadTestCases(self):
+ self.tests = self.ListTests()
def FilterTestCasesByStatus(self,
@@ -196,8 +204,19 @@ class TestSuite(object):
self.tests = filtered
def _create_test(self, path, **kwargs):
- test = self._test_class()(self, path, self._path_to_name(path), **kwargs)
- return test
+ if self.suppress_internals:
+ test_class = self._suppressed_test_class()
+ else:
+ test_class = self._test_class()
+ return test_class(self, path, self._path_to_name(path), self.test_config,
+ **kwargs)
+
+ def _suppressed_test_class(self):
+    """Optional test-case class that suppresses assertions. Used by fuzzers
+    that are only interested in dchecks or TSan findings and that might
+    violate the assertions through fuzzing.
+ """
+ return self._test_class()
def _test_class(self):
raise NotImplementedError
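A suite opts into the reworked API through its testcfg.py module. The following is a hypothetical, minimal testcfg sketch — the suite itself is invented — showing the new three-argument GetSuite() entry point and the optional combiner hook:

  # Hypothetical testcfg.py for a suite; the names here are illustrative.
  from testrunner.local import testsuite
  from testrunner.objects import testcase

  class TestSuite(testsuite.TestSuite):
    def ListTests(self):
      # Real suites walk self.root here; a single fixed test keeps the
      # sketch short.
      return [self._create_test('example')]

    def _test_class(self):
      return testcase.TestCase

    def _test_combiner_class(self):
      # Return a TestCombiner subclass to support --combine-tests; the
      # default None excludes the suite when combining is requested.
      return None

  def GetSuite(name, root, test_config):
    # New three-argument signature expected by TestSuite.LoadTestSuite().
    return TestSuite(name, root, test_config)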
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index bf8c3d9f7e..9834386d01 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -36,6 +36,21 @@ import subprocess
import urllib2
+### Exit codes and their meaning.
+# Normal execution.
+EXIT_CODE_PASS = 0
+# Execution with test failures.
+EXIT_CODE_FAILURES = 1
+# Execution with no tests executed.
+EXIT_CODE_NO_TESTS = 2
+# Execution aborted with SIGINT (Ctrl-C).
+EXIT_CODE_INTERRUPTED = 3
+# Execution aborted with SIGTERM.
+EXIT_CODE_TERMINATED = 4
+# Internal error.
+EXIT_CODE_INTERNAL_ERROR = 5
+
+
def GetSuitePaths(test_root):
return [ f for f in os.listdir(test_root) if isdir(join(test_root, f)) ]
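Callers can now branch on named constants rather than magic numbers; a hypothetical wrapper illustrating their use:

  # Hypothetical use of the EXIT_CODE_* constants defined above.
  from testrunner.local import utils

  MESSAGES = {
    utils.EXIT_CODE_PASS: 'all tests passed',
    utils.EXIT_CODE_FAILURES: 'some tests failed',
    utils.EXIT_CODE_NO_TESTS: 'no tests were run',
    utils.EXIT_CODE_INTERRUPTED: 'aborted by SIGINT',
    utils.EXIT_CODE_TERMINATED: 'aborted by SIGTERM',
    utils.EXIT_CODE_INTERNAL_ERROR: 'internal error',
  }

  def describe(exit_code):
    return MESSAGES.get(exit_code, 'unknown exit code %d' % exit_code)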
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index f1e9ad301e..25de235da1 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -22,8 +22,31 @@ ALL_VARIANT_FLAGS = {
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
"trusted": [["--no-untrusted-code-mitigations"]],
- "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
+ "wasm_traps": [["--wasm-trap-handler", "--invoke-weak-callbacks"]],
"wasm_no_native": [["--no-wasm-jit-to-native"]],
}
-ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys())
+SLOW_VARIANTS = set([
+ 'stress',
+ 'nooptimization',
+])
+
+FAST_VARIANTS = set([
+ 'default'
+])
+
+
+def _variant_order_key(v):
+ if v in SLOW_VARIANTS:
+ return 0
+ if v in FAST_VARIANTS:
+ return 100
+ return 50
+
+ALL_VARIANTS = sorted(ALL_VARIANT_FLAGS.keys(),
+ key=_variant_order_key)
+
+# Check {SLOW,FAST}_VARIANTS entries
+for variants in [SLOW_VARIANTS, FAST_VARIANTS]:
+ for v in variants:
+ assert v in ALL_VARIANT_FLAGS
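ALL_VARIANTS is now an ordered list: slow variants sort first so their long-running work gets scheduled early, fast ones last. A self-contained sketch of the ordering rule with an abbreviated variant list:

  # Self-contained sketch of the ordering above; the variant list is
  # abbreviated, not the full ALL_VARIANT_FLAGS table.
  SLOW_VARIANTS = {'stress', 'nooptimization'}
  FAST_VARIANTS = {'default'}

  def _variant_order_key(v):
    if v in SLOW_VARIANTS:
      return 0        # schedule slow variants first
    if v in FAST_VARIANTS:
      return 100      # and fast ones last
    return 50         # everything else in between

  variants = ['default', 'trusted', 'stress', 'wasm_traps', 'nooptimization']
  print(sorted(variants, key=_variant_order_key))
  # -> ['stress', 'nooptimization', 'trusted', 'wasm_traps', 'default']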
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
new file mode 100755
index 0000000000..77effc1847
--- /dev/null
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -0,0 +1,225 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import random
+import sys
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+import base_runner
+
+from testrunner.local import utils
+
+from testrunner.testproc import fuzzer
+from testrunner.testproc.base import TestProcProducer
+from testrunner.testproc.combiner import CombinerProc
+from testrunner.testproc.execution import ExecutionProc
+from testrunner.testproc.expectation import ForgiveTimeoutProc
+from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
+from testrunner.testproc.loader import LoadProc
+from testrunner.testproc.progress import ResultsTracker, TestsCounter
+from testrunner.utils import random_utils
+
+
+DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
+
+
+class NumFuzzer(base_runner.BaseTestRunner):
+ def __init__(self, *args, **kwargs):
+ super(NumFuzzer, self).__init__(*args, **kwargs)
+
+ def _add_parser_options(self, parser):
+ parser.add_option("--fuzzer-random-seed", default=0,
+ help="Default seed for initializing fuzzer random "
+ "generator")
+ parser.add_option("--tests-count", default=5, type="int",
+ help="Number of tests to generate from each base test. "
+ "Can be combined with --total-timeout-sec with "
+                           "value 0 to generate an unbounded number of "
+                           "subtests. "
+ "When --combine-tests is set it indicates how many "
+ "tests to create in total")
+
+ # Stress gc
+ parser.add_option("--stress-marking", default=0, type="int",
+ help="probability [0-10] of adding --stress-marking "
+ "flag to the test")
+ parser.add_option("--stress-scavenge", default=0, type="int",
+ help="probability [0-10] of adding --stress-scavenge "
+ "flag to the test")
+ parser.add_option("--stress-compaction", default=0, type="int",
+ help="probability [0-10] of adding --stress-compaction "
+ "flag to the test")
+ parser.add_option("--stress-gc", default=0, type="int",
+ help="probability [0-10] of adding --random-gc-interval "
+ "flag to the test")
+ parser.add_option("--stress-thread-pool-size", default=0, type="int",
+ help="probability [0-10] of adding --thread-pool-size "
+ "flag to the test")
+
+ # Stress deopt
+ parser.add_option("--stress-deopt", default=0, type="int",
+ help="probability [0-10] of adding --deopt-every-n-times "
+ "flag to the test")
+ parser.add_option("--stress-deopt-min", default=1, type="int",
+ help="extends --stress-deopt to have minimum interval "
+ "between deopt points")
+
+ # Stress interrupt budget
+ parser.add_option("--stress-interrupt-budget", default=0, type="int",
+ help="probability [0-10] of adding --interrupt-budget "
+ "flag to the test")
+
+ # Combine multiple tests
+ parser.add_option("--combine-tests", default=False, action="store_true",
+ help="Combine multiple tests as one and run with "
+ "try-catch wrapper")
+ parser.add_option("--combine-max", default=100, type="int",
+ help="Maximum number of tests to combine")
+ parser.add_option("--combine-min", default=2, type="int",
+ help="Minimum number of tests to combine")
+
+ return parser
+
+
+ def _process_options(self, options):
+ if not options.fuzzer_random_seed:
+ options.fuzzer_random_seed = random_utils.random_seed()
+
+ if options.total_timeout_sec:
+ options.tests_count = 0
+
+ if options.combine_tests:
+ if options.combine_min > options.combine_max:
+        print ('min_group_size (%d) cannot be larger than max_group_size (%d)' %
+               (options.combine_min, options.combine_max))
+ raise base_runner.TestRunnerError()
+
+ return True
+
+ def _get_default_suite_names(self):
+ return DEFAULT_SUITES
+
+ def _timeout_scalefactor(self, options):
+ factor = super(NumFuzzer, self)._timeout_scalefactor(options)
+ if options.stress_interrupt_budget:
+ # TODO(machenbach): This should be moved to a more generic config.
+      # Fuzzer timeouts are too generous in debug mode.
+ factor = max(int(factor * 0.25), 1)
+ return factor
+
+ def _get_statusfile_variables(self, options):
+ variables = (
+ super(NumFuzzer, self)._get_statusfile_variables(options))
+ variables.update({
+ 'deopt_fuzzer': bool(options.stress_deopt),
+ 'endurance_fuzzer': bool(options.combine_tests),
+ 'gc_stress': bool(options.stress_gc),
+ 'gc_fuzzer': bool(max([options.stress_marking,
+ options.stress_scavenge,
+ options.stress_compaction,
+ options.stress_gc,
+ options.stress_thread_pool_size])),
+ })
+ return variables
+
+ def _do_execute(self, tests, args, options):
+ loader = LoadProc()
+ fuzzer_rng = random.Random(options.fuzzer_random_seed)
+
+ combiner = self._create_combiner(fuzzer_rng, options)
+ results = ResultsTracker()
+ execproc = ExecutionProc(options.j)
+ sigproc = self._create_signal_proc()
+ indicators = self._create_progress_indicators(options)
+ procs = [
+ loader,
+ NameFilterProc(args) if args else None,
+ StatusFileFilterProc(None, None),
+ # TODO(majeski): Improve sharding when combiner is present. Maybe select
+ # different random seeds for shards instead of splitting tests.
+ self._create_shard_proc(options),
+ ForgiveTimeoutProc(),
+ combiner,
+ self._create_fuzzer(fuzzer_rng, options),
+ sigproc,
+ ] + indicators + [
+ results,
+ self._create_timeout_proc(options),
+ self._create_rerun_proc(options),
+ execproc,
+ ]
+ self._prepare_procs(procs)
+ loader.load_tests(tests)
+
+ # TODO(majeski): maybe some notification from loader would be better?
+ if combiner:
+ combiner.generate_initial_tests(options.j * 4)
+
+ # This starts up worker processes and blocks until all tests are
+ # processed.
+ execproc.run()
+
+ for indicator in indicators:
+ indicator.finished()
+
+ print '>>> %d tests ran' % results.total
+ if results.failed:
+ return utils.EXIT_CODE_FAILURES
+
+ # Indicate if a SIGINT or SIGTERM happened.
+ return sigproc.exit_code
+
+ def _load_suites(self, names, options):
+ suites = super(NumFuzzer, self)._load_suites(names, options)
+ if options.combine_tests:
+ suites = [s for s in suites if s.test_combiner_available()]
+ if options.stress_interrupt_budget:
+ # Changing interrupt budget forces us to suppress certain test assertions.
+ for suite in suites:
+ suite.do_suppress_internals()
+ return suites
+
+ def _create_combiner(self, rng, options):
+ if not options.combine_tests:
+ return None
+ return CombinerProc(rng, options.combine_min, options.combine_max,
+ options.tests_count)
+
+ def _create_fuzzer(self, rng, options):
+ return fuzzer.FuzzerProc(
+ rng,
+ self._tests_count(options),
+ self._create_fuzzer_configs(options),
+ self._disable_analysis(options),
+ )
+
+ def _tests_count(self, options):
+ if options.combine_tests:
+ return 1
+ return options.tests_count
+
+ def _disable_analysis(self, options):
+ """Disable analysis phase when options are used that don't support it."""
+ return options.combine_tests or options.stress_interrupt_budget
+
+ def _create_fuzzer_configs(self, options):
+ fuzzers = []
+ def add(name, prob, *args):
+ if prob:
+ fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
+
+ add('compaction', options.stress_compaction)
+ add('marking', options.stress_marking)
+ add('scavenge', options.stress_scavenge)
+ add('gc_interval', options.stress_gc)
+ add('threads', options.stress_thread_pool_size)
+ add('interrupt_budget', options.stress_interrupt_budget)
+ add('deopt', options.stress_deopt, options.stress_deopt_min)
+ return fuzzers
+
+
+if __name__ == '__main__':
+ sys.exit(NumFuzzer().execute())
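Each --stress-* option is a probability on a 0-10 scale. The flag injection itself happens in testproc/fuzzer.py, which is not part of this hunk; the following sketch only illustrates the scale and is not the fuzzer's actual code:

  import random

  # Illustration of the 0-10 probability scale used by the --stress-*
  # options; fuzzer.FuzzerProc's real logic may differ in detail.
  def maybe_add_flag(rng, prob, flag):
    # prob=0 never adds the flag, prob=10 always does.
    if rng.randint(1, 10) <= prob:
      return [flag]
    return []

  rng = random.Random(42)  # seeded like --fuzzer-random-seed
  extra = maybe_add_flag(rng, 5, '--stress-compaction')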
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
deleted file mode 100644
index a3dd56d2dd..0000000000
--- a/deps/v8/tools/testrunner/objects/context.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class Context():
- def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
- isolates, command_prefix, extra_flags, noi18n, random_seed,
- no_sorting, rerun_failures_count, rerun_failures_max, no_harness,
- use_perf_data, sancov_dir, infra_staging=False):
- self.arch = arch
- self.mode = mode
- self.shell_dir = shell_dir
- self.mode_flags = mode_flags
- self.verbose = verbose
- self.timeout = timeout
- self.isolates = isolates
- self.command_prefix = command_prefix
- self.extra_flags = extra_flags
- self.noi18n = noi18n
- self.random_seed = random_seed
- self.no_sorting = no_sorting
- self.rerun_failures_count = rerun_failures_count
- self.rerun_failures_max = rerun_failures_max
- self.no_harness = no_harness
- self.use_perf_data = use_perf_data
- self.sancov_dir = sancov_dir
- self.infra_staging = infra_staging
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index adc33c9f12..74cec56a85 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -27,9 +27,11 @@
import signal
+import copy
from ..local import utils
+
class Output(object):
def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
@@ -40,6 +42,13 @@ class Output(object):
self.pid = pid
self.duration = duration
+ def without_text(self):
+    """Returns a copy of the output without stdout and stderr."""
+ other = copy.copy(self)
+ other.stdout = None
+ other.stderr = None
+ return other
+
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.exit_code and not (0x3FFFFF00 & self.exit_code)
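Since copy.copy is shallow, without_text() shares every field with the original except the two it clears; a short usage sketch:

  # Usage sketch for Output.without_text(); arguments mirror __init__ above.
  from testrunner.objects.output import Output

  out = Output(exit_code=0, timed_out=False, stdout='...', stderr='',
               pid=1234, duration=0.5)
  slim = out.without_text()
  assert slim.stdout is None and slim.stderr is None
  assert slim.exit_code == out.exit_code  # remaining fields are shared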
diff --git a/deps/v8/tools/testrunner/objects/predictable.py b/deps/v8/tools/testrunner/objects/predictable.py
index ad93077be9..48279d625c 100644
--- a/deps/v8/tools/testrunner/objects/predictable.py
+++ b/deps/v8/tools/testrunner/objects/predictable.py
@@ -4,6 +4,7 @@
from ..local import statusfile
from ..outproc import base as outproc_base
+from ..testproc import base as testproc_base
from ..testproc.result import Result
@@ -15,11 +16,7 @@ from ..testproc.result import Result
def get_outproc(test):
- output_proc = test.output_proc
- if output_proc.negative or statusfile.FAIL in test.expected_outcomes:
- # TODO(majeski): Skip these tests instead of having special outproc.
- return NeverUnexpectedOutputOutProc(output_proc)
- return OutProc(output_proc)
+ return OutProc(test.output_proc)
class OutProc(outproc_base.BaseOutProc):
@@ -31,9 +28,6 @@ class OutProc(outproc_base.BaseOutProc):
super(OutProc, self).__init__()
self._outproc = _outproc
- def process(self, output):
- return Result(self.has_unexpected_output(output), output)
-
def has_unexpected_output(self, output):
return output.exit_code != 0
@@ -49,9 +43,7 @@ class OutProc(outproc_base.BaseOutProc):
return self._outproc.expected_outcomes
-class NeverUnexpectedOutputOutProc(OutProc):
-  """Output processor wrapper for tests for which has_unexpected_output
-  always returns False in predictable mode.
- """
- def has_unexpected_output(self, output):
- return False
+class PredictableFilterProc(testproc_base.TestProcFilter):
+ def _filter(self, test):
+ return (statusfile.FAIL in test.expected_outcomes or
+ test.output_proc.negative)
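In predictable mode the output processor only checks the exit code, so tests expected to fail (or negative tests) would always be reported as unexpected; the new processor filters them out up front instead. A standalone sketch of the rule, assuming TestProcFilter drops a test when _filter returns True:

  FAIL = 'FAIL'  # mirrors statusfile.FAIL

  # Standalone restatement of PredictableFilterProc._filter(); True means
  # the test is dropped before execution.
  def skip_in_predictable_mode(test):
    return FAIL in test.expected_outcomes or test.output_proc.negative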
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 06db32802c..775ddfeb07 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -40,7 +40,7 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
class TestCase(object):
- def __init__(self, suite, path, name):
+ def __init__(self, suite, path, name, test_config):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
@@ -49,60 +49,40 @@ class TestCase(object):
self.variant = None # name of the used testing variant
self.variant_flags = [] # list of strings, flags specific to this test
- self.id = None # int, used to map result back to TestCase instance
- self.run = 1 # The nth time this test is executed.
- self.cmd = None
-
# Fields used by the test processors.
self.origin = None # Test that this test is subtest of.
self.processor = None # Processor that created this subtest.
self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
self.keep_output = False # Can output of this test be dropped
+ # Test config contains information needed to build the command.
+ self._test_config = test_config
+ self._random_seed = None # Overrides test config value if not None
+
+ # Outcomes
self._statusfile_outcomes = None
- self._expected_outcomes = None # optimization: None == [statusfile.PASS]
+ self.expected_outcomes = None
self._statusfile_flags = None
+
self._prepare_outcomes()
def create_subtest(self, processor, subtest_id, variant=None, flags=None,
- keep_output=False):
+ keep_output=False, random_seed=None):
subtest = copy.copy(self)
subtest.origin = self
subtest.processor = processor
subtest.procid += '.%s' % subtest_id
- subtest.keep_output = keep_output
+ subtest.keep_output |= keep_output
+ if random_seed:
+ subtest._random_seed = random_seed
+ if flags:
+ subtest.variant_flags = subtest.variant_flags + flags
if variant is not None:
assert self.variant is None
subtest.variant = variant
- subtest.variant_flags = flags
subtest._prepare_outcomes()
return subtest
- def create_variant(self, variant, flags, procid_suffix=None):
- """Makes a shallow copy of the object and updates variant, variant flags and
- all fields that depend on it, e.g. expected outcomes.
-
- Args
- variant - variant name
- flags - flags that should be added to origin test's variant flags
- procid_suffix - for multiple variants with the same name set suffix to
- keep procid unique.
- """
- other = copy.copy(self)
- if not self.variant_flags:
- other.variant_flags = flags
- else:
- other.variant_flags = self.variant_flags + flags
- other.variant = variant
- if procid_suffix:
- other.procid += '[%s-%s]' % (variant, procid_suffix)
- else:
- other.procid += '[%s]' % variant
-
- other._prepare_outcomes(variant != self.variant)
-
- return other
-
def _prepare_outcomes(self, force_update=True):
if force_update or self._statusfile_outcomes is None:
def is_flag(outcome):
@@ -160,16 +140,17 @@ class TestCase(object):
def only_standard_variant(self):
return statusfile.NO_VARIANTS in self._statusfile_outcomes
- def get_command(self, context):
- params = self._get_cmd_params(context)
+ def get_command(self):
+ params = self._get_cmd_params()
env = self._get_cmd_env()
- shell, shell_flags = self._get_shell_with_flags(context)
- timeout = self._get_timeout(params, context.timeout)
- return self._create_cmd(shell, shell_flags + params, env, timeout, context)
+ shell, shell_flags = self._get_shell_with_flags()
+ timeout = self._get_timeout(params)
+ return self._create_cmd(shell, shell_flags + params, env, timeout)
- def _get_cmd_params(self, ctx):
+ def _get_cmd_params(self):
"""Gets command parameters and combines them in the following order:
- files [empty by default]
+ - random seed
- extra flags (from command line)
- user flags (variant/fuzzer flags)
- statusfile flags
@@ -180,23 +161,31 @@ class TestCase(object):
methods for getting partial parameters.
"""
return (
- self._get_files_params(ctx) +
- self._get_extra_flags(ctx) +
+ self._get_files_params() +
+ self._get_random_seed_flags() +
+ self._get_extra_flags() +
self._get_variant_flags() +
self._get_statusfile_flags() +
- self._get_mode_flags(ctx) +
+ self._get_mode_flags() +
self._get_source_flags() +
- self._get_suite_flags(ctx)
+ self._get_suite_flags()
)
def _get_cmd_env(self):
return {}
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return []
- def _get_extra_flags(self, ctx):
- return ctx.extra_flags
+ def _get_random_seed_flags(self):
+ return ['--random-seed=%d' % self.random_seed]
+
+ @property
+ def random_seed(self):
+ return self._random_seed or self._test_config.random_seed
+
+ def _get_extra_flags(self):
+ return self._test_config.extra_flags
def _get_variant_flags(self):
return self.variant_flags
@@ -208,27 +197,26 @@ class TestCase(object):
"""
return self._statusfile_flags
- def _get_mode_flags(self, ctx):
- return ctx.mode_flags
+ def _get_mode_flags(self):
+ return self._test_config.mode_flags
def _get_source_flags(self):
return []
- def _get_suite_flags(self, ctx):
+ def _get_suite_flags(self):
return []
- def _get_shell_with_flags(self, ctx):
+ def _get_shell_with_flags(self):
shell = self.get_shell()
shell_flags = []
if shell == 'd8':
shell_flags.append('--test')
if utils.IsWindows():
shell += '.exe'
- if ctx.random_seed:
- shell_flags.append('--random-seed=%s' % ctx.random_seed)
return shell, shell_flags
- def _get_timeout(self, params, timeout):
+ def _get_timeout(self, params):
+ timeout = self._test_config.timeout
if "--stress-opt" in params:
timeout *= 4
if "--noenable-vfp3" in params:
@@ -244,14 +232,14 @@ class TestCase(object):
def _get_suffix(self):
return '.js'
- def _create_cmd(self, shell, params, env, timeout, ctx):
+ def _create_cmd(self, shell, params, env, timeout):
return command.Command(
- cmd_prefix=ctx.command_prefix,
- shell=os.path.abspath(os.path.join(ctx.shell_dir, shell)),
+ cmd_prefix=self._test_config.command_prefix,
+ shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
args=params,
env=env,
timeout=timeout,
- verbose=ctx.verbose
+ verbose=self._test_config.verbose
)
def _parse_source_flags(self, source=None):
@@ -281,18 +269,9 @@ class TestCase(object):
# Make sure that test cases are sorted correctly if sorted without
# key function. But using a key function is preferred for speed.
return cmp(
- (self.suite.name, self.name, self.variant_flags),
- (other.suite.name, other.name, other.variant_flags)
+ (self.suite.name, self.name, self.variant),
+ (other.suite.name, other.name, other.variant)
)
- def __hash__(self):
- return hash((self.suite.name, self.name, ''.join(self.variant_flags)))
-
def __str__(self):
return self.suite.name + '/' + self.name
-
- # TODO(majeski): Rename `id` field or `get_id` function since they're
- # unrelated.
- def get_id(self):
- return '%s/%s %s' % (
- self.suite.name, self.name, ' '.join(self.variant_flags))
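The flag-assembly order documented in _get_cmd_params() can be seen end-to-end in a sketch; all the flag values below are invented for illustration:

  # Invented values illustrating the concatenation order of _get_cmd_params().
  params = (
    ['test/mjsunit/foo.js'] +        # files
    ['--random-seed=123'] +          # random seed (test config or override)
    ['--omit-quit'] +                # extra flags (command line)
    ['--future'] +                   # user flags (variant/fuzzer)
    ['--expose-gc'] +                # statusfile flags
    ['--debug-code'] +               # mode flags
    ['--allow-natives-syntax'] +     # source flags (// Flags: comments)
    []                               # suite flags
  )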
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
index 9a9db4e81d..d1953dda99 100644
--- a/deps/v8/tools/testrunner/outproc/base.py
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -2,24 +2,45 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import collections
import itertools
+from ..testproc.base import (
+ DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)
from ..local import statusfile
from ..testproc.result import Result
OUTCOMES_PASS = [statusfile.PASS]
OUTCOMES_FAIL = [statusfile.FAIL]
+OUTCOMES_PASS_OR_TIMEOUT = [statusfile.PASS, statusfile.TIMEOUT]
+OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
class BaseOutProc(object):
- def process(self, output):
- return Result(self.has_unexpected_output(output), output)
+ def process(self, output, reduction=None):
+ has_unexpected_output = self.has_unexpected_output(output)
+ return self._create_result(has_unexpected_output, output, reduction)
def has_unexpected_output(self, output):
return self.get_outcome(output) not in self.expected_outcomes
+ def _create_result(self, has_unexpected_output, output, reduction):
+    """Creates a Result instance. When a reduction is passed, it drops some
+    parts of the result to save memory and the time needed to send the result
+    across the process boundary. None disables reduction; the full result is
+    created.
+ """
+ if reduction == DROP_RESULT:
+ return None
+ if reduction == DROP_OUTPUT:
+ return Result(has_unexpected_output, None)
+ if not has_unexpected_output:
+ if reduction == DROP_PASS_OUTPUT:
+ return Result(has_unexpected_output, None)
+ if reduction == DROP_PASS_STDOUT:
+ return Result(has_unexpected_output, output.without_text())
+
+ return Result(has_unexpected_output, output)
+
def get_outcome(self, output):
if output.HasCrashed():
return statusfile.CRASH
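The four reduction levels trade result completeness against the cost of sending results across the process boundary. A small sketch summarizing what _create_result() keeps at each level:

  from testrunner.testproc.base import (
      DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)

  # What _create_result() keeps per reduction level.
  def kept_by_reduction(reduction, has_unexpected_output):
    if reduction == DROP_RESULT:
      return 'nothing'
    if reduction == DROP_OUTPUT:
      return 'verdict only'
    if not has_unexpected_output:
      if reduction == DROP_PASS_OUTPUT:
        return 'verdict only'
      if reduction == DROP_PASS_STDOUT:
        return 'verdict + output stripped of stdout/stderr'
    return 'verdict + full output'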
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 3be2099252..d3d2bd53a6 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -5,47 +5,34 @@
# found in the LICENSE file.
-from collections import OrderedDict
-from os.path import join
-import multiprocessing
import os
-import random
-import shlex
-import subprocess
+import re
import sys
-import time
# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
from testrunner.local import utils
-from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
-from testrunner.objects import context
from testrunner.objects import predictable
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
-from testrunner.testproc.progress import (VerboseProgressIndicator,
- ResultsTracker,
- TestsCounter)
-from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.progress import ResultsTracker, TestsCounter
+from testrunner.testproc.seed import SeedProc
from testrunner.testproc.variant import VariantProc
+from testrunner.utils import random_utils
-TIMEOUT_DEFAULT = 60
+ARCH_GUESS = utils.DefaultArch()
-# Variants ordered by expected runtime (slowest first).
VARIANTS = ["default"]
MORE_VARIANTS = [
- "stress",
- "stress_incremental_marking",
"nooptimization",
+ "stress",
"stress_background_compile",
+ "stress_incremental_marking",
"wasm_traps",
]
@@ -57,7 +44,7 @@ VARIANT_ALIASES = {
# Shortcut for the two above ("more" first - it has the longer running tests).
"exhaustive": MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
- "extra": ["future", "liftoff", "trusted"],
+ "extra": ["future", "liftoff", "trusted", "wasm_no_native"],
}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
@@ -65,15 +52,9 @@ GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-delay=500",
"--concurrent-recompilation"]
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
- "mips",
- "mipsel",
- "mips64",
- "mips64el",
- "s390",
- "s390x",
- "arm64"]
+RANDOM_GC_STRESS_FLAGS = ["--random-gc-interval=5000",
+ "--stress-compaction-random"]
+
PREDICTABLE_WRAPPER = os.path.join(
base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
@@ -84,72 +65,18 @@ class StandardTestRunner(base_runner.BaseTestRunner):
super(StandardTestRunner, self).__init__(*args, **kwargs)
self.sancov_dir = None
+ self._variants = None
def _get_default_suite_names(self):
return ['default']
- def _do_execute(self, suites, args, options):
- if options.swarming:
- # Swarming doesn't print how isolated commands are called. Lets make
- # this less cryptic by printing it ourselves.
- print ' '.join(sys.argv)
-
- if utils.GuessOS() == "macos":
- # TODO(machenbach): Temporary output for investigating hanging test
- # driver on mac.
- print "V8 related processes running on this host:"
- try:
- print subprocess.check_output(
- "ps -e | egrep 'd8|cctest|unittests'", shell=True)
- except Exception:
- pass
-
- return self._execute(args, options, suites)
-
def _add_parser_options(self, parser):
- parser.add_option("--sancov-dir",
- help="Directory where to collect coverage data")
- parser.add_option("--cfi-vptr",
- help="Run tests with UBSAN cfi_vptr option.",
- default=False, action="store_true")
parser.add_option("--novfp3",
help="Indicates that V8 was compiled without VFP3"
" support",
default=False, action="store_true")
- parser.add_option("--cat", help="Print the source of the tests",
- default=False, action="store_true")
- parser.add_option("--slow-tests",
- help="Regard slow tests (run|skip|dontcare)",
- default="dontcare")
- parser.add_option("--pass-fail-tests",
- help="Regard pass|fail tests (run|skip|dontcare)",
- default="dontcare")
- parser.add_option("--gc-stress",
- help="Switch on GC stress mode",
- default=False, action="store_true")
- parser.add_option("--command-prefix",
- help="Prepended to each shell command used to run a"
- " test",
- default="")
- parser.add_option("--extra-flags",
- help="Additional flags to pass to each test command",
- action="append", default=[])
- parser.add_option("--infra-staging", help="Use new test runner features",
- default=False, action="store_true")
- parser.add_option("--isolates", help="Whether to test isolates",
- default=False, action="store_true")
- parser.add_option("-j", help="The number of parallel tasks to run",
- default=0, type="int")
- parser.add_option("--no-harness", "--noharness",
- help="Run without test harness of a given suite",
- default=False, action="store_true")
- parser.add_option("--no-presubmit", "--nopresubmit",
- help='Skip presubmit checks (deprecated)',
- default=False, dest="no_presubmit", action="store_true")
- parser.add_option("--no-sorting", "--nosorting",
- help="Don't sort tests according to duration of last"
- " run.",
- default=False, dest="no_sorting", action="store_true")
+
+ # Variants
parser.add_option("--no-variants", "--novariants",
help="Deprecated. "
"Equivalent to passing --variants=default",
@@ -161,67 +88,80 @@ class StandardTestRunner(base_runner.BaseTestRunner):
default=False, action="store_true",
help="Deprecated. "
"Equivalent to passing --variants=exhaustive")
- parser.add_option("-p", "--progress",
- help=("The style of progress indicator"
- " (verbose, dots, color, mono)"),
- choices=progress.PROGRESS_INDICATORS.keys(),
- default="mono")
+
+ # Filters
+ parser.add_option("--slow-tests", default="dontcare",
+ help="Regard slow tests (run|skip|dontcare)")
+ parser.add_option("--pass-fail-tests", default="dontcare",
+ help="Regard pass|fail tests (run|skip|dontcare)")
parser.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow tests)"))
- parser.add_option("--report", help="Print a summary of the tests to be"
- " run",
- default=False, action="store_true")
- parser.add_option("--json-test-results",
- help="Path to a file for storing json results.")
- parser.add_option("--flakiness-results",
- help="Path to a file for storing flakiness json.")
- parser.add_option("--rerun-failures-count",
- help=("Number of times to rerun each failing test case."
- " Very slow tests will be rerun only once."),
- default=0, type="int")
- parser.add_option("--rerun-failures-max",
- help="Maximum number of failing test cases to rerun.",
- default=100, type="int")
parser.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a"
" simulator.",
default=False, action="store_true",
dest="dont_skip_simulator_slow_tests")
- parser.add_option("--swarming",
- help="Indicates running test driver on swarming.",
+
+ # Stress modes
+ parser.add_option("--gc-stress",
+ help="Switch on GC stress mode",
+ default=False, action="store_true")
+ parser.add_option("--random-gc-stress",
+ help="Switch on random GC stress mode",
+ default=False, action="store_true")
+ parser.add_option("--random-seed-stress-count", default=1, type="int",
+ dest="random_seed_stress_count",
+ help="Number of runs with different random seeds. Only "
+ "with test processors: 0 means infinite "
+ "generation.")
+
+ # Noop
+ parser.add_option("--cfi-vptr",
+ help="Run tests with UBSAN cfi_vptr option.",
default=False, action="store_true")
+ parser.add_option("--infra-staging", help="Use new test runner features",
+ dest='infra_staging', default=None,
+ action="store_true")
+ parser.add_option("--no-infra-staging",
+ help="Opt out of new test runner features",
+ dest='infra_staging', default=None,
+ action="store_false")
+ parser.add_option("--no-sorting", "--nosorting",
+ help="Don't sort tests according to duration of last"
+ " run.",
+ default=False, dest="no_sorting", action="store_true")
+ parser.add_option("--no-presubmit", "--nopresubmit",
+ help='Skip presubmit checks (deprecated)',
+ default=False, dest="no_presubmit", action="store_true")
+
+ # Unimplemented for test processors
+ parser.add_option("--sancov-dir",
+ help="Directory where to collect coverage data")
+ parser.add_option("--cat", help="Print the source of the tests",
+ default=False, action="store_true")
+ parser.add_option("--flakiness-results",
+ help="Path to a file for storing flakiness json.")
parser.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
- parser.add_option("-t", "--timeout", help="Timeout in seconds",
- default=TIMEOUT_DEFAULT, type="int")
parser.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite",
- help="The testsuite name in the JUnit output file",
- default="v8tests")
- parser.add_option("--random-seed", default=0, dest="random_seed",
- help="Default seed for initializing random generator",
- type=int)
- parser.add_option("--random-seed-stress-count", default=1, type="int",
- dest="random_seed_stress_count",
- help="Number of runs with different random seeds")
+ parser.add_option("--report", default=False, action="store_true",
+ help="Print a summary of the tests to be run")
- def _process_options(self, options):
- global VARIANTS
+ def _process_options(self, options):
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
if not os.path.exists(self.sancov_dir):
print("sancov-dir %s doesn't exist" % self.sancov_dir)
raise base_runner.TestRunnerError()
- options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
-
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
+ if options.random_gc_stress:
+ options.extra_flags += RANDOM_GC_STRESS_FLAGS
+
if self.build_config.asan:
options.extra_flags.append("--invoke-weak-callbacks")
options.extra_flags.append("--omit-quit")
@@ -265,31 +205,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if self.build_config.msan:
options.variants = "default"
- if options.j == 0:
- options.j = multiprocessing.cpu_count()
-
- if options.random_seed_stress_count <= 1 and options.random_seed == 0:
- options.random_seed = self._random_seed()
-
- # Use developer defaults if no variant was specified.
- options.variants = options.variants or "dev"
-
if options.variants == "infra_staging":
options.variants = "exhaustive"
- options.infra_staging = True
- # Resolve variant aliases and dedupe.
- # TODO(machenbach): Don't mutate global variable. Rather pass mutated
- # version as local variable.
- VARIANTS = list(set(reduce(
- list.__add__,
- (VARIANT_ALIASES.get(v, [v]) for v in options.variants.split(",")),
- [],
- )))
-
- if not set(VARIANTS).issubset(ALL_VARIANTS):
- print "All variants must be in %s" % str(ALL_VARIANTS)
- raise base_runner.TestRunnerError()
+ self._variants = self._parse_variants(options.variants)
def CheckTestMode(name, option): # pragma: no cover
if not option in ["run", "skip", "dontcare"]:
@@ -303,6 +222,23 @@ class StandardTestRunner(base_runner.BaseTestRunner):
# TODO(machenbach): uncomment after infra side lands.
# base_runner.TEST_MAP["d8_default"].remove("intl")
+ def _parse_variants(self, aliases_str):
+ # Use developer defaults if no variant was specified.
+ aliases_str = aliases_str or 'dev'
+ aliases = aliases_str.split(',')
+ user_variants = set(reduce(
+ list.__add__, [VARIANT_ALIASES.get(a, [a]) for a in aliases]))
+
+ result = [v for v in ALL_VARIANTS if v in user_variants]
+ if len(result) == len(user_variants):
+ return result
+
+ for v in user_variants:
+ if v not in ALL_VARIANTS:
+ print 'Unknown variant: %s' % v
+ raise base_runner.TestRunnerError()
+ assert False, 'Unreachable'
+
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
@@ -316,228 +252,39 @@ class StandardTestRunner(base_runner.BaseTestRunner):
"allow_user_segv_handler=1",
])
- def _random_seed(self):
- seed = 0
- while not seed:
- seed = random.SystemRandom().randint(-2147483648, 2147483647)
- return seed
-
- def _execute(self, args, options, suites):
- print(">>> Running tests for %s.%s" % (self.build_config.arch,
- self.mode_name))
- # Populate context object.
-
- # Simulators are slow, therefore allow a longer timeout.
- if self.build_config.arch in SLOW_ARCHS:
- options.timeout *= 2
+ def _get_statusfile_variables(self, options):
+ variables = (
+ super(StandardTestRunner, self)._get_statusfile_variables(options))
- options.timeout *= self.mode_options.timeout_scalefactor
-
- if self.build_config.predictable:
- # Predictable mode is slower.
- options.timeout *= 2
-
- ctx = context.Context(self.build_config.arch,
- self.mode_options.execution_mode,
- self.outdir,
- self.mode_options.flags,
- options.verbose,
- options.timeout,
- options.isolates,
- options.command_prefix,
- options.extra_flags,
- self.build_config.no_i18n,
- options.random_seed,
- options.no_sorting,
- options.rerun_failures_count,
- options.rerun_failures_max,
- options.no_harness,
- use_perf_data=not options.swarming,
- sancov_dir=self.sancov_dir,
- infra_staging=options.infra_staging)
-
- # TODO(all): Combine "simulator" and "simulator_run".
- # TODO(machenbach): In GN we can derive simulator run from
- # target_arch != v8_target_arch in the dumped build config.
simulator_run = (
not options.dont_skip_simulator_slow_tests and
self.build_config.arch in [
'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
'ppc64', 's390', 's390x'] and
- bool(base_runner.ARCH_GUESS) and
- self.build_config.arch != base_runner.ARCH_GUESS)
- # Find available test suites and read test cases from them.
- variables = {
- "arch": self.build_config.arch,
- "asan": self.build_config.asan,
- "byteorder": sys.byteorder,
- "dcheck_always_on": self.build_config.dcheck_always_on,
- "deopt_fuzzer": False,
- "gc_fuzzer": False,
- "gc_stress": options.gc_stress,
- "gcov_coverage": self.build_config.gcov_coverage,
- "isolates": options.isolates,
- "mode": self.mode_options.status_mode,
- "msan": self.build_config.msan,
- "no_harness": options.no_harness,
- "no_i18n": self.build_config.no_i18n,
- "no_snap": self.build_config.no_snap,
- "novfp3": options.novfp3,
- "predictable": self.build_config.predictable,
- "simulator": utils.UseSimulator(self.build_config.arch),
- "simulator_run": simulator_run,
- "system": utils.GuessOS(),
- "tsan": self.build_config.tsan,
- "ubsan_vptr": self.build_config.ubsan_vptr,
- }
-
- progress_indicator = progress.IndicatorNotifier()
- progress_indicator.Register(
- progress.PROGRESS_INDICATORS[options.progress]())
- if options.junitout: # pragma: no cover
- progress_indicator.Register(progress.JUnitTestProgressIndicator(
- options.junitout, options.junittestsuite))
- if options.json_test_results:
- progress_indicator.Register(progress.JsonTestProgressIndicator(
- options.json_test_results,
- self.build_config.arch,
- self.mode_options.execution_mode,
- ctx.random_seed))
- if options.flakiness_results: # pragma: no cover
- progress_indicator.Register(progress.FlakinessTestProgressIndicator(
- options.flakiness_results))
-
- if options.infra_staging:
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
-
- return self._run_test_procs(suites, args, options, progress_indicator,
- ctx)
-
- all_tests = []
- num_tests = 0
- for s in suites:
- s.ReadStatusFile(variables)
- s.ReadTestCases(ctx)
- if len(args) > 0:
- s.FilterTestCasesByArgs(args)
- all_tests += s.tests
-
- # First filtering by status applying the generic rules (tests without
- # variants)
- if options.warn_unused:
- tests = [(t.name, t.variant) for t in s.tests]
- s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
- s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
-
- if options.cat:
- verbose.PrintTestSource(s.tests)
- continue
- variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
- variant_tests = [ t.create_variant(v, flags)
- for t in s.tests
- for v in variant_gen.FilterVariantsByTest(t)
- for flags in variant_gen.GetFlagSets(t, v) ]
-
- if options.random_seed_stress_count > 1:
- # Duplicate test for random seed stress mode.
- def iter_seed_flags():
- for _ in range(0, options.random_seed_stress_count):
- # Use given random seed for all runs (set by default in
- # execution.py) or a new random seed if none is specified.
- if options.random_seed:
- yield []
- else:
- yield ["--random-seed=%d" % self._random_seed()]
- s.tests = [
- t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
- for t in variant_tests
- for n, flags in enumerate(iter_seed_flags())
- ]
- else:
- s.tests = variant_tests
-
- # Second filtering by status applying also the variant-dependent rules.
- if options.warn_unused:
- tests = [(t.name, t.variant) for t in s.tests]
- s.statusfile.warn_unused_rules(tests, check_variant_rules=True)
-
- s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
- s.tests = self._shard_tests(s.tests, options)
-
- for t in s.tests:
- t.cmd = t.get_command(ctx)
-
- num_tests += len(s.tests)
-
- if options.cat:
- return 0 # We're done here.
-
- if options.report:
- verbose.PrintReport(all_tests)
-
- # Run the tests.
- start_time = time.time()
-
- if self.build_config.predictable:
- outproc_factory = predictable.get_outproc
- else:
- outproc_factory = None
-
- runner = execution.Runner(suites, progress_indicator, ctx,
- outproc_factory)
- exit_code = runner.Run(options.j)
- overall_duration = time.time() - start_time
+ bool(ARCH_GUESS) and
+ self.build_config.arch != ARCH_GUESS)
- if options.time:
- verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
+ variables.update({
+ 'gc_stress': options.gc_stress or options.random_gc_stress,
+ 'novfp3': options.novfp3,
+ 'simulator_run': simulator_run,
+ })
+ return variables
- if num_tests == 0:
- print("Warning: no tests were run!")
-
- if exit_code == 1 and options.json_test_results:
- print("Force exit code 0 after failures. Json test results file "
- "generated with failure information.")
- exit_code = 0
-
- if self.sancov_dir:
- # If tests ran with sanitizer coverage, merge coverage files in the end.
- try:
- print "Merging sancov files."
- subprocess.check_call([
- sys.executable,
- join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
- "--coverage-dir=%s" % self.sancov_dir])
- except:
- print >> sys.stderr, "Error: Merging sancov files failed."
- exit_code = 1
-
- return exit_code
-
- def _shard_tests(self, tests, options):
- shard_run, shard_count = self._get_shard_info(options)
-
- if shard_count < 2:
- return tests
- count = 0
- shard = []
- for test in tests:
- if count % shard_count == shard_run - 1:
- shard.append(test)
- count += 1
- return shard
-
- def _run_test_procs(self, suites, args, options, progress_indicator,
- context):
+ def _do_execute(self, tests, args, options):
jobs = options.j
print '>>> Running with test processors'
loader = LoadProc()
tests_counter = TestsCounter()
results = ResultsTracker()
- indicators = progress_indicator.ToProgressIndicatorProcs()
- execproc = ExecutionProc(jobs, context)
+ indicators = self._create_progress_indicators(options)
+
+ outproc_factory = None
+ if self.build_config.predictable:
+ outproc_factory = predictable.get_outproc
+ execproc = ExecutionProc(jobs, outproc_factory)
+ sigproc = self._create_signal_proc()
procs = [
loader,
@@ -545,54 +292,60 @@ class StandardTestRunner(base_runner.BaseTestRunner):
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
self._create_shard_proc(options),
tests_counter,
- VariantProc(VARIANTS),
+ VariantProc(self._variants),
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+ self._create_predictable_filter(),
+ self._create_seed_proc(options),
+ sigproc,
] + indicators + [
results,
- self._create_rerun_proc(context),
+ self._create_timeout_proc(options),
+ self._create_rerun_proc(options),
execproc,
]
- procs = filter(None, procs)
-
- for i in xrange(0, len(procs) - 1):
- procs[i].connect_to(procs[i + 1])
-
- tests = [t for s in suites for t in s.tests]
+ self._prepare_procs(procs)
tests.sort(key=lambda t: t.is_slow, reverse=True)
- loader.setup()
loader.load_tests(tests)
print '>>> Running %d base tests' % tests_counter.total
tests_counter.remove_from_chain()
- execproc.start()
+ # This starts up worker processes and blocks until all tests are
+ # processed.
+ execproc.run()
for indicator in indicators:
indicator.finished()
- print '>>> %d tests ran' % results.total
+ print '>>> %d tests ran' % (results.total - results.remaining)
- exit_code = 0
+ exit_code = utils.EXIT_CODE_PASS
if results.failed:
- exit_code = 1
- if results.remaining:
- exit_code = 2
+ exit_code = utils.EXIT_CODE_FAILURES
+ if not results.total:
+ exit_code = utils.EXIT_CODE_NO_TESTS
+ # Indicate if a SIGINT or SIGTERM happened.
+ exit_code = max(exit_code, sigproc.exit_code)
- if exit_code == 1 and options.json_test_results:
+ if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
- exit_code = 0
+ exit_code = utils.EXIT_CODE_PASS
return exit_code
- def _create_rerun_proc(self, ctx):
- if not ctx.rerun_failures_count:
+ def _create_predictable_filter(self):
+ if not self.build_config.predictable:
return None
- return RerunProc(ctx.rerun_failures_count,
- ctx.rerun_failures_max)
+ return predictable.PredictableFilterProc()
+ def _create_seed_proc(self, options):
+ if options.random_seed_stress_count == 1:
+ return None
+ return SeedProc(options.random_seed_stress_count, options.random_seed,
+ options.j * 4)
if __name__ == '__main__':
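The runner is now a linear chain of processors: tests flow from the loader through filters and the variant processor to execution, and results flow back up the same chain. A sketch of the wiring, assuming _prepare_procs() (defined in base_runner.py, outside this hunk) simply drops None entries and connects neighbours like the deleted inline loop did:

  # Assumed shape of _prepare_procs(); mirrors the deleted inline loop.
  def prepare_procs(procs):
    procs = [p for p in procs if p is not None]
    for i in range(len(procs) - 1):
      procs[i].connect_to(procs[i + 1])
    return procs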
diff --git a/deps/v8/tools/testrunner/test_config.py b/deps/v8/tools/testrunner/test_config.py
new file mode 100644
index 0000000000..d9418fe9ac
--- /dev/null
+++ b/deps/v8/tools/testrunner/test_config.py
@@ -0,0 +1,32 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+from .utils import random_utils
+
+
+class TestConfig(object):
+ def __init__(self,
+ command_prefix,
+ extra_flags,
+ isolates,
+ mode_flags,
+ no_harness,
+ noi18n,
+ random_seed,
+ shell_dir,
+ timeout,
+ verbose):
+ self.command_prefix = command_prefix
+ self.extra_flags = extra_flags
+ self.isolates = isolates
+ self.mode_flags = mode_flags
+ self.no_harness = no_harness
+ self.noi18n = noi18n
+    # random_seed is never None.
+ self.random_seed = random_seed or random_utils.random_seed()
+ self.shell_dir = shell_dir
+ self.timeout = timeout
+ self.verbose = verbose
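An example instantiation with illustrative values; passing random_seed=0 falls through to a freshly generated seed, per the constructor above:

  from testrunner.test_config import TestConfig

  config = TestConfig(
      command_prefix=[],
      extra_flags=['--omit-quit'],    # illustrative values throughout
      isolates=False,
      mode_flags=['--debug-code'],
      no_harness=False,
      noi18n=False,
      random_seed=0,                  # 0 -> random_utils.random_seed()
      shell_dir='out/x64.release',
      timeout=60,
      verbose=False)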
diff --git a/deps/v8/tools/testrunner/testproc/base.py b/deps/v8/tools/testrunner/testproc/base.py
index 1a87dbed55..5cb1182e89 100644
--- a/deps/v8/tools/testrunner/testproc/base.py
+++ b/deps/v8/tools/testrunner/testproc/base.py
@@ -37,36 +37,12 @@ DROP_OUTPUT = 1
DROP_PASS_OUTPUT = 2
DROP_PASS_STDOUT = 3
-def get_reduce_result_function(requirement):
- if requirement == DROP_RESULT:
- return lambda _: None
-
- if requirement == DROP_OUTPUT:
- def f(result):
- result.output = None
- return result
- return f
-
- if requirement == DROP_PASS_OUTPUT:
- def f(result):
- if not result.has_unexpected_output:
- result.output = None
- return result
- return f
-
- if requirement == DROP_PASS_STDOUT:
- def f(result):
- if not result.has_unexpected_output:
- result.output.stdout = None
- result.output.stderr = None
- return result
- return f
-
class TestProc(object):
def __init__(self):
self._prev_proc = None
self._next_proc = None
+ self._stopped = False
self._requirement = DROP_RESULT
self._prev_requirement = None
self._reduce_result = lambda result: result
@@ -90,8 +66,14 @@ class TestProc(object):
self._prev_requirement = requirement
if self._next_proc:
self._next_proc.setup(max(requirement, self._requirement))
- if self._prev_requirement < self._requirement:
- self._reduce_result = get_reduce_result_function(self._prev_requirement)
+
+    # Since we gain nothing by dropping only part of the result, we either
+    # drop the whole result or pass it on as-is. The real reduction happens
+    # during result creation (in the output processor), so the result is
+    # immutable.
+ if (self._prev_requirement < self._requirement and
+ self._prev_requirement == DROP_RESULT):
+ self._reduce_result = lambda _: None
def next_test(self, test):
"""
@@ -111,6 +93,18 @@ class TestProc(object):
if self._prev_proc:
self._prev_proc.heartbeat()
+ def stop(self):
+ if not self._stopped:
+ self._stopped = True
+ if self._prev_proc:
+ self._prev_proc.stop()
+ if self._next_proc:
+ self._next_proc.stop()
+
+ @property
+ def is_stopped(self):
+ return self._stopped
+
### Communication
def _send_test(self, test):
@@ -119,7 +113,8 @@ class TestProc(object):
def _send_result(self, test, result):
"""Helper method for sending result to the previous processor."""
- result = self._reduce_result(result)
+ if not test.keep_output:
+ result = self._reduce_result(result)
self._prev_proc.result_for(test, result)
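stop() propagates in both directions along the chain; the _stopped guard is what terminates the mutual recursion between neighbours. A tiny self-contained model of the same idea:

  # Self-contained model of the bidirectional stop() propagation above.
  class Node(object):
    def __init__(self):
      self._prev = None
      self._next = None
      self._stopped = False

    def stop(self):
      if not self._stopped:       # guard breaks the mutual recursion
        self._stopped = True
        if self._prev:
          self._prev.stop()
        if self._next:
          self._next.stop()

  a, b, c = Node(), Node(), Node()
  a._next, b._prev = b, a
  b._next, c._prev = c, b
  b.stop()                        # stops a, b and c
  assert a._stopped and b._stopped and c._stopped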
diff --git a/deps/v8/tools/testrunner/testproc/combiner.py b/deps/v8/tools/testrunner/testproc/combiner.py
new file mode 100644
index 0000000000..50944e1e5e
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/combiner.py
@@ -0,0 +1,124 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+import time
+
+from . import base
+from ..objects import testcase
+from ..outproc import base as outproc
+
+
+class CombinerProc(base.TestProc):
+ def __init__(self, rng, min_group_size, max_group_size, count):
+ """
+ Args:
+ rng: random number generator
+ min_group_size: minimum number of tests to combine
+ max_group_size: maximum number of tests to combine
+ count: how many tests to generate. 0 means infinite running
+ """
+ super(CombinerProc, self).__init__()
+
+ self._rng = rng
+ self._min_size = min_group_size
+ self._max_size = max_group_size
+ self._count = count
+
+ # Index of the last generated test
+ self._current_num = 0
+
+ # {suite name: instance of TestGroups}
+ self._groups = defaultdict(TestGroups)
+
+ # {suite name: instance of TestCombiner}
+ self._combiners = {}
+
+ def setup(self, requirement=base.DROP_RESULT):
+ # Combiner is not able to pass results (even as None) to the previous
+ # processor.
+ assert requirement == base.DROP_RESULT
+ self._next_proc.setup(base.DROP_RESULT)
+
+ def next_test(self, test):
+ group_key = self._get_group_key(test)
+ if not group_key:
+ # Test not suitable for combining
+ return
+
+ self._groups[test.suite.name].add_test(group_key, test)
+
+ def _get_group_key(self, test):
+ combiner = self._get_combiner(test.suite)
+ if not combiner:
+ print ('>>> Warning: There is no combiner for %s testsuite' %
+ test.suite.name)
+ return None
+ return combiner.get_group_key(test)
+
+ def result_for(self, test, result):
+ self._send_next_test()
+
+ def generate_initial_tests(self, num=1):
+ for _ in xrange(0, num):
+ self._send_next_test()
+
+ def _send_next_test(self):
+ if self.is_stopped:
+ return
+
+ if self._count and self._current_num >= self._count:
+ return
+
+ combined_test = self._create_new_test()
+ if not combined_test:
+ # Not enough tests
+ return
+
+ self._send_test(combined_test)
+
+ def _create_new_test(self):
+ suite, combiner = self._select_suite()
+ groups = self._groups[suite]
+
+ max_size = self._rng.randint(self._min_size, self._max_size)
+ sample = groups.sample(self._rng, max_size)
+ if not sample:
+ return None
+
+ self._current_num += 1
+ return combiner.combine('%s-%d' % (suite, self._current_num), sample)
+
+ def _select_suite(self):
+ """Returns pair (suite name, combiner)."""
+ selected = self._rng.randint(0, len(self._groups) - 1)
+ for n, suite in enumerate(self._groups):
+ if n == selected:
+ return suite, self._combiners[suite]
+
+ def _get_combiner(self, suite):
+ combiner = self._combiners.get(suite.name)
+ if not combiner:
+ combiner = suite.get_test_combiner()
+ self._combiners[suite.name] = combiner
+ return combiner
+
+
+class TestGroups(object):
+ def __init__(self):
+ self._groups = defaultdict(list)
+ self._keys = []
+
+ def add_test(self, key, test):
+ self._groups[key].append(test)
+ self._keys.append(key)
+
+ def sample(self, rng, max_size):
+ # Not enough tests
+ if not self._groups:
+ return None
+
+ group_key = rng.choice(self._keys)
+ tests = self._groups[group_key]
+ return [rng.choice(tests) for _ in xrange(0, max_size)]
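+
+
+# A usage sketch (hypothetical wiring; loader/execution_proc are
+# placeholders): combine between 2 and 5 suitable tests into each generated
+# case, with no limit on how many cases are produced:
+#
+#   import random
+#   combiner = CombinerProc(random.Random(42), 2, 5, 0)
+#   loader.connect_to(combiner)
+#   combiner.connect_to(execution_proc)
+#   combiner.generate_initial_tests(num=4)  # prime one test per worker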
diff --git a/deps/v8/tools/testrunner/testproc/execution.py b/deps/v8/tools/testrunner/testproc/execution.py
index 021b02af3e..2d1ea02cd0 100644
--- a/deps/v8/tools/testrunner/testproc/execution.py
+++ b/deps/v8/tools/testrunner/testproc/execution.py
@@ -15,12 +15,12 @@ def run_job(job, process_context):
return job.run(process_context)
-def create_process_context(requirement):
- return ProcessContext(base.get_reduce_result_function(requirement))
+def create_process_context(result_reduction):
+ return ProcessContext(result_reduction)
JobResult = collections.namedtuple('JobResult', ['id', 'result'])
-ProcessContext = collections.namedtuple('ProcessContext', ['reduce_result_f'])
+ProcessContext = collections.namedtuple('ProcessContext', ['result_reduction'])
class Job(object):
@@ -32,9 +32,8 @@ class Job(object):
def run(self, process_ctx):
output = self.cmd.execute()
- result = self.outproc.process(output)
- if not self.keep_output:
- result = process_ctx.reduce_result_f(result)
+ reduction = process_ctx.result_reduction if not self.keep_output else None
+ result = self.outproc.process(output, reduction)
return JobResult(self.test_id, result)
@@ -44,49 +43,51 @@ class ExecutionProc(base.TestProc):
sends results to the previous processor.
"""
- def __init__(self, jobs, context):
+ def __init__(self, jobs, outproc_factory=None):
super(ExecutionProc, self).__init__()
self._pool = pool.Pool(jobs)
- self._context = context
+ self._outproc_factory = outproc_factory or (lambda t: t.output_proc)
self._tests = {}
def connect_to(self, next_proc):
assert False, 'ExecutionProc cannot be connected to anything'
- def start(self):
- try:
- it = self._pool.imap_unordered(
+ def run(self):
+ it = self._pool.imap_unordered(
fn=run_job,
gen=[],
process_context_fn=create_process_context,
process_context_args=[self._prev_requirement],
- )
- for pool_result in it:
- if pool_result.heartbeat:
- continue
-
- job_result = pool_result.value
- test_id, result = job_result
-
- test, result.cmd = self._tests[test_id]
- del self._tests[test_id]
- self._send_result(test, result)
- except KeyboardInterrupt:
- raise
- except:
- traceback.print_exc()
- raise
- finally:
- self._pool.terminate()
+ )
+ for pool_result in it:
+ self._unpack_result(pool_result)
def next_test(self, test):
+ if self.is_stopped:
+ return
+
test_id = test.procid
- cmd = test.get_command(self._context)
+ cmd = test.get_command()
self._tests[test_id] = test, cmd
- # TODO(majeski): Needs factory for outproc as in local/execution.py
- outproc = test.output_proc
+ outproc = self._outproc_factory(test)
self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
def result_for(self, test, result):
assert False, 'ExecutionProc cannot receive results'
+
+ def stop(self):
+ super(ExecutionProc, self).stop()
+ self._pool.abort()
+
+ def _unpack_result(self, pool_result):
+ if pool_result.heartbeat:
+ self.heartbeat()
+ return
+
+ job_result = pool_result.value
+ test_id, result = job_result
+
+ test, result.cmd = self._tests[test_id]
+ del self._tests[test_id]
+ self._send_result(test, result)
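+
+
+# A usage sketch (producer is a placeholder for any upstream processor): run
+# jobs on 4 worker processes, falling back to each test's own output
+# processor since no outproc_factory is given:
+#
+#   proc = ExecutionProc(jobs=4)
+#   producer.connect_to(proc)
+#   proc.run()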
diff --git a/deps/v8/tools/testrunner/testproc/expectation.py b/deps/v8/tools/testrunner/testproc/expectation.py
new file mode 100644
index 0000000000..607c010cf3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/expectation.py
@@ -0,0 +1,27 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+from testrunner.local import statusfile
+from testrunner.outproc import base as outproc
+
+class ForgiveTimeoutProc(base.TestProcProducer):
+  """Test processor that forgives timeouts, passing tests and results through."""
+ def __init__(self):
+ super(ForgiveTimeoutProc, self).__init__('no-timeout')
+
+ def _next_test(self, test):
+ subtest = self._create_subtest(test, 'no_timeout')
+ if subtest.expected_outcomes == outproc.OUTCOMES_PASS:
+ subtest.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+ elif subtest.expected_outcomes == outproc.OUTCOMES_FAIL:
+ subtest.expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
+ elif statusfile.TIMEOUT not in subtest.expected_outcomes:
+ subtest.expected_outcomes = (
+ subtest.expected_outcomes + [statusfile.TIMEOUT])
+ self._send_test(subtest)
+
+ def _result_for(self, test, subtest, result):
+ self._send_result(test, result)
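+
+
+# In short: PASS-only tests become PASS-or-TIMEOUT, FAIL-only tests become
+# FAIL-or-TIMEOUT, and any other test simply gains TIMEOUT as an accepted
+# outcome. A wiring sketch (loader/execution_proc are placeholders):
+#
+#   forgiving = ForgiveTimeoutProc()
+#   loader.connect_to(forgiving)
+#   forgiving.connect_to(execution_proc)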
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
new file mode 100644
index 0000000000..624b9aac04
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -0,0 +1,287 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+import time
+
+from . import base
+
+
+class FuzzerConfig(object):
+ def __init__(self, probability, analyzer, fuzzer):
+ """
+ Args:
+      probability: probability of choosing this fuzzer, in the range (0, 10]
+ analyzer: instance of Analyzer class, can be None if no analysis is needed
+ fuzzer: instance of Fuzzer class
+ """
+ assert probability > 0 and probability <= 10
+
+ self.probability = probability
+ self.analyzer = analyzer
+ self.fuzzer = fuzzer
+
+
+class Analyzer(object):
+ def get_analysis_flags(self):
+ raise NotImplementedError()
+
+ def do_analysis(self, result):
+ raise NotImplementedError()
+
+
+class Fuzzer(object):
+ def create_flags_generator(self, rng, test, analysis_value):
+ """
+ Args:
+ rng: random number generator
+ test: test for which to create flags
+ analysis_value: value returned by the analyzer. None if there is no
+ corresponding analyzer to this fuzzer or the analysis phase is disabled
+ """
+ raise NotImplementedError()
+
+
+# TODO(majeski): Allow multiple subtests to run at once.
+class FuzzerProc(base.TestProcProducer):
+ def __init__(self, rng, count, fuzzers, disable_analysis=False):
+ """
+ Args:
+ rng: random number generator used to select flags and values for them
+ count: number of tests to generate based on each base test
+ fuzzers: list of FuzzerConfig instances
+      disable_analysis: disable the analysis phase and the filtering based on
+        it. When set, the processor passes None as the analysis result to
+        fuzzers
+ """
+ super(FuzzerProc, self).__init__('Fuzzer')
+
+ self._rng = rng
+ self._count = count
+ self._fuzzer_configs = fuzzers
+ self._disable_analysis = disable_analysis
+ self._gens = {}
+
+ def setup(self, requirement=base.DROP_RESULT):
+    # The fuzzer is optimized not to store results.
+ assert requirement == base.DROP_RESULT
+ super(FuzzerProc, self).setup(requirement)
+
+ def _next_test(self, test):
+ if self.is_stopped:
+ return
+
+ analysis_subtest = self._create_analysis_subtest(test)
+ if analysis_subtest:
+ self._send_test(analysis_subtest)
+ else:
+ self._gens[test.procid] = self._create_gen(test)
+ self._try_send_next_test(test)
+
+ def _create_analysis_subtest(self, test):
+ if self._disable_analysis:
+ return None
+
+ analysis_flags = []
+ for fuzzer_config in self._fuzzer_configs:
+ if fuzzer_config.analyzer:
+ analysis_flags += fuzzer_config.analyzer.get_analysis_flags()
+
+ if analysis_flags:
+ analysis_flags = list(set(analysis_flags))
+ return self._create_subtest(test, 'analysis', flags=analysis_flags,
+ keep_output=True)
+
+
+ def _result_for(self, test, subtest, result):
+ if not self._disable_analysis:
+ if result is not None:
+        # Analysis phase; for fuzzing we drop the result.
+ if result.has_unexpected_output:
+ self._send_result(test, None)
+ return
+ self._gens[test.procid] = self._create_gen(test, result)
+
+ self._try_send_next_test(test)
+
+ def _create_gen(self, test, analysis_result=None):
+    # This is called with analysis_result==None only when there is no
+    # analysis phase at all, i.e. when no fuzzer has its own analyzer.
+ gens = []
+ indexes = []
+ for i, fuzzer_config in enumerate(self._fuzzer_configs):
+ analysis_value = None
+ if analysis_result and fuzzer_config.analyzer:
+ analysis_value = fuzzer_config.analyzer.do_analysis(analysis_result)
+ if not analysis_value:
+ # Skip fuzzer for this test since it doesn't have analysis data
+ continue
+ p = fuzzer_config.probability
+ flag_gen = fuzzer_config.fuzzer.create_flags_generator(self._rng, test,
+ analysis_value)
+ indexes += [len(gens)] * p
+ gens.append((p, flag_gen))
+
+ if not gens:
+ # No fuzzers for this test, skip it
+ return
+
+ i = 0
+ while not self._count or i < self._count:
+ main_index = self._rng.choice(indexes)
+ _, main_gen = gens[main_index]
+
+ flags = next(main_gen)
+ for index, (p, gen) in enumerate(gens):
+ if index == main_index:
+ continue
+ if self._rng.randint(1, 10) <= p:
+ flags += next(gen)
+
+ flags.append('--fuzzer-random-seed=%s' % self._next_seed())
+ yield self._create_subtest(test, str(i), flags=flags)
+
+ i += 1
+
+ def _try_send_next_test(self, test):
+ if not self.is_stopped:
+ for subtest in self._gens[test.procid]:
+ self._send_test(subtest)
+ return
+
+ del self._gens[test.procid]
+ self._send_result(test, None)
+
+ def _next_seed(self):
+ seed = None
+ while not seed:
+ seed = self._rng.randint(-2147483648, 2147483647)
+ return seed
+
+
+class ScavengeAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Maximum new space size reached = '):
+ return int(float(line.split()[7]))
+
+
+class ScavengeFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-scavenge=%d' % (analysis_value or 100)]
+
+
+class MarkingAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Maximum marking limit reached = '):
+ return int(float(line.split()[6]))
+
+
+class MarkingFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-marking=%d' % (analysis_value or 100)]
+
+
+class GcIntervalAnalyzer(Analyzer):
+ def get_analysis_flags(self):
+ return ['--fuzzer-gc-analysis']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('### Allocations = '):
+ return int(float(line.split()[3][:-1]))
+
+
+class GcIntervalFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ if analysis_value:
+ value = analysis_value / 10
+ else:
+ value = 10000
+ while True:
+ yield ['--random-gc-interval=%d' % value]
+
+
+class CompactionFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stress-compaction-random']
+
+
+class ThreadPoolSizeFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--thread-pool-size=%d' % rng.randint(1, 8)]
+
+
+class InterruptBudgetFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ limit = 1 + int(rng.random() * 144)
+ yield ['--interrupt-budget=%d' % rng.randint(1, limit * 1024)]
+
+
+class DeoptAnalyzer(Analyzer):
+ MAX_DEOPT=1000000000
+
+ def __init__(self, min_interval):
+ super(DeoptAnalyzer, self).__init__()
+ self._min = min_interval
+
+ def get_analysis_flags(self):
+ return ['--deopt-every-n-times=%d' % self.MAX_DEOPT,
+ '--print-deopt-stress']
+
+ def do_analysis(self, result):
+ for line in reversed(result.output.stdout.splitlines()):
+ if line.startswith('=== Stress deopt counter: '):
+ counter = self.MAX_DEOPT - int(line.split(' ')[-1])
+ if counter < self._min:
+ # Skip this test since we won't generate any meaningful interval with
+          # the given minimum.
+ return None
+ return counter
+
+
+class DeoptFuzzer(Fuzzer):
+ def __init__(self, min_interval):
+ super(DeoptFuzzer, self).__init__()
+ self._min = min_interval
+
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ if analysis_value:
+ value = analysis_value / 2
+ else:
+ value = 10000
+ interval = rng.randint(self._min, max(value, self._min))
+ yield ['--deopt-every-n-times=%d' % interval]
+
+
+FUZZERS = {
+ 'compaction': (None, CompactionFuzzer),
+ 'deopt': (DeoptAnalyzer, DeoptFuzzer),
+ 'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
+ 'interrupt_budget': (None, InterruptBudgetFuzzer),
+ 'marking': (MarkingAnalyzer, MarkingFuzzer),
+ 'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
+ 'threads': (None, ThreadPoolSizeFuzzer),
+}
+
+
+def create_fuzzer_config(name, probability, *args, **kwargs):
+ analyzer_class, fuzzer_class = FUZZERS[name]
+ return FuzzerConfig(
+ probability,
+ analyzer_class(*args, **kwargs) if analyzer_class else None,
+ fuzzer_class(*args, **kwargs),
+ )
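+
+
+# A usage sketch (weights, count and the surrounding wiring are illustrative):
+#
+#   import random
+#   configs = [
+#       create_fuzzer_config('marking', 2),
+#       create_fuzzer_config('deopt', 3, 100),  # min_interval=100
+#   ]
+#   fuzzer = FuzzerProc(random.Random(), count=5, fuzzers=configs)
+#   loader.connect_to(fuzzer)
+#   fuzzer.connect_to(execution_proc)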
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 78514f7252..221c64bfdd 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -103,6 +103,15 @@ class SimpleProgressIndicator(ProgressIndicator):
class VerboseProgressIndicator(SimpleProgressIndicator):
+ def __init__(self):
+ super(VerboseProgressIndicator, self).__init__()
+ self._last_printed_time = time.time()
+
+ def _print(self, text):
+ print text
+ sys.stdout.flush()
+ self._last_printed_time = time.time()
+
def _on_result_for(self, test, result):
super(VerboseProgressIndicator, self)._on_result_for(test, result)
# TODO(majeski): Support for dummy/grouped results
@@ -113,12 +122,13 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
outcome = 'FAIL'
else:
outcome = 'pass'
- print 'Done running %s: %s' % (test, outcome)
- sys.stdout.flush()
+ self._print('Done running %s: %s' % (test, outcome))
def _on_heartbeat(self):
- print 'Still working...'
- sys.stdout.flush()
+ if time.time() - self._last_printed_time > 30:
+      # Print something every 30 seconds so we don't get killed by an output
+      # timeout.
+ self._print('Still working...')
class DotsProgressIndicator(SimpleProgressIndicator):
@@ -292,7 +302,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator):
- def __init__(self, json_test_results, arch, mode, random_seed):
+ def __init__(self, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
# We want to drop stdout/err for all passed tests on the first try, but we
# need to get outputs for all runs after the first one. To accommodate that,
@@ -303,7 +313,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
- self.random_seed = random_seed
self.results = []
self.tests = []
@@ -338,10 +347,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes,
"duration": output.duration,
-
- # TODO(machenbach): This stores only the global random seed from the
- # context and not possible overrides when using random-seed stress.
- "random_seed": self.random_seed,
+ "random_seed": test.random_seed,
"target_name": test.get_shell(),
"variant": test.variant,
})
diff --git a/deps/v8/tools/testrunner/testproc/rerun.py b/deps/v8/tools/testrunner/testproc/rerun.py
index 7f96e0260c..a72bb3ebc6 100644
--- a/deps/v8/tools/testrunner/testproc/rerun.py
+++ b/deps/v8/tools/testrunner/testproc/rerun.py
@@ -34,7 +34,7 @@ class RerunProc(base.TestProcProducer):
results = self._results[test.procid]
results.append(result)
- if self._needs_rerun(test, result):
+ if not self.is_stopped and self._needs_rerun(test, result):
self._rerun[test.procid] += 1
if self._rerun_total_left is not None:
self._rerun_total_left -= 1
diff --git a/deps/v8/tools/testrunner/testproc/seed.py b/deps/v8/tools/testrunner/testproc/seed.py
new file mode 100644
index 0000000000..3f40e79b34
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/seed.py
@@ -0,0 +1,58 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+from collections import defaultdict
+
+from . import base
+from ..utils import random_utils
+
+
+class SeedProc(base.TestProcProducer):
+ def __init__(self, count, seed=None, parallel_subtests=1):
+ """
+ Args:
+ count: How many subtests with different seeds to create for each test.
+ 0 means infinite.
+ seed: seed to use. None means random seed for each subtest.
+      parallel_subtests: How many subtests of each test to run at the same time.
+ """
+ super(SeedProc, self).__init__('Seed')
+ self._count = count
+ self._seed = seed
+ self._last_idx = defaultdict(int)
+ self._todo = defaultdict(int)
+ self._parallel_subtests = parallel_subtests
+ if count:
+ self._parallel_subtests = min(self._parallel_subtests, count)
+
+ def setup(self, requirement=base.DROP_RESULT):
+ super(SeedProc, self).setup(requirement)
+
+ # SeedProc is optimized for dropping the result
+ assert requirement == base.DROP_RESULT
+
+ def _next_test(self, test):
+ for _ in xrange(0, self._parallel_subtests):
+ self._try_send_next_test(test)
+
+ def _result_for(self, test, subtest, result):
+ self._todo[test.procid] -= 1
+ self._try_send_next_test(test)
+
+ def _try_send_next_test(self, test):
+ def create_subtest(idx):
+ seed = self._seed or random_utils.random_seed()
+ return self._create_subtest(test, idx, random_seed=seed)
+
+ num = self._last_idx[test.procid]
+ if not self._count or num < self._count:
+ num += 1
+ self._send_test(create_subtest(num))
+ self._todo[test.procid] += 1
+ self._last_idx[test.procid] = num
+ elif not self._todo.get(test.procid):
+ del self._last_idx[test.procid]
+ del self._todo[test.procid]
+ self._send_result(test, None)
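+
+
+# A usage sketch (loader/execution_proc are placeholders): three seeded
+# subtests per base test, at most two in flight at once, each with a fresh
+# random seed:
+#
+#   seed_proc = SeedProc(count=3, seed=None, parallel_subtests=2)
+#   loader.connect_to(seed_proc)
+#   seed_proc.connect_to(execution_proc)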
diff --git a/deps/v8/tools/testrunner/testproc/sigproc.py b/deps/v8/tools/testrunner/testproc/sigproc.py
new file mode 100644
index 0000000000..e97fe7ece3
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/sigproc.py
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import signal
+
+from . import base
+from testrunner.local import utils
+
+
+class SignalProc(base.TestProcObserver):
+ def __init__(self):
+ super(SignalProc, self).__init__()
+ self.exit_code = utils.EXIT_CODE_PASS
+
+ def setup(self, *args, **kwargs):
+ super(SignalProc, self).setup(*args, **kwargs)
+    # This should be called after the processors are chained together so that
+    # a caught signal is not lost.
+ signal.signal(signal.SIGINT, self._on_ctrlc)
+ signal.signal(signal.SIGTERM, self._on_sigterm)
+
+ def _on_ctrlc(self, _signum, _stack_frame):
+ print '>>> Ctrl-C detected, early abort...'
+ self.exit_code = utils.EXIT_CODE_INTERRUPTED
+ self.stop()
+
+ def _on_sigterm(self, _signum, _stack_frame):
+ print '>>> SIGTERM received, early abort...'
+ self.exit_code = utils.EXIT_CODE_TERMINATED
+ self.stop()
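+
+
+# A usage sketch (loader/execution_proc are placeholders): install the
+# handlers only once the chain is complete, then read the exit code after the
+# run:
+#
+#   sig_proc = SignalProc()
+#   loader.connect_to(sig_proc)
+#   sig_proc.connect_to(execution_proc)
+#   loader.setup()  # installs the SIGINT/SIGTERM handlers
+#   execution_proc.run()
+#   exit_code = sig_proc.exit_code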
diff --git a/deps/v8/tools/testrunner/testproc/timeout.py b/deps/v8/tools/testrunner/testproc/timeout.py
new file mode 100644
index 0000000000..84ddc656e2
--- /dev/null
+++ b/deps/v8/tools/testrunner/testproc/timeout.py
@@ -0,0 +1,28 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+from . import base
+
+
+class TimeoutProc(base.TestProcObserver):
+ def __init__(self, duration_sec):
+ super(TimeoutProc, self).__init__()
+ self._duration_sec = duration_sec
+ self._start = time.time()
+
+ def _on_next_test(self, test):
+ self._on_event()
+
+ def _on_result_for(self, test, result):
+ self._on_event()
+
+ def _on_heartbeat(self):
+ self._on_event()
+
+ def _on_event(self):
+ if not self.is_stopped:
+ if time.time() - self._start > self._duration_sec:
+ self.stop()
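+
+
+# A usage sketch (loader/execution_proc are placeholders): stop the whole
+# chain after one hour of wall-clock time, checked on every test, result and
+# heartbeat:
+#
+#   timeout_proc = TimeoutProc(60 * 60)
+#   loader.connect_to(timeout_proc)
+#   timeout_proc.connect_to(execution_proc)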
diff --git a/deps/v8/tools/testrunner/trycatch_loader.js b/deps/v8/tools/testrunner/trycatch_loader.js
new file mode 100644
index 0000000000..737c8e45db
--- /dev/null
+++ b/deps/v8/tools/testrunner/trycatch_loader.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Wrapper that loads the JavaScript tests passed as arguments; used by the
+// GC fuzzer. It ignores all exceptions and runs each test in a separate
+// namespace.
+//
+// It can't prevent the %AbortJS function from aborting execution, so it
+// should be used with d8's --disable-abortjs flag to ignore all possible
+// errors inside tests.
+
+// We use -- as an additional separator for test preamble files and test files.
+// The preamble files (before --) will be loaded in each realm before each
+// test.
+var separator = arguments.indexOf("--");
+var preamble = arguments.slice(0, separator);
+var tests = arguments.slice(separator + 1);
+
+var preambleString = "";
+for (let jstest of preamble) {
+ preambleString += "load(\"" + jstest + "\");"
+}
+
+for (let jstest of tests) {
+ print("Loading " + jstest);
+ let start = performance.now();
+
+  // Anonymous function so we don't pollute the global namespace.
+ (function () {
+ let realm = Realm.create();
+ try {
+ Realm.eval(realm, preambleString + "load(\"" + jstest + "\");");
+ } catch (err) {
+ // ignore all errors
+ }
+ Realm.dispose(realm);
+ })();
+
+ let durationSec = ((performance.now() - start) / 1000.0).toFixed(2);
+ print("Duration " + durationSec + "s");
+}
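+
+// An illustrative invocation (file names are placeholders): everything
+// between the script and the first "--" is preamble, the rest are tests:
+//
+//   d8 --disable-abortjs trycatch_loader.js -- preamble.js -- test1.js test2.js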
diff --git a/deps/v8/tools/testrunner/utils/__init__.py b/deps/v8/tools/testrunner/utils/__init__.py
new file mode 100644
index 0000000000..4433538556
--- /dev/null
+++ b/deps/v8/tools/testrunner/utils/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/testrunner/utils/random_utils.py b/deps/v8/tools/testrunner/utils/random_utils.py
new file mode 100644
index 0000000000..0d2cb3fa95
--- /dev/null
+++ b/deps/v8/tools/testrunner/utils/random_utils.py
@@ -0,0 +1,13 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+
+def random_seed():
+ """Returns random, non-zero seed."""
+ seed = 0
+ while not seed:
+ seed = random.SystemRandom().randint(-2147483648, 2147483647)
+ return seed
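+
+
+# A quick sanity sketch: seeds are drawn from the full signed 32-bit range but
+# are never zero, so they can be passed directly as --random-seed values:
+#
+#   seed = random_seed()
+#   assert seed != 0 and -2**31 <= seed < 2**31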
diff --git a/deps/v8/tools/toolchain/BUILD.gn b/deps/v8/tools/toolchain/BUILD.gn
new file mode 100644
index 0000000000..b2462054c4
--- /dev/null
+++ b/deps/v8/tools/toolchain/BUILD.gn
@@ -0,0 +1,23 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/toolchain/gcc_toolchain.gni")
+
+gcc_toolchain("mips-bundled") {
+ toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+ root_build_dir)
+ cc = "${toolprefix}gcc"
+ cxx = "${toolprefix}g++"
+
+ readelf = "${toolprefix}readelf"
+ nm = "${toolprefix}nm"
+ ar = "${toolprefix}ar"
+ ld = cxx
+
+ toolchain_args = {
+ current_cpu = "mips"
+ current_os = "linux"
+ is_clang = false
+ }
+}
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index cad836b2e3..b77ccafa63 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -20,6 +20,12 @@ BOTS = {
'--nexus10': 'v8_nexus10_perf_try',
}
+# This list contains builder names that should be triggered on an internal
+# Swarming bucket instead of the internal Buildbot master.
+SWARMING_BOTS = [
+ 'v8_linux64_perf_try',
+]
+
DEFAULT_BOTS = [
'v8_arm32_perf_try',
'v8_linux32_perf_try',
@@ -50,6 +56,17 @@ PUBLIC_BENCHMARKS = [
V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
+def _trigger_bots(bucket, bots, options):
+ cmd = ['git cl try']
+ cmd += ['-B', bucket]
+ cmd += ['-b %s' % bot for bot in bots]
+ if options.revision: cmd += ['-r %s' % options.revision]
+ benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
+ cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
+ if options.extra_flags:
+ cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
+ subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
+
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
@@ -89,14 +106,13 @@ def main():
subprocess.check_output(
'update_depot_tools', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
- cmd = ['git cl try -m internal.client.v8']
- cmd += ['-b %s' % bot for bot in options.bots]
- if options.revision: cmd += ['-r %s' % options.revision]
- benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
- cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
- if options.extra_flags:
- cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
- subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
+ buildbot_bots = [bot for bot in options.bots if bot not in SWARMING_BOTS]
+ if buildbot_bots:
+ _trigger_bots('master.internal.client.v8', buildbot_bots, options)
+
+ swarming_bots = [bot for bot in options.bots if bot in SWARMING_BOTS]
+ if swarming_bots:
+ _trigger_bots('luci.v8-internal.try', swarming_bots, options)
if __name__ == '__main__': # pragma: no cover
diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html
index 552e83783a..2167d21a14 100644
--- a/deps/v8/tools/turbolizer/index.html
+++ b/deps/v8/tools/turbolizer/index.html
@@ -5,13 +5,13 @@
<link rel="stylesheet" href="turbo-visualizer.css" />
</head>
<body>
- <div id="left">
+ <div id="left" class="viewpane">
<div id='source-text'>
<pre id='source-text-pre'\>
</div>
</div>
<div class="resizer-left"></div>
- <div id="middle" class="resizable-pane">
+ <div id="middle" class="viewpane">
<div id="graph-toolbox-anchor">
<span id="graph-toolbox">
<input id="layout" type="image" title="layout graph" src="layout-icon.png"
@@ -55,7 +55,7 @@
</text></svg></div>
</div>
<div class="resizer-right"></div>
- <div id="right">
+ <div id="right" class="viewpane">
<div id='disassembly'>
<pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
<ul id='disassembly-list' class='nolinenums noindent'>
diff --git a/deps/v8/tools/turbolizer/monkey.js b/deps/v8/tools/turbolizer/monkey.js
index 129f8b3268..29eaaebb36 100644
--- a/deps/v8/tools/turbolizer/monkey.js
+++ b/deps/v8/tools/turbolizer/monkey.js
@@ -2,25 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-Array.prototype.getStaggeredFromMiddle = function(i) {
- if (i >= this.length) {
- throw("getStaggeredFromMiddle: OOB");
- }
- var middle = Math.floor(this.length / 2);
- var index = middle + (((i % 2) == 0) ? (i / 2) : (((1 - i) / 2) - 1));
- return this[index];
-}
-
-Array.prototype.contains = function(obj) {
- var i = this.length;
- while (i--) {
- if (this[i] === obj) {
- return true;
- }
- }
- return false;
-}
-
Math.alignUp = function(raw, multiple) {
return Math.floor((raw + multiple - 1) / multiple) * multiple;
}
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
index 7fd9c4852a..95fcba7928 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.css
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -242,15 +242,20 @@ span.linkable-text:hover {
#left {
- float: left; height: 100%; background-color: #FFFFFF;
+ float: left;
}
#middle {
- float:left; height: 100%; background-color: #F8F8F8;
+ float:left; background-color: #F8F8F8;
}
#right {
- float: right; background-color: #FFFFFF;
+ float: right;
+}
+
+.viewpane {
+ height: 100vh;
+ background-color: #FFFFFF;
}
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.js b/deps/v8/tools/turbolizer/turbo-visualizer.js
index c04384810b..0c720b22a4 100644
--- a/deps/v8/tools/turbolizer/turbo-visualizer.js
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.js
@@ -65,18 +65,15 @@ class Snapper {
}
setDisassemblyExpanded(newState) {
- console.log(newState)
if (this.disassemblyExpand.classed("invisible") === newState) return;
this.disassemblyExpandUpdate(newState);
let resizer = this.resizer;
if (newState) {
resizer.sep_right = resizer.sep_right_snap;
resizer.sep_right_snap = resizer.client_width;
- console.log("set expand")
} else {
resizer.sep_right_snap = resizer.sep_right;
resizer.sep_right = resizer.client_width;
- console.log("set collapse")
}
resizer.updatePanes();
}
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index f4ff3fe1f7..4fb6aaff13 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -101,6 +101,8 @@ def run_tests(basedir, *args, **kwargs):
sys_args = ['--command-prefix', sys.executable] + list(args)
if kwargs.get('infra_staging', False):
sys_args.append('--infra-staging')
+ else:
+ sys_args.append('--no-infra-staging')
code = standard_runner.StandardTestRunner(
basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
@@ -145,7 +147,9 @@ class SystemTest(unittest.TestCase):
sys.path.append(TOOLS_ROOT)
global standard_runner
from testrunner import standard_runner
+ from testrunner.local import command
from testrunner.local import pool
+ command.setup_testing()
pool.setup_testing()
@classmethod
@@ -170,10 +174,11 @@ class SystemTest(unittest.TestCase):
'sweet/bananas',
'sweet/raspberries',
)
- self.assertIn('Running 4 tests', result.stdout, result)
+ self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
- self.assertIn('Total time:', result.stderr, result)
- self.assertIn('sweet/bananas', result.stderr, result)
+ # TODO(majeski): Implement for test processors
+ # self.assertIn('Total time:', result.stderr, result)
+ # self.assertIn('sweet/bananas', result.stderr, result)
self.assertEqual(0, result.returncode, result)
def testShardedProc(self):
@@ -199,6 +204,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ @unittest.skip("incompatible with test processors")
def testSharded(self):
"""Test running a particular shard."""
with temp_base() as basedir:
@@ -222,7 +228,7 @@ class SystemTest(unittest.TestCase):
def testFailProc(self):
self.testFail(infra_staging=True)
- def testFail(self, infra_staging=False):
+ def testFail(self, infra_staging=True):
"""Test running only failing tests in two variants."""
with temp_base() as basedir:
result = run_tests(
@@ -269,7 +275,7 @@ class SystemTest(unittest.TestCase):
def testFailWithRerunAndJSONProc(self):
self.testFailWithRerunAndJSON(infra_staging=True)
- def testFailWithRerunAndJSON(self, infra_staging=False):
+ def testFailWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json."""
with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -303,12 +309,13 @@ class SystemTest(unittest.TestCase):
# flags field of the test result.
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
+ self.maxDiff = None
self.check_cleaned_json_output('expected_test_results1.json', json_path)
def testFlakeWithRerunAndJSONProc(self):
self.testFlakeWithRerunAndJSON(infra_staging=True)
- def testFlakeWithRerunAndJSON(self, infra_staging=False):
+ def testFlakeWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -334,6 +341,7 @@ class SystemTest(unittest.TestCase):
'Done running sweet/bananaflakes: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ self.maxDiff = None
self.check_cleaned_json_output('expected_test_results2.json', json_path)
def testAutoDetect(self):
@@ -374,7 +382,7 @@ class SystemTest(unittest.TestCase):
def testSkipsProc(self):
self.testSkips(infra_staging=True)
- def testSkips(self, infra_staging=False):
+ def testSkips(self, infra_staging=True):
"""Test skipping tests in status file for a specific variant."""
with temp_base() as basedir:
result = run_tests(
@@ -390,12 +398,12 @@ class SystemTest(unittest.TestCase):
else:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
- self.assertEqual(0, result.returncode, result)
+ self.assertEqual(2, result.returncode, result)
def testDefaultProc(self):
self.testDefault(infra_staging=True)
- def testDefault(self, infra_staging=False):
+ def testDefault(self, infra_staging=True):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
@@ -410,14 +418,14 @@ class SystemTest(unittest.TestCase):
else:
self.assertIn('Running 0 base tests', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
- self.assertEqual(0, result.returncode, result)
+ self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
"""Test failing run when build config is not found."""
with temp_base() as basedir:
result = run_tests(basedir)
self.assertIn('Failed to load build config', result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testGNOption(self):
"""Test using gn option, but no gn build folder is found."""
@@ -433,7 +441,7 @@ class SystemTest(unittest.TestCase):
result = run_tests(basedir, '--mode=Release')
self.assertIn('execution mode (release) for release is inconsistent '
'with build config (debug)', result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testInconsistentArch(self):
"""Test failing run when attempting to wrongly override the arch."""
@@ -442,13 +450,13 @@ class SystemTest(unittest.TestCase):
self.assertIn(
'--arch value (ia32) inconsistent with build config (x64).',
result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testWrongVariant(self):
"""Test using a bogus variant."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--variants=meh')
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testModeFromBuildConfig(self):
"""Test auto-detection of mode from build config."""
@@ -457,6 +465,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Running tests for x64.release', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testReport(self):
"""Test the report feature.
@@ -475,6 +484,7 @@ class SystemTest(unittest.TestCase):
result.stdout, result)
self.assertEqual(1, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testWarnUnusedRules(self):
"""Test the unused-rules feature."""
with temp_base() as basedir:
@@ -489,6 +499,7 @@ class SystemTest(unittest.TestCase):
self.assertIn( 'Unused rule: regress/', result.stdout, result)
self.assertEqual(1, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testCatNoSources(self):
"""Test printing sources, but the suite's tests have none available."""
with temp_base() as basedir:
@@ -506,7 +517,7 @@ class SystemTest(unittest.TestCase):
def testPredictableProc(self):
self.testPredictable(infra_staging=True)
- def testPredictable(self, infra_staging=False):
+ def testPredictable(self, infra_staging=True):
"""Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and
@@ -547,7 +558,10 @@ class SystemTest(unittest.TestCase):
# timeout was used.
self.assertEqual(0, result.returncode, result)
- def testRandomSeedStressWithDefault(self):
+ def testRandomSeedStressWithDefaultProc(self):
+ self.testRandomSeedStressWithDefault(infra_staging=True)
+
+ def testRandomSeedStressWithDefault(self, infra_staging=True):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
@@ -557,8 +571,13 @@ class SystemTest(unittest.TestCase):
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
+ infra_staging=infra_staging,
)
- self.assertIn('Running 2 tests', result.stdout, result)
+ if infra_staging:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ else:
+ self.assertIn('Running 2 tests', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
@@ -573,7 +592,8 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'sweet/strawberries',
)
- self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
self.assertIn('--random-seed=123', result.stdout, result)
@@ -598,7 +618,8 @@ class SystemTest(unittest.TestCase):
)
# Both tests are either marked as running in only default or only
# slow variant.
- self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('Running 2 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testStatusFilePresubmit(self):
@@ -611,7 +632,7 @@ class SystemTest(unittest.TestCase):
def testDotsProgressProc(self):
self.testDotsProgress(infra_staging=True)
- def testDotsProgress(self, infra_staging=False):
+ def testDotsProgress(self, infra_staging=True):
with temp_base() as basedir:
result = run_tests(
basedir,
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index 172b87a5d6..e889ecabce 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -4,15 +4,15 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -20,20 +20,20 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -41,20 +41,20 @@
"result": "FAIL",
"run": 2,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -62,40 +62,40 @@
"result": "FAIL",
"run": 3,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
index 7fcfe47f71..cdb4766e95 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -4,15 +4,15 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/bananaflakes",
@@ -20,20 +20,20 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 0,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/bananaflakes",
@@ -41,29 +41,29 @@
"result": "PASS",
"run": 2,
"stderr": "",
- "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": false,
"name": "sweet/bananaflakes"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
index 115471ac72..1fcf2864b6 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -10,7 +10,7 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
return map(
self._create_test,
['bananas', 'apples', 'cherries', 'strawberries', 'raspberries'],
@@ -24,8 +24,8 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return 'd8_mocked.py'
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [self.name]
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
index 9407769b35..a986af5c2f 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
@@ -10,7 +10,7 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
return map(
self._create_test,
['bananaflakes'],
@@ -24,8 +24,8 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return 'd8_mocked.py'
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [self.name]
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index c96741a9a1..5659cdd03c 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -49,44 +49,53 @@ INSTANCE_TYPES = {
145: "FIXED_FLOAT32_ARRAY_TYPE",
146: "FIXED_FLOAT64_ARRAY_TYPE",
147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 148: "FIXED_DOUBLE_ARRAY_TYPE",
- 149: "FILLER_TYPE",
- 150: "ACCESS_CHECK_INFO_TYPE",
- 151: "ACCESSOR_INFO_TYPE",
- 152: "ACCESSOR_PAIR_TYPE",
- 153: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 154: "ALLOCATION_MEMENTO_TYPE",
- 155: "ALLOCATION_SITE_TYPE",
- 156: "ASYNC_GENERATOR_REQUEST_TYPE",
- 157: "CONTEXT_EXTENSION_TYPE",
- 158: "DEBUG_INFO_TYPE",
- 159: "FUNCTION_TEMPLATE_INFO_TYPE",
- 160: "INTERCEPTOR_INFO_TYPE",
- 161: "MODULE_INFO_ENTRY_TYPE",
- 162: "MODULE_TYPE",
- 163: "OBJECT_TEMPLATE_INFO_TYPE",
- 164: "PROMISE_REACTION_JOB_INFO_TYPE",
- 165: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
- 166: "PROTOTYPE_INFO_TYPE",
- 167: "SCRIPT_TYPE",
- 168: "STACK_FRAME_INFO_TYPE",
- 169: "TUPLE2_TYPE",
- 170: "TUPLE3_TYPE",
- 171: "FIXED_ARRAY_TYPE",
- 172: "DESCRIPTOR_ARRAY_TYPE",
- 173: "HASH_TABLE_TYPE",
- 174: "TRANSITION_ARRAY_TYPE",
- 175: "CELL_TYPE",
- 176: "CODE_DATA_CONTAINER_TYPE",
- 177: "FEEDBACK_VECTOR_TYPE",
- 178: "LOAD_HANDLER_TYPE",
- 179: "PROPERTY_ARRAY_TYPE",
- 180: "PROPERTY_CELL_TYPE",
- 181: "SHARED_FUNCTION_INFO_TYPE",
- 182: "SMALL_ORDERED_HASH_MAP_TYPE",
- 183: "SMALL_ORDERED_HASH_SET_TYPE",
- 184: "STORE_HANDLER_TYPE",
- 185: "WEAK_CELL_TYPE",
+ 148: "FIXED_BIGINT64_ARRAY_TYPE",
+ 149: "FIXED_BIGUINT64_ARRAY_TYPE",
+ 150: "FIXED_DOUBLE_ARRAY_TYPE",
+ 151: "FILLER_TYPE",
+ 152: "ACCESS_CHECK_INFO_TYPE",
+ 153: "ACCESSOR_INFO_TYPE",
+ 154: "ACCESSOR_PAIR_TYPE",
+ 155: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 156: "ALLOCATION_MEMENTO_TYPE",
+ 157: "ALLOCATION_SITE_TYPE",
+ 158: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 159: "CONTEXT_EXTENSION_TYPE",
+ 160: "DEBUG_INFO_TYPE",
+ 161: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 162: "INTERCEPTOR_INFO_TYPE",
+ 163: "MODULE_INFO_ENTRY_TYPE",
+ 164: "MODULE_TYPE",
+ 165: "OBJECT_TEMPLATE_INFO_TYPE",
+ 166: "PROMISE_CAPABILITY_TYPE",
+ 167: "PROMISE_REACTION_TYPE",
+ 168: "PROTOTYPE_INFO_TYPE",
+ 169: "SCRIPT_TYPE",
+ 170: "STACK_FRAME_INFO_TYPE",
+ 171: "TUPLE2_TYPE",
+ 172: "TUPLE3_TYPE",
+ 173: "CALLABLE_TASK_TYPE",
+ 174: "CALLBACK_TASK_TYPE",
+ 175: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+ 176: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+ 177: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+ 178: "FIXED_ARRAY_TYPE",
+ 179: "DESCRIPTOR_ARRAY_TYPE",
+ 180: "HASH_TABLE_TYPE",
+ 181: "SCOPE_INFO_TYPE",
+ 182: "TRANSITION_ARRAY_TYPE",
+ 183: "CELL_TYPE",
+ 184: "CODE_DATA_CONTAINER_TYPE",
+ 185: "FEEDBACK_CELL_TYPE",
+ 186: "FEEDBACK_VECTOR_TYPE",
+ 187: "LOAD_HANDLER_TYPE",
+ 188: "PROPERTY_ARRAY_TYPE",
+ 189: "PROPERTY_CELL_TYPE",
+ 190: "SHARED_FUNCTION_INFO_TYPE",
+ 191: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 192: "SMALL_ORDERED_HASH_SET_TYPE",
+ 193: "STORE_HANDLER_TYPE",
+ 194: "WEAK_CELL_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
@@ -131,35 +140,39 @@ INSTANCE_TYPES = {
1091: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
1092: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
1093: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1094: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1095: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1096: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1097: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1098: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1099: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1100: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 1101: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
- 1102: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
- 1103: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
- 1104: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
- 1105: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
- 1106: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
- 1107: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
- 1108: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
- 1109: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
- 1110: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 1111: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 1112: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
- 1113: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
- 1114: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 1115: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 1116: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
- 1117: "WASM_INSTANCE_TYPE",
- 1118: "WASM_MEMORY_TYPE",
- 1119: "WASM_MODULE_TYPE",
- 1120: "WASM_TABLE_TYPE",
- 1121: "JS_BOUND_FUNCTION_TYPE",
- 1122: "JS_FUNCTION_TYPE",
+ 1094: "JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1095: "JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1096: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1097: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1098: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1099: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1100: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1101: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1102: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 1103: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 1104: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 1105: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 1106: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 1107: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1108: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1109: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 1110: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 1111: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
+ 1112: "JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 1113: "JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 1114: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 1115: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 1116: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
+ 1117: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
+ 1118: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 1119: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 1120: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
+ 1121: "WASM_INSTANCE_TYPE",
+ 1122: "WASM_MEMORY_TYPE",
+ 1123: "WASM_MODULE_TYPE",
+ 1124: "WASM_TABLE_TYPE",
+ 1125: "JS_BOUND_FUNCTION_TYPE",
+ 1126: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
@@ -167,10 +180,10 @@ KNOWN_MAPS = {
0x02201: (138, "FreeSpaceMap"),
0x02251: (132, "MetaMap"),
0x022a1: (131, "NullMap"),
- 0x022f1: (172, "DescriptorArrayMap"),
- 0x02341: (171, "FixedArrayMap"),
- 0x02391: (149, "OnePointerFillerMap"),
- 0x023e1: (149, "TwoPointerFillerMap"),
+ 0x022f1: (179, "DescriptorArrayMap"),
+ 0x02341: (178, "FixedArrayMap"),
+ 0x02391: (151, "OnePointerFillerMap"),
+ 0x023e1: (151, "TwoPointerFillerMap"),
0x02431: (131, "UninitializedMap"),
0x02481: (8, "OneByteInternalizedStringMap"),
0x024d1: (131, "UndefinedMap"),
@@ -178,108 +191,116 @@ KNOWN_MAPS = {
0x02571: (131, "TheHoleMap"),
0x025c1: (131, "BooleanMap"),
0x02611: (136, "ByteArrayMap"),
- 0x02661: (171, "FixedCOWArrayMap"),
- 0x026b1: (173, "HashTableMap"),
+ 0x02661: (178, "FixedCOWArrayMap"),
+ 0x026b1: (180, "HashTableMap"),
0x02701: (128, "SymbolMap"),
0x02751: (72, "OneByteStringMap"),
- 0x027a1: (171, "ScopeInfoMap"),
- 0x027f1: (181, "SharedFunctionInfoMap"),
+ 0x027a1: (181, "ScopeInfoMap"),
+ 0x027f1: (190, "SharedFunctionInfoMap"),
0x02841: (133, "CodeMap"),
- 0x02891: (171, "FunctionContextMap"),
- 0x028e1: (175, "CellMap"),
- 0x02931: (185, "WeakCellMap"),
- 0x02981: (180, "GlobalPropertyCellMap"),
+ 0x02891: (178, "FunctionContextMap"),
+ 0x028e1: (183, "CellMap"),
+ 0x02931: (194, "WeakCellMap"),
+ 0x02981: (189, "GlobalPropertyCellMap"),
0x029d1: (135, "ForeignMap"),
- 0x02a21: (174, "TransitionArrayMap"),
- 0x02a71: (177, "FeedbackVectorMap"),
+ 0x02a21: (182, "TransitionArrayMap"),
+ 0x02a71: (186, "FeedbackVectorMap"),
0x02ac1: (131, "ArgumentsMarkerMap"),
0x02b11: (131, "ExceptionMap"),
0x02b61: (131, "TerminationExceptionMap"),
0x02bb1: (131, "OptimizedOutMap"),
0x02c01: (131, "StaleRegisterMap"),
- 0x02c51: (171, "NativeContextMap"),
- 0x02ca1: (171, "ModuleContextMap"),
- 0x02cf1: (171, "EvalContextMap"),
- 0x02d41: (171, "ScriptContextMap"),
- 0x02d91: (171, "BlockContextMap"),
- 0x02de1: (171, "CatchContextMap"),
- 0x02e31: (171, "WithContextMap"),
- 0x02e81: (171, "DebugEvaluateContextMap"),
- 0x02ed1: (171, "ScriptContextTableMap"),
- 0x02f21: (171, "ArrayListMap"),
- 0x02f71: (148, "FixedDoubleArrayMap"),
+ 0x02c51: (178, "NativeContextMap"),
+ 0x02ca1: (178, "ModuleContextMap"),
+ 0x02cf1: (178, "EvalContextMap"),
+ 0x02d41: (178, "ScriptContextMap"),
+ 0x02d91: (178, "BlockContextMap"),
+ 0x02de1: (178, "CatchContextMap"),
+ 0x02e31: (178, "WithContextMap"),
+ 0x02e81: (178, "DebugEvaluateContextMap"),
+ 0x02ed1: (178, "ScriptContextTableMap"),
+ 0x02f21: (178, "ArrayListMap"),
+ 0x02f71: (150, "FixedDoubleArrayMap"),
0x02fc1: (134, "MutableHeapNumberMap"),
- 0x03011: (173, "OrderedHashMapMap"),
- 0x03061: (173, "OrderedHashSetMap"),
- 0x030b1: (173, "NameDictionaryMap"),
- 0x03101: (173, "GlobalDictionaryMap"),
- 0x03151: (173, "NumberDictionaryMap"),
- 0x031a1: (173, "StringTableMap"),
- 0x031f1: (173, "WeakHashTableMap"),
- 0x03241: (171, "SloppyArgumentsElementsMap"),
- 0x03291: (182, "SmallOrderedHashMapMap"),
- 0x032e1: (183, "SmallOrderedHashSetMap"),
- 0x03331: (176, "CodeDataContainerMap"),
- 0x03381: (1071, "JSMessageObjectMap"),
- 0x033d1: (1057, "ExternalMap"),
- 0x03421: (137, "BytecodeArrayMap"),
- 0x03471: (171, "ModuleInfoMap"),
- 0x034c1: (175, "NoClosuresCellMap"),
- 0x03511: (175, "OneClosureCellMap"),
- 0x03561: (175, "ManyClosuresCellMap"),
- 0x035b1: (179, "PropertyArrayMap"),
- 0x03601: (130, "BigIntMap"),
- 0x03651: (106, "NativeSourceStringMap"),
- 0x036a1: (64, "StringMap"),
- 0x036f1: (73, "ConsOneByteStringMap"),
- 0x03741: (65, "ConsStringMap"),
- 0x03791: (77, "ThinOneByteStringMap"),
- 0x037e1: (69, "ThinStringMap"),
- 0x03831: (67, "SlicedStringMap"),
- 0x03881: (75, "SlicedOneByteStringMap"),
- 0x038d1: (66, "ExternalStringMap"),
- 0x03921: (82, "ExternalStringWithOneByteDataMap"),
- 0x03971: (74, "ExternalOneByteStringMap"),
- 0x039c1: (98, "ShortExternalStringMap"),
- 0x03a11: (114, "ShortExternalStringWithOneByteDataMap"),
- 0x03a61: (0, "InternalizedStringMap"),
- 0x03ab1: (2, "ExternalInternalizedStringMap"),
- 0x03b01: (18, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x03b51: (10, "ExternalOneByteInternalizedStringMap"),
- 0x03ba1: (34, "ShortExternalInternalizedStringMap"),
- 0x03bf1: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x03c41: (42, "ShortExternalOneByteInternalizedStringMap"),
- 0x03c91: (106, "ShortExternalOneByteStringMap"),
- 0x03ce1: (140, "FixedUint8ArrayMap"),
- 0x03d31: (139, "FixedInt8ArrayMap"),
- 0x03d81: (142, "FixedUint16ArrayMap"),
- 0x03dd1: (141, "FixedInt16ArrayMap"),
- 0x03e21: (144, "FixedUint32ArrayMap"),
- 0x03e71: (143, "FixedInt32ArrayMap"),
- 0x03ec1: (145, "FixedFloat32ArrayMap"),
- 0x03f11: (146, "FixedFloat64ArrayMap"),
- 0x03f61: (147, "FixedUint8ClampedArrayMap"),
- 0x03fb1: (169, "Tuple2Map"),
- 0x04001: (167, "ScriptMap"),
- 0x04051: (160, "InterceptorInfoMap"),
- 0x040a1: (151, "AccessorInfoMap"),
- 0x040f1: (150, "AccessCheckInfoMap"),
- 0x04141: (152, "AccessorPairMap"),
- 0x04191: (153, "AliasedArgumentsEntryMap"),
- 0x041e1: (154, "AllocationMementoMap"),
- 0x04231: (155, "AllocationSiteMap"),
- 0x04281: (156, "AsyncGeneratorRequestMap"),
- 0x042d1: (157, "ContextExtensionMap"),
- 0x04321: (158, "DebugInfoMap"),
- 0x04371: (159, "FunctionTemplateInfoMap"),
- 0x043c1: (161, "ModuleInfoEntryMap"),
- 0x04411: (162, "ModuleMap"),
- 0x04461: (163, "ObjectTemplateInfoMap"),
- 0x044b1: (164, "PromiseReactionJobInfoMap"),
- 0x04501: (165, "PromiseResolveThenableJobInfoMap"),
- 0x04551: (166, "PrototypeInfoMap"),
- 0x045a1: (168, "StackFrameInfoMap"),
- 0x045f1: (170, "Tuple3Map"),
+ 0x03011: (180, "OrderedHashMapMap"),
+ 0x03061: (180, "OrderedHashSetMap"),
+ 0x030b1: (180, "NameDictionaryMap"),
+ 0x03101: (180, "GlobalDictionaryMap"),
+ 0x03151: (180, "NumberDictionaryMap"),
+ 0x031a1: (180, "SimpleNumberDictionaryMap"),
+ 0x031f1: (180, "StringTableMap"),
+ 0x03241: (180, "WeakHashTableMap"),
+ 0x03291: (178, "SloppyArgumentsElementsMap"),
+ 0x032e1: (191, "SmallOrderedHashMapMap"),
+ 0x03331: (192, "SmallOrderedHashSetMap"),
+ 0x03381: (184, "CodeDataContainerMap"),
+ 0x033d1: (1071, "JSMessageObjectMap"),
+ 0x03421: (1057, "ExternalMap"),
+ 0x03471: (137, "BytecodeArrayMap"),
+ 0x034c1: (178, "ModuleInfoMap"),
+ 0x03511: (185, "NoClosuresCellMap"),
+ 0x03561: (185, "OneClosureCellMap"),
+ 0x035b1: (185, "ManyClosuresCellMap"),
+ 0x03601: (188, "PropertyArrayMap"),
+ 0x03651: (130, "BigIntMap"),
+ 0x036a1: (106, "NativeSourceStringMap"),
+ 0x036f1: (64, "StringMap"),
+ 0x03741: (73, "ConsOneByteStringMap"),
+ 0x03791: (65, "ConsStringMap"),
+ 0x037e1: (77, "ThinOneByteStringMap"),
+ 0x03831: (69, "ThinStringMap"),
+ 0x03881: (67, "SlicedStringMap"),
+ 0x038d1: (75, "SlicedOneByteStringMap"),
+ 0x03921: (66, "ExternalStringMap"),
+ 0x03971: (82, "ExternalStringWithOneByteDataMap"),
+ 0x039c1: (74, "ExternalOneByteStringMap"),
+ 0x03a11: (98, "ShortExternalStringMap"),
+ 0x03a61: (114, "ShortExternalStringWithOneByteDataMap"),
+ 0x03ab1: (0, "InternalizedStringMap"),
+ 0x03b01: (2, "ExternalInternalizedStringMap"),
+ 0x03b51: (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x03ba1: (10, "ExternalOneByteInternalizedStringMap"),
+ 0x03bf1: (34, "ShortExternalInternalizedStringMap"),
+ 0x03c41: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x03c91: (42, "ShortExternalOneByteInternalizedStringMap"),
+ 0x03ce1: (106, "ShortExternalOneByteStringMap"),
+ 0x03d31: (140, "FixedUint8ArrayMap"),
+ 0x03d81: (139, "FixedInt8ArrayMap"),
+ 0x03dd1: (142, "FixedUint16ArrayMap"),
+ 0x03e21: (141, "FixedInt16ArrayMap"),
+ 0x03e71: (144, "FixedUint32ArrayMap"),
+ 0x03ec1: (143, "FixedInt32ArrayMap"),
+ 0x03f11: (145, "FixedFloat32ArrayMap"),
+ 0x03f61: (146, "FixedFloat64ArrayMap"),
+ 0x03fb1: (147, "FixedUint8ClampedArrayMap"),
+ 0x04001: (149, "FixedBigUint64ArrayMap"),
+ 0x04051: (148, "FixedBigInt64ArrayMap"),
+ 0x040a1: (171, "Tuple2Map"),
+ 0x040f1: (169, "ScriptMap"),
+ 0x04141: (162, "InterceptorInfoMap"),
+ 0x04191: (153, "AccessorInfoMap"),
+ 0x041e1: (152, "AccessCheckInfoMap"),
+ 0x04231: (154, "AccessorPairMap"),
+ 0x04281: (155, "AliasedArgumentsEntryMap"),
+ 0x042d1: (156, "AllocationMementoMap"),
+ 0x04321: (157, "AllocationSiteMap"),
+ 0x04371: (158, "AsyncGeneratorRequestMap"),
+ 0x043c1: (159, "ContextExtensionMap"),
+ 0x04411: (160, "DebugInfoMap"),
+ 0x04461: (161, "FunctionTemplateInfoMap"),
+ 0x044b1: (163, "ModuleInfoEntryMap"),
+ 0x04501: (164, "ModuleMap"),
+ 0x04551: (165, "ObjectTemplateInfoMap"),
+ 0x045a1: (166, "PromiseCapabilityMap"),
+ 0x045f1: (167, "PromiseReactionMap"),
+ 0x04641: (168, "PrototypeInfoMap"),
+ 0x04691: (170, "StackFrameInfoMap"),
+ 0x046e1: (172, "Tuple3Map"),
+ 0x04731: (173, "CallableTaskMap"),
+ 0x04781: (174, "CallbackTaskMap"),
+ 0x047d1: (175, "PromiseFulfillReactionJobTaskMap"),
+ 0x04821: (176, "PromiseRejectReactionJobTaskMap"),
+ 0x04871: (177, "PromiseResolveThenableJobTaskMap"),
}
# List of known V8 objects.
@@ -311,24 +332,24 @@ KNOWN_OBJECTS = {
("OLD_SPACE", 0x02721): "EmptyFixedFloat32Array",
("OLD_SPACE", 0x02741): "EmptyFixedFloat64Array",
("OLD_SPACE", 0x02761): "EmptyFixedUint8ClampedArray",
- ("OLD_SPACE", 0x02781): "EmptyScript",
- ("OLD_SPACE", 0x02809): "UndefinedCell",
- ("OLD_SPACE", 0x02819): "EmptySloppyArgumentsElements",
- ("OLD_SPACE", 0x02839): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x02881): "EmptyOrderedHashMap",
- ("OLD_SPACE", 0x028a9): "EmptyOrderedHashSet",
- ("OLD_SPACE", 0x028d1): "EmptyPropertyCell",
- ("OLD_SPACE", 0x028f9): "EmptyWeakCell",
- ("OLD_SPACE", 0x02969): "NoElementsProtector",
- ("OLD_SPACE", 0x02991): "IsConcatSpreadableProtector",
- ("OLD_SPACE", 0x029a1): "SpeciesProtector",
- ("OLD_SPACE", 0x029c9): "StringLengthProtector",
- ("OLD_SPACE", 0x029d9): "FastArrayIterationProtector",
- ("OLD_SPACE", 0x029e9): "ArrayIteratorProtector",
- ("OLD_SPACE", 0x02a11): "ArrayBufferNeuteringProtector",
- ("OLD_SPACE", 0x02a39): "InfinityValue",
- ("OLD_SPACE", 0x02a49): "MinusZeroValue",
- ("OLD_SPACE", 0x02a59): "MinusInfinityValue",
+ ("OLD_SPACE", 0x027c1): "EmptyScript",
+ ("OLD_SPACE", 0x02849): "ManyClosuresCell",
+ ("OLD_SPACE", 0x02859): "EmptySloppyArgumentsElements",
+ ("OLD_SPACE", 0x02879): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x028c1): "EmptyOrderedHashMap",
+ ("OLD_SPACE", 0x028e9): "EmptyOrderedHashSet",
+ ("OLD_SPACE", 0x02911): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x02939): "EmptyWeakCell",
+ ("OLD_SPACE", 0x029a9): "NoElementsProtector",
+ ("OLD_SPACE", 0x029d1): "IsConcatSpreadableProtector",
+ ("OLD_SPACE", 0x029e1): "SpeciesProtector",
+ ("OLD_SPACE", 0x02a09): "StringLengthProtector",
+ ("OLD_SPACE", 0x02a19): "FastArrayIterationProtector",
+ ("OLD_SPACE", 0x02a29): "ArrayIteratorProtector",
+ ("OLD_SPACE", 0x02a51): "ArrayBufferNeuteringProtector",
+ ("OLD_SPACE", 0x02ac9): "InfinityValue",
+ ("OLD_SPACE", 0x02ad9): "MinusZeroValue",
+ ("OLD_SPACE", 0x02ae9): "MinusInfinityValue",
}
# List of known V8 Frame Markers.
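(For readers orienting themselves in the tables above: v8heapconst.py is generated by mkgrokdump, and postmortem tooling such as tools/grokdump.py resolves raw heap addresses against KNOWN_MAPS and KNOWN_OBJECTS to print human-readable labels. The following is a minimal Python sketch of that lookup, not code from this patch; the page mask and the sample pointer are illustrative assumptions.)

# Minimal sketch: resolving a map address against the generated table.
# KNOWN_MAPS maps a page offset to an (instance_type, name) pair.
# The 0xFFFFF mask and the sample pointer below are assumptions made
# for illustration; real tools derive offsets from the dump's layout.

KNOWN_MAPS = {
    0x02251: (132, "MetaMap"),
    0x022f1: (179, "DescriptorArrayMap"),
    0x02341: (178, "FixedArrayMap"),
}

def describe_map(address, page_mask=0xFFFFF):
    """Return a human-readable label for a known map address, else None."""
    entry = KNOWN_MAPS.get(address & page_mask)
    if entry is None:
        return None
    instance_type, name = entry
    return "%s (instance_type=%d)" % (name, instance_type)

print(describe_map(0x12302341))  # -> "FixedArrayMap (instance_type=178)"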
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index c4d18a3333..92aaa8fd3c 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -28,6 +28,9 @@ mkdir ${SPEC_TEST_DIR}/tmp
./tools/dev/gm.py x64.release d8
cd ${V8_DIR}/test/wasm-js/interpreter
+
+# The next step requires that OCaml is installed. See the README.md in
+# ${V8_DIR}/test/wasm-js/interpreter/.
make clean all
cd ${V8_DIR}/test/wasm-js/test/core
@@ -42,4 +45,11 @@ echo
echo "The following files will get uploaded:"
ls tests
echo
+
+# For the following command you first have to authenticate with Google Cloud
+# Storage by executing
+#
+# > gsutil.py config
+#
+# When the script asks for your project-id, use 0.
upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 83f006688c..2367b2ccc8 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,4 +7,6 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly...
-The bartender starts to shake the bottles.......................
+The bartender starts to shake the bottles........
+.
+.